OpenCV 4.5.3 (Japanese machine translation)
detail/reduce.hpp
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_CUDA_REDUCE_DETAIL_HPP
#define OPENCV_CUDA_REDUCE_DETAIL_HPP

#include <thrust/tuple.h>
#include "../warp.hpp"
#include "../warp_shuffle.hpp"

namespace cv { namespace cuda { namespace device
{
    namespace reduce_detail
    {
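        // GetType: maps a (volatile) pointer or reference type to its underlying
        // value type, so a register temporary can be declared for a tuple element.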
        template <typename T> struct GetType;
        template <typename T> struct GetType<T*>
        {
            typedef T type;
        };
        template <typename T> struct GetType<volatile T*>
        {
            typedef T type;
        };
        template <typename T> struct GetType<T&>
        {
            typedef T type;
        };

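        // For<I, N>: compile-time loop over tuple elements [I, N). Each step handles
        // element I and recurses on I + 1; the For<N, N> specialization below
        // terminates the recursion with empty no-ops.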
        template <unsigned int I, unsigned int N>
        struct For
        {
            template <class PointerTuple, class ValTuple>
            static __device__ void loadToSmem(const PointerTuple& smem, const ValTuple& val, unsigned int tid)
            {
                thrust::get<I>(smem)[tid] = thrust::get<I>(val);

                For<I + 1, N>::loadToSmem(smem, val, tid);
            }
            template <class PointerTuple, class ValTuple>
            static __device__ void loadFromSmem(const PointerTuple& smem, const ValTuple& val, unsigned int tid)
            {
                thrust::get<I>(val) = thrust::get<I>(smem)[tid];

                For<I + 1, N>::loadFromSmem(smem, val, tid);
            }

            template <class PointerTuple, class ValTuple, class OpTuple>
            static __device__ void merge(const PointerTuple& smem, const ValTuple& val, unsigned int tid, unsigned int delta, const OpTuple& op)
            {
                typename GetType<typename thrust::tuple_element<I, PointerTuple>::type>::type reg = thrust::get<I>(smem)[tid + delta];
                thrust::get<I>(smem)[tid] = thrust::get<I>(val) = thrust::get<I>(op)(thrust::get<I>(val), reg);

                For<I + 1, N>::merge(smem, val, tid, delta, op);
            }
            template <class ValTuple, class OpTuple>
            static __device__ void mergeShfl(const ValTuple& val, unsigned int delta, unsigned int width, const OpTuple& op)
            {
                typename GetType<typename thrust::tuple_element<I, ValTuple>::type>::type reg = shfl_down(thrust::get<I>(val), delta, width);
                thrust::get<I>(val) = thrust::get<I>(op)(thrust::get<I>(val), reg);

                For<I + 1, N>::mergeShfl(val, delta, width, op);
            }
        };
        template <unsigned int N>
        struct For<N, N>
        {
            template <class PointerTuple, class ValTuple>
            static __device__ void loadToSmem(const PointerTuple&, const ValTuple&, unsigned int)
            {
            }
            template <class PointerTuple, class ValTuple>
            static __device__ void loadFromSmem(const PointerTuple&, const ValTuple&, unsigned int)
            {
            }

            template <class PointerTuple, class ValTuple, class OpTuple>
            static __device__ void merge(const PointerTuple&, const ValTuple&, unsigned int, unsigned int, const OpTuple&)
            {
            }
            template <class ValTuple, class OpTuple>
            static __device__ void mergeShfl(const ValTuple&, unsigned int, unsigned int, const OpTuple&)
            {
            }
        };

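        // Scalar and tuple overloads for moving values between registers and shared
        // memory; the tuple versions fan out over all ten elements via For.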
        template <typename T>
        __device__ __forceinline__ void loadToSmem(volatile T* smem, T& val, unsigned int tid)
        {
            smem[tid] = val;
        }
        template <typename T>
        __device__ __forceinline__ void loadFromSmem(volatile T* smem, T& val, unsigned int tid)
        {
            val = smem[tid];
        }
        template <typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9,
                  typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9>
        __device__ __forceinline__ void loadToSmem(const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>& smem,
                                                   const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,
                                                   unsigned int tid)
        {
            For<0, thrust::tuple_size<thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9> >::value>::loadToSmem(smem, val, tid);
        }
        template <typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9,
                  typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9>
        __device__ __forceinline__ void loadFromSmem(const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>& smem,
                                                     const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,
                                                     unsigned int tid)
        {
            For<0, thrust::tuple_size<thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9> >::value>::loadFromSmem(smem, val, tid);
        }

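        // merge: combine the partner value at smem[tid + delta] into val through op
        // and write the result back to smem[tid]. mergeShfl performs the same
        // exchange with warp shuffles instead of shared memory.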
        template <typename T, class Op>
        __device__ __forceinline__ void merge(volatile T* smem, T& val, unsigned int tid, unsigned int delta, const Op& op)
        {
            T reg = smem[tid + delta];
            smem[tid] = val = op(val, reg);
        }
        template <typename T, class Op>
        __device__ __forceinline__ void mergeShfl(T& val, unsigned int delta, unsigned int width, const Op& op)
        {
            T reg = shfl_down(val, delta, width);
            val = op(val, reg);
        }
        template <typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9,
                  typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9,
                  class Op0, class Op1, class Op2, class Op3, class Op4, class Op5, class Op6, class Op7, class Op8, class Op9>
        __device__ __forceinline__ void merge(const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>& smem,
                                              const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,
                                              unsigned int tid,
                                              unsigned int delta,
                                              const thrust::tuple<Op0, Op1, Op2, Op3, Op4, Op5, Op6, Op7, Op8, Op9>& op)
        {
            For<0, thrust::tuple_size<thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9> >::value>::merge(smem, val, tid, delta, op);
        }
        template <typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9,
                  class Op0, class Op1, class Op2, class Op3, class Op4, class Op5, class Op6, class Op7, class Op8, class Op9>
        __device__ __forceinline__ void mergeShfl(const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,
                                                  unsigned int delta,
                                                  unsigned int width,
                                                  const thrust::tuple<Op0, Op1, Op2, Op3, Op4, Op5, Op6, Op7, Op8, Op9>& op)
        {
            For<0, thrust::tuple_size<thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9> >::value>::mergeShfl(val, delta, width, op);
        }

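        // Generic<N>: shared-memory tree reduction for an arbitrary block size N.
        // Each stage halves the number of active threads with a barrier in between;
        // the final 32 values are folded without __syncthreads(), relying on the
        // volatile shared-memory pointer and implicit warp-synchronous execution
        // (a legacy assumption this header carries).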
        template <unsigned int N> struct Generic
        {
            template <typename Pointer, typename Reference, class Op>
            static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op)
            {
                loadToSmem(smem, val, tid);
                if (N >= 32)
                    __syncthreads();

                if (N >= 2048)
                {
                    if (tid < 1024)
                        merge(smem, val, tid, 1024, op);

                    __syncthreads();
                }
                if (N >= 1024)
                {
                    if (tid < 512)
                        merge(smem, val, tid, 512, op);

                    __syncthreads();
                }
                if (N >= 512)
                {
                    if (tid < 256)
                        merge(smem, val, tid, 256, op);

                    __syncthreads();
                }
                if (N >= 256)
                {
                    if (tid < 128)
                        merge(smem, val, tid, 128, op);

                    __syncthreads();
                }
                if (N >= 128)
                {
                    if (tid < 64)
                        merge(smem, val, tid, 64, op);

                    __syncthreads();
                }
                if (N >= 64)
                {
                    if (tid < 32)
                        merge(smem, val, tid, 32, op);
                }

                if (tid < 16)
                {
                    merge(smem, val, tid, 16, op);
                    merge(smem, val, tid, 8, op);
                    merge(smem, val, tid, 4, op);
                    merge(smem, val, tid, 2, op);
                    merge(smem, val, tid, 1, op);
                }
            }
        };

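        // Unroll<I, ...>: fully unrolled halving loop; loopShfl reduces with warp
        // shuffles, loop with shared memory. The recursion halves I each step and
        // stops at the Unroll<0, ...> specialization.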
        template <unsigned int I, typename Pointer, typename Reference, class Op>
        struct Unroll
        {
            static __device__ void loopShfl(Reference val, Op op, unsigned int N)
            {
                mergeShfl(val, I, N, op);
                Unroll<I / 2, Pointer, Reference, Op>::loopShfl(val, op, N);
            }
            static __device__ void loop(Pointer smem, Reference val, unsigned int tid, Op op)
            {
                merge(smem, val, tid, I, op);
                Unroll<I / 2, Pointer, Reference, Op>::loop(smem, val, tid, op);
            }
        };
        template <typename Pointer, typename Reference, class Op>
        struct Unroll<0, Pointer, Reference, Op>
        {
            static __device__ void loopShfl(Reference, Op, unsigned int)
            {
            }
            static __device__ void loop(Pointer, Reference, unsigned int, Op)
            {
            }
        };

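        // WarpOptimized<N>: reduction that fits in a single warp (N <= 32, power of
        // two). On sm_30 and newer it uses shuffle instructions only and never
        // touches shared memory; older architectures fall back to the smem path.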
        template <unsigned int N> struct WarpOptimized
        {
            template <typename Pointer, typename Reference, class Op>
            static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op)
            {
            #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
                CV_UNUSED(smem);
                CV_UNUSED(tid);

                Unroll<N / 2, Pointer, Reference, Op>::loopShfl(val, op, N);
            #else
                loadToSmem(smem, val, tid);

                if (tid < N / 2)
                    Unroll<N / 2, Pointer, Reference, Op>::loop(smem, val, tid, op);
            #endif
            }
        };

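        // GenericOptimized32<N>: two-level reduction for power-of-two N up to 1024.
        // Each warp first reduces its own 32 values (shuffles on sm_30+, shared
        // memory otherwise), lane 0 stores the warp's partial at smem[tid / 32],
        // and the first warp then reduces the M = N / 32 partials.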
        template <unsigned int N> struct GenericOptimized32
        {
            enum { M = N / 32 };

            template <typename Pointer, typename Reference, class Op>
            static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op)
            {
                const unsigned int laneId = Warp::laneId();

            #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
                Unroll<16, Pointer, Reference, Op>::loopShfl(val, op, warpSize);

                if (laneId == 0)
                    loadToSmem(smem, val, tid / 32);
            #else
                loadToSmem(smem, val, tid);

                if (laneId < 16)
                    Unroll<16, Pointer, Reference, Op>::loop(smem, val, tid, op);

                __syncthreads();

                if (laneId == 0)
                    loadToSmem(smem, val, tid / 32);
            #endif

                __syncthreads();

                loadFromSmem(smem, val, tid);

                if (tid < 32)
                {
                #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
                    Unroll<M / 2, Pointer, Reference, Op>::loopShfl(val, op, M);
                #else
                    Unroll<M / 2, Pointer, Reference, Op>::loop(smem, val, tid, op);
                #endif
                }
            }
        };

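        // Compile-time plumbing: StaticIf selects between two types by a boolean
        // (like std::conditional), IsPowerOf2 tests N at compile time, and
        // Dispatcher<N> picks the best reductor for a given block size.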
        template <bool val, class T1, class T2> struct StaticIf;
        template <class T1, class T2> struct StaticIf<true, T1, T2>
        {
            typedef T1 type;
        };
        template <class T1, class T2> struct StaticIf<false, T1, T2>
        {
            typedef T2 type;
        };

        template <unsigned int N> struct IsPowerOf2
        {
            enum { value = ((N != 0) && !(N & (N - 1))) };
        };

        template <unsigned int N> struct Dispatcher
        {
            typedef typename StaticIf<
                (N <= 32) && IsPowerOf2<N>::value,
                WarpOptimized<N>,
                typename StaticIf<
                    (N <= 1024) && IsPowerOf2<N>::value,
                    GenericOptimized32<N>,
                    Generic<N>
                >::type
            >::type reductor;
        };
    }
}}}

#endif // OPENCV_CUDA_REDUCE_DETAIL_HPP
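
The listing above is an internal implementation detail. For orientation, here is a minimal usage sketch, not part of this header: it assumes the companion public header opencv2/core/cuda/reduce.hpp, which wraps reduce_detail::Dispatcher<N>::reductor in a device function cv::cuda::device::reduce<N>(), and the plus<> functor from opencv2/core/cuda/functional.hpp. The kernel name and buffers are illustrative.

#include <opencv2/core/cuda/reduce.hpp>      // public wrapper around Dispatcher<N>
#include <opencv2/core/cuda/functional.hpp>  // device-side functors such as plus<>

// Illustrative kernel: each block sums BLOCK_SIZE floats and writes one partial sum.
template <unsigned int BLOCK_SIZE>
__global__ void blockSum(const float* in, float* out, unsigned int n)
{
    __shared__ float smem[BLOCK_SIZE];

    const unsigned int tid = threadIdx.x;
    const unsigned int idx = blockIdx.x * BLOCK_SIZE + tid;

    float val = (idx < n) ? in[idx] : 0.0f;

    // Dispatcher<BLOCK_SIZE> (via the wrapper) selects WarpOptimized,
    // GenericOptimized32 or Generic depending on BLOCK_SIZE.
    cv::cuda::device::reduce<BLOCK_SIZE>(smem, val, tid, cv::cuda::device::plus<float>());

    if (tid == 0)
        out[blockIdx.x] = val;  // thread 0 holds the block-wide sum
}

// Launch example: blockSum<256><<<numBlocks, 256>>>(d_in, d_out, n);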