#ifndef OPENCV_CUDA_UTILITY_HPP
#define OPENCV_CUDA_UTILITY_HPP

#include "saturate_cast.hpp"
#include "datamov_utils.hpp"

namespace cv { namespace cuda { namespace device
{
    // Allocator interface for Thrust-style allocations made from OpenCV's CUDA
    // code; a process-wide instance is exposed via getAllocator()/setAllocator().
    struct CV_EXPORTS ThrustAllocator
    {
        typedef uchar value_type;

        virtual ~ThrustAllocator();
        virtual __device__ __host__ uchar* allocate(size_t numBytes) = 0;
        virtual __device__ __host__ void deallocate(uchar* ptr, size_t numBytes) = 0;

        static ThrustAllocator& getAllocator();
        static void setAllocator(ThrustAllocator* allocator);
    };
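
    // Illustrative sketch (not part of the original header): a minimal allocator
    // backed by cudaMalloc/cudaFree, installed process-wide through setAllocator().
    // The class and variable names are hypothetical; error handling is omitted.
    //
    //   struct PlainCudaAllocator : cv::cuda::device::ThrustAllocator
    //   {
    //       virtual __device__ __host__ uchar* allocate(size_t numBytes)
    //       {
    //   #ifndef __CUDA_ARCH__
    //           uchar* ptr = 0;
    //           cudaMalloc(&ptr, numBytes); // host-side path
    //           return ptr;
    //   #else
    //           return 0;                   // device-side allocation not supported here
    //   #endif
    //       }
    //       virtual __device__ __host__ void deallocate(uchar* ptr, size_t)
    //       {
    //   #ifndef __CUDA_ARCH__
    //           cudaFree(ptr);
    //   #endif
    //       }
    //   };
    //
    //   PlainCudaAllocator g_thrustAlloc; // must outlive every user of the allocator
    //   // cv::cuda::device::ThrustAllocator::setAllocator(&g_thrustAlloc);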

    #define OPENCV_CUDA_LOG_WARP_SIZE (5)
    #define OPENCV_CUDA_WARP_SIZE     (1 << OPENCV_CUDA_LOG_WARP_SIZE)
    #define OPENCV_CUDA_LOG_MEM_BANKS ((__CUDA_ARCH__ >= 200) ? 5 : 4) // 32 banks on Fermi, 16 on Tesla
    #define OPENCV_CUDA_MEM_BANKS     (1 << OPENCV_CUDA_LOG_MEM_BANKS)
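
    // Illustrative note (assumption, not from the original header): the bank
    // count above is typically used to pad shared-memory tiles so that a warp
    // accessing a column hits distinct banks, e.g.
    //
    //   __shared__ float tile[OPENCV_CUDA_WARP_SIZE][OPENCV_CUDA_WARP_SIZE + 1];
    //
    // The +1 skews consecutive rows across banks and avoids column-wise conflicts.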

    // Generic swap usable from both host and device code.
    template <typename T> void __device__ __host__ __forceinline__ swap(T& a, T& b)
    {
        const T temp = a;
        a = b;
        b = temp;
    }

    // Mask functors: each exposes operator()(y, x) returning whether the pixel
    // is enabled, so kernels can be templated on the mask type.
    struct SingleMask
    {
        explicit __host__ __device__ __forceinline__ SingleMask(PtrStepb mask_) : mask(mask_) {}
        __host__ __device__ __forceinline__ SingleMask(const SingleMask& mask_) : mask(mask_.mask) {}

        __device__ __forceinline__ bool operator()(int y, int x) const
        {
            return mask.ptr(y)[x] != 0;
        }

        PtrStepb mask;
    };
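
    // Illustrative sketch (hypothetical kernel, not from the original header):
    // masked and unmasked kernel variants can share one implementation that is
    // templated on the mask functor.
    //
    //   template <typename Mask>
    //   __global__ void setToOne(PtrStepSzb dst, Mask mask)
    //   {
    //       const int x = blockIdx.x * blockDim.x + threadIdx.x;
    //       const int y = blockIdx.y * blockDim.y + threadIdx.y;
    //       if (x < dst.cols && y < dst.rows && mask(y, x))
    //           dst.ptr(y)[x] = 1;
    //   }
    //
    //   // launch: setToOne<<<grid, block>>>(dst, SingleMask(mask));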

    struct SingleMaskChannels
    {
        __host__ __device__ __forceinline__ SingleMaskChannels(PtrStepb mask_, int channels_)
            : mask(mask_), channels(channels_) {}
        __host__ __device__ __forceinline__ SingleMaskChannels(const SingleMaskChannels& mask_)
            : mask(mask_.mask), channels(mask_.channels) {}

        __device__ __forceinline__ bool operator()(int y, int x) const
        {
            return mask.ptr(y)[x / channels] != 0;
        }

        PtrStepb mask;
        int channels;
    };
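
    // Usage note (illustrative): for an interleaved 3-channel image, one mask
    // pixel gates all three values at a pixel position:
    //
    //   SingleMaskChannels m(mask, 3);
    //   // m(y, x) for x = 3*px, 3*px + 1, 3*px + 2 all read mask.ptr(y)[px]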

    struct MaskCollection
    {
        explicit __host__ __device__ __forceinline__ MaskCollection(PtrStepb* maskCollection_)
            : maskCollection(maskCollection_) {}

        __device__ __forceinline__ MaskCollection(const MaskCollection& masks_)
            : maskCollection(masks_.maskCollection), curMask(masks_.curMask) {}

        __device__ __forceinline__ void next()
        {
            curMask = *maskCollection++;
        }
        __device__ __forceinline__ void setMask(int z)
        {
            curMask = maskCollection[z];
        }

        __device__ __forceinline__ bool operator()(int y, int x) const
        {
            uchar val;
            return curMask.data == 0 || (ForceGlob<uchar>::Load(curMask.ptr(y), x, val), (val != 0));
        }

        const PtrStepb* maskCollection;
        PtrStepb curMask;
    };
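
    // Usage sketch (illustrative, names hypothetical): with one mask per train
    // image, select the active mask by index before testing pixels; a null mask
    // (data == 0) accepts every position:
    //
    //   MaskCollection masks(d_maskArray); // device pointer to a PtrStepb array
    //   masks.setMask(z);                  // activate the mask of image z
    //   bool enabled = masks(y, x);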

    // No-op mask: drop-in replacement that accepts every pixel.
    struct WithOutMask
    {
        __host__ __device__ __forceinline__ WithOutMask() {}
        __host__ __device__ __forceinline__ WithOutMask(const WithOutMask&) {}

        __device__ __forceinline__ void next() const
        {
        }
        __device__ __forceinline__ void setMask(int) const
        {
        }

        __device__ __forceinline__ bool operator()(int, int) const
        {
            return true;
        }
        __device__ __forceinline__ bool operator()(int, int, int) const
        {
            return true;
        }

        static __device__ __forceinline__ bool check(int, int)
        {
            return true;
        }
        static __device__ __forceinline__ bool check(int, int, int)
        {
            return true;
        }
    };
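
    // Usage note (illustrative): WithOutMask feeds the same templated kernels,
    // so the unmasked variant adds no per-pixel cost:
    //
    //   setToOne<<<grid, block>>>(dst, WithOutMask()); // hypothetical kernel above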

    // Solve the 2x2 linear system A*x = b by Cramer's rule; returns false if A is singular.
    template <typename T> __device__ __forceinline__ bool solve2x2(const T A[2][2], const T b[2], T x[2])
    {
        T det = A[0][0] * A[1][1] - A[1][0] * A[0][1];

        if (det != 0)
        {
            double invdet = 1.0 / det;

            x[0] = saturate_cast<T>(invdet * (b[0] * A[1][1] - b[1] * A[0][1]));
            x[1] = saturate_cast<T>(invdet * (A[0][0] * b[1] - A[1][0] * b[0]));

            return true;
        }

        return false;
    }
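
    // Worked example (illustrative): A = {{2, 1}, {1, 3}}, b = {3, 5} gives
    // det = 2*3 - 1*1 = 5, x[0] = (3*3 - 5*1)/5 = 0.8, x[1] = (2*5 - 1*3)/5 = 1.4.
    //
    //   float A[2][2] = {{2.f, 1.f}, {1.f, 3.f}};
    //   float b[2] = {3.f, 5.f}, x[2];
    //   bool ok = solve2x2(A, b, x); // ok == true, x = {0.8f, 1.4f}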

    // Solve the 3x3 linear system A*x = b by Cramer's rule; returns false if A is singular.
    template <typename T> __device__ __forceinline__ bool solve3x3(const T A[3][3], const T b[3], T x[3])
    {
        T det = A[0][0] * (A[1][1] * A[2][2] - A[1][2] * A[2][1])
              - A[0][1] * (A[1][0] * A[2][2] - A[1][2] * A[2][0])
              + A[0][2] * (A[1][0] * A[2][1] - A[1][1] * A[2][0]);

        if (det != 0)
        {
            double invdet = 1.0 / det;

            x[0] = saturate_cast<T>(invdet *
                (b[0]    * (A[1][1] * A[2][2] - A[1][2] * A[2][1]) -
                 A[0][1] * (b[1]    * A[2][2] - A[1][2] * b[2]   ) +
                 A[0][2] * (b[1]    * A[2][1] - A[1][1] * b[2]   )));

            x[1] = saturate_cast<T>(invdet *
                (A[0][0] * (b[1]    * A[2][2] - A[1][2] * b[2]   ) -
                 b[0]    * (A[1][0] * A[2][2] - A[1][2] * A[2][0]) +
                 A[0][2] * (A[1][0] * b[2]    - b[1]    * A[2][0])));

            x[2] = saturate_cast<T>(invdet *
                (A[0][0] * (A[1][1] * b[2]    - b[1]    * A[2][1]) -
                 A[0][1] * (A[1][0] * b[2]    - b[1]    * A[2][0]) +
                 b[0]    * (A[1][0] * A[2][1] - A[1][1] * A[2][0])));

            return true;
        }

        return false;
    }
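
    // Usage sketch (illustrative): callers must check the return value, since a
    // singular system leaves x unmodified:
    //
    //   float A[3][3] = {{4.f, 0.f, 0.f}, {0.f, 2.f, 0.f}, {0.f, 0.f, 1.f}};
    //   float b[3] = {8.f, 6.f, 5.f}, x[3];
    //   if (solve3x3(A, b, x)) { /* x = {2, 3, 5} */ }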
}}} // namespace cv { namespace cuda { namespace device

#endif // OPENCV_CUDA_UTILITY_HPP