#ifndef OPENCV_CORE_CUDAINL_HPP
#define OPENCV_CORE_CUDAINL_HPP

#include "opencv2/core/cuda.hpp"

namespace cv { namespace cuda {
//===================================================================================
// GpuMat
//===================================================================================

inline
GpuMat::GpuMat(Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{}

inline
GpuMat::GpuMat(int rows_, int cols_, int type_, Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    if (rows_ > 0 && cols_ > 0)
        create(rows_, cols_, type_);
}

inline
GpuMat::GpuMat(Size size_, int type_, Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    if (size_.height > 0 && size_.width > 0)
        create(size_.height, size_.width, type_);
}

inline
GpuMat::GpuMat(int rows_, int cols_, int type_, Scalar s_, Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    if (rows_ > 0 && cols_ > 0)
    {
        create(rows_, cols_, type_);
        setTo(s_);
    }
}

inline
GpuMat::GpuMat(Size size_, int type_, Scalar s_, Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    if (size_.height > 0 && size_.width > 0)
    {
        create(size_.height, size_.width, type_);
        setTo(s_);
    }
}

inline
GpuMat::GpuMat(const GpuMat& m)
    : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), allocator(m.allocator)
{
    if (refcount)
        CV_XADD(refcount, 1);
}

inline
GpuMat::GpuMat(InputArray arr, Allocator* allocator_) :
    flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    upload(arr);
}

inline
GpuMat::~GpuMat()
{
    release();
}

inline
GpuMat& GpuMat::operator =(const GpuMat& m)
{
    if (this != &m)
    {
        GpuMat temp(m);
        swap(temp);
    }

    return *this;
}

inline
void GpuMat::create(Size size_, int type_)
{
    create(size_.height, size_.width, type_);
}

inline
void GpuMat::swap(GpuMat& b)
{
    std::swap(flags, b.flags);
    std::swap(rows, b.rows);
    std::swap(cols, b.cols);
    std::swap(step, b.step);
    std::swap(data, b.data);
    std::swap(datastart, b.datastart);
    std::swap(dataend, b.dataend);
    std::swap(refcount, b.refcount);
    std::swap(allocator, b.allocator);
}

inline
GpuMat GpuMat::clone() const
{
    GpuMat m;
    copyTo(m);
    return m;
}

inline
void GpuMat::copyTo(OutputArray dst, InputArray mask) const
{
    copyTo(dst, mask, Stream::Null());
}
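// Illustrative round trip (hypothetical helper, not part of the original
// header): the InputArray constructor uploads host data, clone() deep-copies
// on the device, and download() brings the result back.
static inline Mat exampleGpuMatRoundTrip(const Mat& host)
{
    GpuMat device(host);           // allocates device memory and uploads `host`
    GpuMat deep = device.clone();  // independent device-side copy
    Mat result;
    deep.download(result);         // blocking copy back into host memory
    return result;
}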
inline
GpuMat& GpuMat::setTo(Scalar s)
{
    return setTo(s, Stream::Null());
}

inline
GpuMat& GpuMat::setTo(Scalar s, InputArray mask)
{
    return setTo(s, mask, Stream::Null());
}

inline
void GpuMat::convertTo(OutputArray dst, int rtype) const
{
    convertTo(dst, rtype, Stream::Null());
}

inline
void GpuMat::convertTo(OutputArray dst, int rtype, double alpha, double beta) const
{
    convertTo(dst, rtype, alpha, beta, Stream::Null());
}

inline
void GpuMat::convertTo(OutputArray dst, int rtype, double alpha, Stream& stream) const
{
    convertTo(dst, rtype, alpha, 0.0, stream);
}

inline
void GpuMat::assignTo(GpuMat& m, int _type) const
{
    if (_type < 0)
        m = *this;
    else
        convertTo(m, _type);
}
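// Sketch of the synchronous/asynchronous split (hypothetical helper): the
// overloads above without a Stream parameter forward to Stream::Null();
// passing an explicit stream keeps the conversion asynchronous on the host.
static inline void exampleConvertAsync(const GpuMat& src, GpuMat& dst, Stream& stream)
{
    src.convertTo(dst, CV_32F, 1.0 / 255.0, stream); // queued; returns immediately
}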
inline
uchar* GpuMat::ptr(int y)
{
    CV_DbgAssert( (unsigned)y < (unsigned)rows );
    return data + step * y;
}

inline
const uchar* GpuMat::ptr(int y) const
{
    CV_DbgAssert( (unsigned)y < (unsigned)rows );
    return data + step * y;
}

template<typename _Tp> inline
_Tp* GpuMat::ptr(int y)
{
    return (_Tp*)ptr(y);
}

template<typename _Tp> inline
const _Tp* GpuMat::ptr(int y) const
{
    return (const _Tp*)ptr(y);
}

template <class T> inline
GpuMat::operator PtrStepSz<T>() const
{
    return PtrStepSz<T>(rows, cols, (T*)data, step);
}

template <class T> inline
GpuMat::operator PtrStep<T>() const
{
    return PtrStep<T>((T*)data, step);
}
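// The PtrStepSz conversion is how a GpuMat is usually handed to a custom CUDA
// kernel. A host-side sketch (hypothetical helper); the struct is trivially
// copyable, but its data pointer targets device memory and must not be
// dereferenced on the host:
static inline void examplePtrStepView(const GpuMat& m)
{
    CV_Assert(m.type() == CV_32FC1);
    PtrStepSz<float> view = m; // uses the implicit conversion defined above
    CV_Assert(view.rows == m.rows && view.cols == m.cols);
}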
inline
GpuMat GpuMat::row(int y) const
{
    return GpuMat(*this, Range(y, y+1), Range::all());
}

inline
GpuMat GpuMat::col(int x) const
{
    return GpuMat(*this, Range::all(), Range(x, x+1));
}

inline
GpuMat GpuMat::rowRange(int startrow, int endrow) const
{
    return GpuMat(*this, Range(startrow, endrow), Range::all());
}

inline
GpuMat GpuMat::rowRange(Range r) const
{
    return GpuMat(*this, r, Range::all());
}

inline
GpuMat GpuMat::colRange(int startcol, int endcol) const
{
    return GpuMat(*this, Range::all(), Range(startcol, endcol));
}

inline
GpuMat GpuMat::colRange(Range r) const
{
    return GpuMat(*this, Range::all(), r);
}

inline
GpuMat GpuMat::operator ()(Range rowRange_, Range colRange_) const
{
    return GpuMat(*this, rowRange_, colRange_);
}

inline
GpuMat GpuMat::operator ()(Rect roi) const
{
    return GpuMat(*this, roi);
}
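// ROI views alias the parent's device memory; nothing is copied. A sketch
// (hypothetical helper) of the aliasing behaviour:
static inline void exampleGpuMatRoi(GpuMat& m)
{
    GpuMat top = m.rowRange(0, m.rows / 2); // shares m's buffer
    top.setTo(Scalar::all(0));              // also zeroes the upper half of m
}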
inline
bool GpuMat::isContinuous() const
{
    return (flags & Mat::CONTINUOUS_FLAG) != 0;
}

inline
size_t GpuMat::elemSize() const
{
    return CV_ELEM_SIZE(flags);
}

inline
size_t GpuMat::elemSize1() const
{
    return CV_ELEM_SIZE1(flags);
}

inline
int GpuMat::type() const
{
    return CV_MAT_TYPE(flags);
}

inline
int GpuMat::depth() const
{
    return CV_MAT_DEPTH(flags);
}

inline
int GpuMat::channels() const
{
    return CV_MAT_CN(flags);
}

inline
size_t GpuMat::step1() const
{
    return step / elemSize1();
}

inline
Size GpuMat::size() const
{
    return Size(cols, rows);
}

inline
bool GpuMat::empty() const
{
    return data == 0;
}

inline
void* GpuMat::cudaPtr() const
{
    return data;
}
static inline
GpuMat createContinuous(int rows, int cols, int type)
{
    GpuMat m;
    createContinuous(rows, cols, type, m);
    return m;
}

static inline
void createContinuous(Size size, int type, OutputArray arr)
{
    createContinuous(size.height, size.width, type, arr);
}

static inline
GpuMat createContinuous(Size size, int type)
{
    GpuMat m;
    createContinuous(size, type, m);
    return m;
}

static inline
void ensureSizeIsEnough(Size size, int type, OutputArray arr)
{
    ensureSizeIsEnough(size.height, size.width, type, arr);
}

static inline
void swap(GpuMat& a, GpuMat& b)
{
    a.swap(b);
}
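// Continuous allocation matters when a kernel indexes the image as one flat
// array. A sketch (hypothetical helper):
static inline GpuMat exampleContinuousAlloc(int rows, int cols)
{
    GpuMat m;
    createContinuous(rows, cols, CV_8UC1, m);
    CV_Assert(m.isContinuous()); // step equals cols * elemSize() here
    return m;
}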
//===================================================================================
// GpuMatND
//===================================================================================

inline
GpuMatND::GpuMatND() :
    flags(0), dims(0), data(nullptr), offset(0)
{
}

inline
GpuMatND::GpuMatND(SizeArray _size, int _type) :
    flags(0), dims(0), data(nullptr), offset(0)
{
    create(std::move(_size), _type);
}

inline
bool GpuMatND::isContinuous() const
{
    return (flags & Mat::CONTINUOUS_FLAG) != 0;
}

inline
bool GpuMatND::isSubmatrix() const
{
    return (flags & Mat::SUBMATRIX_FLAG) != 0;
}

inline
size_t GpuMatND::elemSize() const
{
    return CV_ELEM_SIZE(flags);
}

inline
size_t GpuMatND::elemSize1() const
{
    return CV_ELEM_SIZE1(flags);
}

inline
bool GpuMatND::empty() const
{
    return data == nullptr;
}

inline
bool GpuMatND::external() const
{
    return !empty() && data_.use_count() == 0;
}

inline
uchar* GpuMatND::getDevicePtr() const
{
    return data + offset;
}

inline
size_t GpuMatND::total() const
{
    size_t p = 1;
    for (auto s : size)
        p *= s;
    return p;
}

inline
size_t GpuMatND::totalMemSize() const
{
    return size[0] * step[0];
}

inline
int GpuMatND::type() const
{
    return CV_MAT_TYPE(flags);
}
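// GpuMatND generalizes GpuMat to n dimensions. A sketch (hypothetical helper)
// of allocation and the total()/totalMemSize() distinction:
static inline void exampleGpuMatND()
{
    GpuMatND volume({16, 64, 64}, CV_32F);     // 3-D block of floats on the device
    CV_Assert(volume.total() == 16 * 64 * 64); // element count
    // totalMemSize() == size[0] * step[0] bytes and includes any row padding.
}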
//===================================================================================
// HostMem
//===================================================================================

inline
HostMem::HostMem(AllocType alloc_type_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
}

inline
HostMem::HostMem(const HostMem& m)
    : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), alloc_type(m.alloc_type)
{
    if (refcount)
        CV_XADD(refcount, 1);
}

inline
HostMem::HostMem(int rows_, int cols_, int type_, AllocType alloc_type_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
    if (rows_ > 0 && cols_ > 0)
        create(rows_, cols_, type_);
}

inline
HostMem::HostMem(Size size_, int type_, AllocType alloc_type_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
    if (size_.height > 0 && size_.width > 0)
        create(size_.height, size_.width, type_);
}

inline
HostMem::HostMem(InputArray arr, AllocType alloc_type_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
    arr.getMat().copyTo(*this);
}

inline
HostMem::~HostMem()
{
    release();
}

inline
HostMem& HostMem::operator =(const HostMem& m)
{
    if (this != &m)
    {
        HostMem temp(m);
        swap(temp);
    }

    return *this;
}

inline
void HostMem::swap(HostMem& b)
{
    std::swap(flags, b.flags);
    std::swap(rows, b.rows);
    std::swap(cols, b.cols);
    std::swap(step, b.step);
    std::swap(data, b.data);
    std::swap(datastart, b.datastart);
    std::swap(dataend, b.dataend);
    std::swap(refcount, b.refcount);
    std::swap(alloc_type, b.alloc_type);
}

inline
HostMem HostMem::clone() const
{
    HostMem m(size(), type(), alloc_type);
    createMatHeader().copyTo(m);
    return m;
}

inline
void HostMem::create(Size size_, int type_)
{
    create(size_.height, size_.width, type_);
}

inline
Mat HostMem::createMatHeader() const
{
    return Mat(size(), type(), data, step);
}

inline
bool HostMem::isContinuous() const
{
    return (flags & Mat::CONTINUOUS_FLAG) != 0;
}

inline
size_t HostMem::elemSize() const
{
    return CV_ELEM_SIZE(flags);
}

inline
size_t HostMem::elemSize1() const
{
    return CV_ELEM_SIZE1(flags);
}

inline
int HostMem::type() const
{
    return CV_MAT_TYPE(flags);
}

inline
int HostMem::depth() const
{
    return CV_MAT_DEPTH(flags);
}

inline
int HostMem::channels() const
{
    return CV_MAT_CN(flags);
}

inline
size_t HostMem::step1() const
{
    return step / elemSize1();
}

inline
Size HostMem::size() const
{
    return Size(cols, rows);
}

inline
bool HostMem::empty() const
{
    return data == 0;
}

static inline
void swap(HostMem& a, HostMem& b)
{
    a.swap(b);
}
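// Page-locked (pinned) host memory is what makes download/upload genuinely
// asynchronous. A sketch (hypothetical helper):
static inline void examplePinnedDownload(const GpuMat& src, Stream& stream, Mat& out)
{
    HostMem pinned(src.size(), src.type(), HostMem::PAGE_LOCKED);
    src.download(pinned, stream);         // queued on `stream`, returns immediately
    stream.waitForCompletion();           // host blocks until the copy completes
    pinned.createMatHeader().copyTo(out); // the header aliases pinned memory; copy out
}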
//===================================================================================
// Stream
//===================================================================================

inline
Stream::Stream(const Ptr<Impl>& impl)
    : impl_(impl)
{
}

//===================================================================================
// Event
//===================================================================================

inline
Event::Event(const Ptr<Impl>& impl)
    : impl_(impl)
{
}
//===================================================================================
// Initialization & Info
//===================================================================================

inline
bool TargetArchs::has(int major, int minor)
{
    return hasPtx(major, minor) || hasBin(major, minor);
}

inline
bool TargetArchs::hasEqualOrGreater(int major, int minor)
{
    return hasEqualOrGreaterPtx(major, minor) || hasEqualOrGreaterBin(major, minor);
}

inline
DeviceInfo::DeviceInfo()
{
    device_id_ = getDevice();
}

inline
DeviceInfo::DeviceInfo(int device_id)
{
    CV_Assert( device_id >= 0 && device_id < getCudaEnabledDeviceCount() );

    device_id_ = device_id;
}

inline
int DeviceInfo::deviceID() const
{
    return device_id_;
}

inline
size_t DeviceInfo::freeMemory() const
{
    size_t _totalMemory = 0, _freeMemory = 0;
    queryMemory(_totalMemory, _freeMemory);
    return _freeMemory;
}

inline
size_t DeviceInfo::totalMemory() const
{
    size_t _totalMemory = 0, _freeMemory = 0;
    queryMemory(_totalMemory, _freeMemory);
    return _totalMemory;
}

inline
bool DeviceInfo::supports(FeatureSet feature_set) const
{
    int version = majorVersion() * 10 + minorVersion();
    return version >= feature_set;
}
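// Typical start-up check (hypothetical helper): verify a CUDA device exists
// and meets a minimum feature set before taking GPU code paths.
static inline bool exampleDeviceUsable()
{
    if (getCudaEnabledDeviceCount() <= 0)
        return false;
    DeviceInfo info; // describes the current device (see getDevice())
    return info.supports(FEATURE_SET_COMPUTE_30);
}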
}} // namespace cv { namespace cuda

//===================================================================================
// Mat
//===================================================================================

namespace cv {

inline
Mat::Mat(const cuda::GpuMat& m)
    : flags(0), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), datalimit(0), allocator(0), u(0), size(&rows)
{
    m.download(*this);
}

}

#endif // OPENCV_CORE_CUDAINL_HPP