OpenCV 4.5.3 (Japanese machine translation)
core/cuda.hpp
1 /*M///////////////////////////////////////////////////////////////////////////////////////
2 //
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4 //
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
8 //
9 //
10 // License Agreement
11 // For Open Source Computer Vision Library
12 //
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Copyright (C) 2013, OpenCV Foundation, all rights reserved.
16 // Third party copyrights are property of their respective owners.
17 //
18 // Redistribution and use in source and binary forms, with or without modification,
19 // are permitted provided that the following conditions are met:
20 //
21 // * Redistribution's of source code must retain the above copyright notice,
22 // this list of conditions and the following disclaimer.
23 //
24 // * Redistribution's in binary form must reproduce the above copyright notice,
25 // this list of conditions and the following disclaimer in the documentation
26 // and/or other materials provided with the distribution.
27 //
28 // * The name of the copyright holders may not be used to endorse or promote products
29 // derived from this software without specific prior written permission.
30 //
31 // This software is provided by the copyright holders and contributors "as is" and
32 // any express or implied warranties, including, but not limited to, the implied
33 // warranties of merchantability and fitness for a particular purpose are disclaimed.
34 // In no event shall the Intel Corporation or contributors be liable for any direct,
35 // indirect, incidental, special, exemplary, or consequential damages
36 // (including, but not limited to, procurement of substitute goods or services;
37 // loss of use, data, or profits; or business interruption) however caused
38 // and on any theory of liability, whether in contract, strict liability,
39 // or tort (including negligence or otherwise) arising in any way out of
40 // the use of this software, even if advised of the possibility of such damage.
41 //
42 //M*/
43
44 #ifndef OPENCV_CORE_CUDA_HPP
45 #define OPENCV_CORE_CUDA_HPP
46
47 #ifndef __cplusplus
48 # error cuda.hpp header must be compiled as C++
49 #endif
50
51 #include "opencv2/core.hpp"
52 #include "opencv2/core/cuda_types.hpp"
53
65 namespace cv { namespace cuda {
66
69
70 //===================================================================================
71 // GpuMat
72 //===================================================================================
73
105 class CV_EXPORTS_W GpuMat
106 {
107 public:
108 class CV_EXPORTS_W Allocator
109 {
110 public:
111 virtual ~Allocator() {}
112
113 // allocator must fill data, step and refcount fields
114 virtual bool allocate(GpuMat* mat, int rows, int cols, size_t elemSize) = 0;
115 virtual void free(GpuMat* mat) = 0;
116 };
117
119 CV_WRAP static GpuMat::Allocator* defaultAllocator();
120 CV_WRAP static void setDefaultAllocator(GpuMat::Allocator* allocator);
121
123 CV_WRAP explicit GpuMat(GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
124
126 CV_WRAP GpuMat(int rows, int cols, int type, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
127 CV_WRAP GpuMat(Size size, int type, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
128
130 CV_WRAP GpuMat(int rows, int cols, int type, Scalar s, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
131 CV_WRAP GpuMat(Size size, int type, Scalar s, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
132
134 CV_WRAP GpuMat(const GpuMat& m);
135
137 GpuMat(int rows, int cols, int type, void* data, size_t step = Mat::AUTO_STEP);
138 GpuMat(Size size, int type, void* data, size_t step = Mat::AUTO_STEP);
139
141 CV_WRAP GpuMat(const GpuMat& m, Range rowRange, Range colRange);
142 CV_WRAP GpuMat(const GpuMat& m, Rect roi);
143
145 CV_WRAP explicit GpuMat(InputArray arr, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
146
148 ~GpuMat();
149
151 GpuMat& operator =(const GpuMat& m);
152
154 CV_WRAP void create(int rows, int cols, int type);
155 CV_WRAP void create(Size size, int type);
156
158 void release();
159
161 CV_WRAP void swap(GpuMat& mat);
162
168 CV_WRAP void upload(InputArray arr);
169
178 CV_WRAP void upload(InputArray arr, Stream& stream);
179
185 CV_WRAP void download(OutputArray dst) const;
186
195 CV_WRAP void download(OutputArray dst, Stream& stream) const;
196
198 CV_WRAP GpuMat clone() const;
199
201 CV_WRAP void copyTo(OutputArray dst) const;
202
204 CV_WRAP void copyTo(OutputArray dst, Stream& stream) const;
205
207 CV_WRAP void copyTo(OutputArray dst, InputArray mask) const;
208
210 CV_WRAP void copyTo(OutputArray dst, InputArray mask, Stream& stream) const;
211
213 CV_WRAP GpuMat& setTo(Scalar s);
214
216 CV_WRAP GpuMat& setTo(Scalar s, Stream& stream);
217
219 CV_WRAP GpuMat& setTo(Scalar s, InputArray mask);
220
222 CV_WRAP GpuMat& setTo(Scalar s, InputArray mask, Stream& stream);
223
225 CV_WRAP void convertTo(OutputArray dst, int rtype) const;
226
228 CV_WRAP void convertTo(OutputArray dst, int rtype, Stream& stream) const;
229
231 CV_WRAP void convertTo(OutputArray dst, int rtype, double alpha, double beta = 0.0) const;
232
234 CV_WRAP void convertTo(OutputArray dst, int rtype, double alpha, Stream& stream) const;
235
237 CV_WRAP void convertTo(OutputArray dst, int rtype, double alpha, double beta, Stream& stream) const;
238
239 CV_WRAP void assignTo(GpuMat& m, int type = -1) const;
240
242 uchar* ptr(int y = 0);
243 const uchar* ptr(int y = 0) const;
244
246 template<typename _Tp> _Tp* ptr(int y = 0);
247 template<typename _Tp> const _Tp* ptr(int y = 0) const;
248
249 template <typename _Tp> operator PtrStepSz<_Tp>() const;
250 template <typename _Tp> operator PtrStep<_Tp>() const;
251
253 CV_WRAP GpuMat row(int y) const;
254
256 CV_WRAP GpuMat col(int x) const;
257
259 CV_WRAP GpuMat rowRange(int startrow, int endrow) const;
260 CV_WRAP GpuMat rowRange(Range r) const;
261
263 CV_WRAP GpuMat colRange(int startcol, int endcol) const;
264 CV_WRAP GpuMat colRange(Range r) const;
265
267 GpuMat operator ()(Range rowRange, Range colRange) const;
268 GpuMat operator ()(Rect roi) const;
269
272 CV_WRAP GpuMat reshape(int cn, int rows = 0) const;
273
275 CV_WRAP void locateROI(Size& wholeSize, Point& ofs) const;
276
278 CV_WRAP GpuMat& adjustROI(int dtop, int dbottom, int dleft, int dright);
279
282 CV_WRAP bool isContinuous() const;
283
285 CV_WRAP size_t elemSize() const;
286
288 CV_WRAP size_t elemSize1() const;
289
291 CV_WRAP int type() const;
292
294 CV_WRAP int depth() const;
295
297 CV_WRAP int channels() const;
298
300 CV_WRAP size_t step1() const;
301
303 CV_WRAP Size size() const;
304
306 CV_WRAP bool empty() const;
307
308 // returns pointer to cuda memory
309 CV_WRAP void* cudaPtr() const;
310
312 CV_WRAP void updateContinuityFlag();
313
320 int flags;
321
323 int rows, cols;
324
326 CV_PROP size_t step;
327
329 uchar* data;
330
333 int* refcount;
334
336 uchar* datastart;
337 const uchar* dataend;
338
340 Allocator* allocator;
341 };
342
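The GpuMat class above is the device-side counterpart of cv::Mat. A minimal round-trip sketch (not part of the header; assumes a CUDA-enabled OpenCV build and at least one CUDA device; sizes and values are arbitrary):

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

int main()
{
    cv::Mat host(480, 640, CV_8UC1, cv::Scalar(100));    // host-side test image

    cv::cuda::GpuMat dev;
    dev.upload(host);                                     // host -> device copy (blocking)

    cv::cuda::GpuMat dev32f;
    dev.convertTo(dev32f, CV_32F, 1.0 / 255.0);           // element conversion runs on the GPU

    cv::Mat result;
    dev32f.download(result);                              // device -> host copy (blocking)
    return 0;
}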
343 struct CV_EXPORTS_W GpuData
344 {
345 explicit GpuData(size_t _size);
346 ~GpuData();
347
348 GpuData(const GpuData&) = delete;
349 GpuData& operator=(const GpuData&) = delete;
350
351 GpuData(GpuData&&) = delete;
352 GpuData& operator=(GpuData&&) = delete;
353
354 uchar* data;
355 size_t size;
356 };
357
358 class CV_EXPORTS_W GpuMatND
359 {
360 public:
361 using SizeArray = std::vector<int>;
362 using StepArray = std::vector<size_t>;
363 using IndexArray = std::vector<int>;
364
367
370
376 GpuMatND(SizeArray size, int type);
377
390 GpuMatND(SizeArray size, int type, void* data, StepArray step = StepArray());
391
399 void create(SizeArray size, int type);
400
401 void release();
402
403 void swap(GpuMatND& m) noexcept;
404
410 GpuMatND clone() const;
411
415 GpuMatND clone(Stream& stream) const;
416
422 GpuMatND operator()(const std::vector<Range>& ranges) const;
423
429 GpuMat createGpuMatHeader(IndexArray idx, Range rowRange, Range colRange) const;
430
437 GpuMat createGpuMatHeader() const;
438
444 GpuMat operator()(IndexArray idx, Range rowRange, Range colRange) const;
445
450 operator GpuMat() const;
451
452 GpuMatND(const GpuMatND&) = default;
453 GpuMatND& operator=(const GpuMatND&) = default;
454
455 #if defined(__GNUC__) && __GNUC__ < 5
456 // error: function '...' defaulted on its first declaration with an exception-specification
457 // that differs from the implicit declaration '...'
458
459 GpuMatND(GpuMatND&&) = default;
460 GpuMatND& operator=(GpuMatND&&) = default;
461 #else
462 GpuMatND(GpuMatND&&) noexcept = default;
463 GpuMatND& operator=(GpuMatND&&) noexcept = default;
464 #endif
465
466 void upload(InputArray src);
467 void upload(InputArray src, Stream& stream);
468 void download(OutputArray dst) const;
469 void download(OutputArray dst, Stream& stream) const;
470
473 bool isContinuous() const;
474
476 bool isSubmatrix() const;
477
479 size_t elemSize() const;
480
482 size_t elemSize1() const;
483
485 bool empty() const;
486
488 bool external() const;
489
491 uchar* getDevicePtr() const;
492
494 size_t total() const;
495
497 size_t totalMemSize() const;
498
500 int type() const;
501
502 private:
504 void setFields(SizeArray size, int type, StepArray step = StepArray());
505
506 public:
513 int flags;
514
516 int dims;
517
519 SizeArray size;
520
524 StepArray step;
525
526 private:
530 std::shared_ptr<GpuData> data_;
531
537 uchar* data;
538
543 size_t offset;
544 };
545
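GpuMatND stores an n-dimensional array on the device. A small sketch, under the assumption that upload/download accept an n-dimensional cv::Mat (shape and values below are arbitrary):

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

int main()
{
    const int dims[] = {2, 3, 4};
    cv::Mat host(3, dims, CV_32F, cv::Scalar(1.0f));    // 2x3x4 host array

    cv::cuda::GpuMatND dnd({2, 3, 4}, CV_32F);           // allocate the same shape on the GPU
    dnd.upload(host);                                     // host -> device

    // dnd.total() == 24 elements, dnd.elemSize() == 4 bytes for CV_32F

    cv::Mat back;
    dnd.download(back);                                   // device -> host
    return 0;
}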
557 CV_EXPORTS_W void createContinuous(int rows, int cols, int type, OutputArray arr);
558
568 CV_EXPORTS_W void ensureSizeIsEnough(int rows, int cols, int type, OutputArray arr);
569
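A sketch of the two helpers above: ensureSizeIsEnough reallocates only when the existing buffer is too small, which is useful in per-frame loops, and createContinuous guarantees row-continuous storage. Sizes below are arbitrary:

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

void processFrames(int frameCount)
{
    cv::cuda::GpuMat buf;                                        // reused across iterations
    for (int i = 0; i < frameCount; ++i)
    {
        cv::cuda::ensureSizeIsEnough(720, 1280, CV_8UC3, buf);   // reallocates only if needed
        // ... fill and process buf ...
    }

    cv::cuda::GpuMat cont;
    cv::cuda::createContinuous(64, 64, CV_32FC1, cont);          // no padding between rows
    CV_Assert(cont.isContinuous());
}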
687 class CV_EXPORTS_W BufferPool
688 {
689 public:
690
692 explicit BufferPool(Stream& stream);
693
695 CV_WRAP GpuMat getBuffer(int rows, int cols, int type);
696
698 CV_WRAP GpuMat getBuffer(Size size, int type) { return getBuffer(size.height, size.width, type); }
699
701 CV_WRAP Ptr<GpuMat::Allocator> getAllocator() const { return allocator_; }
702
703 private:
704 Ptr<GpuMat::Allocator> allocator_;
705 };
706
708 CV_EXPORTS_W void setBufferPoolUsage(bool on);
709 CV_EXPORTS_W void setBufferPoolConfig(int deviceId, size_t stackSize, int stackCount);
710
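A usage sketch for the BufferPool machinery above. Pool usage must be enabled, and optionally configured, before any Stream is constructed; the stack size and count below are arbitrary example values:

#include <opencv2/core/cuda.hpp>

int main()
{
    cv::cuda::setBufferPoolUsage(true);                         // must precede Stream creation
    cv::cuda::setBufferPoolConfig(cv::cuda::getDevice(),
                                  64 * 1024 * 1024, 2);         // 64 MB per stack, 2 stacks

    cv::cuda::Stream stream;                                    // gets its own memory stack
    cv::cuda::BufferPool pool(stream);

    cv::cuda::GpuMat a = pool.getBuffer(1024, 1024, CV_8UC1);   // served from the pool
    cv::cuda::GpuMat b = pool.getBuffer(cv::Size(640, 480), CV_32FC1);
    return 0;
}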
711 //===================================================================================
712 // HostMem
713 //===================================================================================
714
730 class CV_EXPORTS_W HostMem
731 {
732 public:
733 enum AllocType { PAGE_LOCKED = 1, SHARED = 2, WRITE_COMBINED = 4 };
734
735 static MatAllocator* getAllocator(HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED);
736
737 CV_WRAP explicit HostMem(HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED);
738
739 HostMem(const HostMem& m);
740
741 CV_WRAP HostMem(int rows, int cols, int type, HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED);
742 CV_WRAP HostMem(Size size, int type, HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED);
743
745 CV_WRAP explicit HostMem(InputArray arr, HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED);
746
747 ~HostMem();
748
749 HostMem& operator =(const HostMem& m);
750
752 CV_WRAP void swap(HostMem& b);
753
755 CV_WRAP HostMem clone() const;
756
758 CV_WRAP void create(int rows, int cols, int type);
759 void create(Size size, int type);
760
763 CV_WRAP HostMem reshape(int cn, int rows = 0) const;
764
766 void release();
767
769 CV_WRAP Mat createMatHeader() const;
770
778 GpuMat createGpuMatHeader() const;
779
780 // Please see cv::Mat for descriptions
781 CV_WRAP bool isContinuous() const;
782 CV_WRAP size_t elemSize() const;
783 CV_WRAP size_t elemSize1() const;
784 CV_WRAP int type() const;
785 CV_WRAP int depth() const;
786 CV_WRAP int channels() const;
787 CV_WRAP size_t step1() const;
788 CV_WRAP Size size() const;
789 CV_WRAP bool empty() const;
790
791 // Please see cv::Mat for descriptions
792 int flags;
793 int rows, cols;
794 CV_PROP size_t step;
795
796 uchar* data;
797 int* refcount;
798
799 uchar* datastart;
800 const uchar* dataend;
801
802 AllocType alloc_type;
803 };
804
809 CV_EXPORTS_W void registerPageLocked(Mat& m);
810
815 CV_EXPORTS_W void unregisterPageLocked(Mat& m);
816
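Page-locked (pinned) host memory is what makes uploads and downloads on a Stream truly asynchronous. A sketch of both approaches declared above (sizes are arbitrary; assumes a CUDA device is present):

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

int main()
{
    // Option 1: allocate pinned memory up front via HostMem.
    cv::cuda::HostMem pinned(480, 640, CV_8UC1, cv::cuda::HostMem::PAGE_LOCKED);
    cv::Mat header = pinned.createMatHeader();      // cv::Mat view of the pinned buffer, no copy
    header.setTo(cv::Scalar(42));

    cv::cuda::Stream stream;
    cv::cuda::GpuMat dev;
    dev.upload(header, stream);                     // asynchronous because the source is pinned

    // Option 2: page-lock an existing cv::Mat in place.
    cv::Mat plain(480, 640, CV_8UC1, cv::Scalar(0));
    cv::cuda::registerPageLocked(plain);
    dev.upload(plain, stream);
    stream.waitForCompletion();
    cv::cuda::unregisterPageLocked(plain);
    return 0;
}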
817 //===================================================================================
818 // Stream
819 //===================================================================================
820
848 class CV_EXPORTS_W Stream
849 {
850 typedef void (Stream::*bool_type)() const;
851 void this_type_does_not_support_comparisons() const {}
852
853 public:
854 typedef void (*StreamCallback)(int status, void* userData);
855
857 CV_WRAP Stream();
858
860 CV_WRAP Stream(const Ptr<GpuMat::Allocator>& allocator);
861
872 CV_WRAP Stream(const size_t cudaFlags);
873
876 CV_WRAP bool queryIfComplete() const;
877
880 CV_WRAP void waitForCompletion();
881
884 CV_WRAP void waitEvent(const Event& event);
885
894 void enqueueHostCallback(StreamCallback callback, void* userData);
895
897 CV_WRAP static Stream& Null();
898
900 operator bool_type() const;
901
903 CV_WRAP void* cudaPtr() const;
904
905 class Impl;
906
907 private:
908 Ptr<Impl> impl_;
909 Stream(const Ptr<Impl>& impl);
910
911 friend struct StreamAccessor;
912 friend class BufferPool;
913 friend class DefaultDeviceInitializer;
914 };
915
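Operations queued on one Stream execute in order; work queued on different streams may overlap. A sketch with two independent streams (for genuinely asynchronous transfers the host buffers should be page-locked, see HostMem above):

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

int main()
{
    cv::Mat frameA(720, 1280, CV_8UC1, cv::Scalar(10));
    cv::Mat frameB(720, 1280, CV_8UC1, cv::Scalar(20));

    cv::cuda::Stream s1, s2;
    cv::cuda::GpuMat dA, dB, fA, fB;

    dA.upload(frameA, s1);
    dA.convertTo(fA, CV_32F, 1.0 / 255.0, s1);    // queued on s1

    dB.upload(frameB, s2);
    dB.convertTo(fB, CV_32F, 1.0 / 255.0, s2);    // queued on s2, may overlap with s1

    s1.waitForCompletion();                       // block the host until each queue drains
    s2.waitForCompletion();
    return 0;
}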
916 class CV_EXPORTS_W Event
917 {
918 public:
919 enum CreateFlags
920 {
921 DEFAULT = 0x00,
922 BLOCKING_SYNC = 0x01,
923 DISABLE_TIMING = 0x02,
924 INTERPROCESS = 0x04
925 };
926
927 CV_WRAP explicit Event(Event::CreateFlags flags = Event::CreateFlags::DEFAULT);
928
930 CV_WRAP void record(Stream& stream = Stream::Null());
931
933 CV_WRAP bool queryIfComplete() const;
934
936 CV_WRAP void waitForCompletion();
937
939 CV_WRAP static float elapsedTime(const Event& start, const Event& end);
940
941 class Impl;
942
943 private:
944 Ptr<Impl> impl_;
945 Event(const Ptr<Impl>& impl);
946
947 friend struct EventAccessor;
948 };
949
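A sketch of GPU timing with the Event class above: record an event before and after the queued work, then read the elapsed time (the default creation flags keep timing enabled):

#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

int main()
{
    cv::cuda::Stream stream;
    cv::cuda::GpuMat d(2048, 2048, CV_32FC1), out;

    cv::cuda::Event start, stop;
    start.record(stream);
    d.setTo(cv::Scalar(1.0), stream);              // work queued between the two events
    d.convertTo(out, CV_8U, 255.0, stream);
    stop.record(stream);

    stop.waitForCompletion();                      // wait until everything before 'stop' finished
    std::cout << cv::cuda::Event::elapsedTime(start, stop) << " ms" << std::endl;
    return 0;
}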
951
952 //===================================================================================
953 // Initialization & Info
954 //===================================================================================
955
958
965 CV_EXPORTS_W int getCudaEnabledDeviceCount();
966
973 CV_EXPORTS_W void setDevice(int device);
974
977 CV_EXPORTS_W int getDevice();
978
984 CV_EXPORTS_W void resetDevice();
985
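A sketch of basic device discovery and selection with the four functions above (getCudaEnabledDeviceCount also tells you whether the build has CUDA support at all):

#include <iostream>
#include <opencv2/core/cuda.hpp>

int main()
{
    int n = cv::cuda::getCudaEnabledDeviceCount();
    if (n <= 0)
    {
        std::cout << "No usable CUDA device (or OpenCV built without CUDA)" << std::endl;
        return 0;
    }

    cv::cuda::setDevice(0);                        // bind device 0 to the current thread
    std::cout << "Using device " << cv::cuda::getDevice() << " of " << n << std::endl;

    // ... GPU work ...

    cv::cuda::resetDevice();                       // release all resources of the current device
    return 0;
}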
988 enum FeatureSet
989 {
990 FEATURE_SET_COMPUTE_10 = 10,
991 FEATURE_SET_COMPUTE_11 = 11,
992 FEATURE_SET_COMPUTE_12 = 12,
993 FEATURE_SET_COMPUTE_13 = 13,
994 FEATURE_SET_COMPUTE_20 = 20,
995 FEATURE_SET_COMPUTE_21 = 21,
996 FEATURE_SET_COMPUTE_30 = 30,
997 FEATURE_SET_COMPUTE_32 = 32,
998 FEATURE_SET_COMPUTE_35 = 35,
999 FEATURE_SET_COMPUTE_50 = 50,
1000
1001 GLOBAL_ATOMICS = FEATURE_SET_COMPUTE_11,
1002 SHARED_ATOMICS = FEATURE_SET_COMPUTE_12,
1003 NATIVE_DOUBLE = FEATURE_SET_COMPUTE_13,
1004 WARP_SHUFFLE_FUNCTIONS = FEATURE_SET_COMPUTE_30,
1005 DYNAMIC_PARALLELISM = FEATURE_SET_COMPUTE_35
1006 };
1007
1009 CV_EXPORTS bool deviceSupports(FeatureSet feature_set);
1010
1017 class CV_EXPORTS_W TargetArchs
1018{
1019 public:
1024 static bool builtWith(FeatureSet feature_set);
1025
1032 CV_WRAP static bool has(int major, int minor);
1033 CV_WRAP static bool hasPtx(int major, int minor);
1034 CV_WRAP static bool hasBin(int major, int minor);
1035
1036 CV_WRAP static bool hasEqualOrLessPtx(int major, int minor);
1037 CV_WRAP static bool hasEqualOrGreater(int major, int minor);
1038 CV_WRAP static bool hasEqualOrGreaterPtx(int major, int minor);
1039 CV_WRAP static bool hasEqualOrGreaterBin(int major, int minor);
1040 };
1041
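A sketch of the two kinds of checks above: TargetArchs::builtWith asks what the CUDA module binaries were compiled for, while deviceSupports asks what the currently selected device can run:

#include <iostream>
#include <opencv2/core/cuda.hpp>

int main()
{
    bool builtFor30 = cv::cuda::TargetArchs::builtWith(cv::cuda::FEATURE_SET_COMPUTE_30);
    bool hasDouble  = cv::cuda::deviceSupports(cv::cuda::NATIVE_DOUBLE);
    bool hasShuffle = cv::cuda::deviceSupports(cv::cuda::WARP_SHUFFLE_FUNCTIONS);

    std::cout << "built for CC 3.0: " << builtFor30
              << ", native double: " << hasDouble
              << ", warp shuffle: "  << hasShuffle << std::endl;
    return 0;
}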
1044 class CV_EXPORTS_W DeviceInfo
1045 {
1046 public:
1048 CV_WRAP DeviceInfo();
1049
1057 CV_WRAP DeviceInfo(int device_id);
1058
1061 CV_WRAP int deviceID() const;
1062
1064 const char* name() const;
1065
1067 CV_WRAP size_t totalGlobalMem() const;
1068
1070 CV_WRAP size_t sharedMemPerBlock() const;
1071
1073 CV_WRAP int regsPerBlock() const;
1074
1076 CV_WRAP int warpSize() const;
1077
1079 CV_WRAP size_t memPitch() const;
1080
1082 CV_WRAP int maxThreadsPerBlock() const;
1083
1085 CV_WRAP Vec3i maxThreadsDim() const;
1086
1088 CV_WRAP Vec3i maxGridSize() const;
1089
1091 CV_WRAP int clockRate() const;
1092
1094 CV_WRAP size_t totalConstMem() const;
1095
1097 CV_WRAP int majorVersion() const;
1098
1100 CV_WRAP int minorVersion() const;
1101
1103 CV_WRAP size_t textureAlignment() const;
1104
1106 CV_WRAP size_t texturePitchAlignment() const;
1107
1109 CV_WRAP int multiProcessorCount() const;
1110
1112 CV_WRAP bool kernelExecTimeoutEnabled() const;
1113
1115 CV_WRAP bool integrated() const;
1116
1118 CV_WRAP bool canMapHostMemory() const;
1119
1120 enum ComputeMode
1121 {
1122 ComputeModeDefault,
1123 ComputeModeExclusive,
1124 ComputeModeProhibited,
1125 ComputeModeExclusiveProcess
1126 };
1127
1129 CV_WRAP DeviceInfo::ComputeMode computeMode() const;
1130
1132 CV_WRAP int maxTexture1D() const;
1133
1135 CV_WRAP int maxTexture1DMipmap() const;
1136
1138 CV_WRAP int maxTexture1DLinear() const;
1139
1141 CV_WRAP Vec2i maxTexture2D() const;
1142
1144 CV_WRAP Vec2i maxTexture2DMipmap() const;
1145
1147 CV_WRAP Vec3i maxTexture2DLinear() const;
1148
1150 CV_WRAP Vec2i maxTexture2DGather() const;
1151
1153 CV_WRAP Vec3i maxTexture3D() const;
1154
1156 CV_WRAP int maxTextureCubemap() const;
1157
1159 CV_WRAP Vec2i maxTexture1DLayered() const;
1160
1162 CV_WRAP Vec3i maxTexture2DLayered() const;
1163
1165 CV_WRAP Vec2i maxTextureCubemapLayered() const;
1166
1168 CV_WRAP int maxSurface1D() const;
1169
1171 CV_WRAP Vec2i maxSurface2D() const;
1172
1174 CV_WRAP Vec3i maxSurface3D() const;
1175
1177 CV_WRAP Vec2i maxSurface1DLayered() const;
1178
1180 CV_WRAP Vec3i maxSurface2DLayered() const;
1181
1183 CV_WRAP int maxSurfaceCubemap() const;
1184
1186 CV_WRAP Vec2i maxSurfaceCubemapLayered() const;
1187
1189 CV_WRAP size_t surfaceAlignment() const;
1190
1192 CV_WRAP bool concurrentKernels() const;
1193
1195 CV_WRAP bool ECCEnabled() const;
1196
1198 CV_WRAP int pciBusID() const;
1199
1201 CV_WRAP int pciDeviceID() const;
1202
1204 CV_WRAP int pciDomainID() const;
1205
1207 CV_WRAP bool tccDriver() const;
1208
1210 CV_WRAP int asyncEngineCount() const;
1211
1213 CV_WRAP bool unifiedAddressing() const;
1214
1216 CV_WRAP int memoryClockRate() const;
1217
1219 CV_WRAP int memoryBusWidth() const;
1220
1222 CV_WRAP int l2CacheSize() const;
1223
1225 CV_WRAP int maxThreadsPerMultiProcessor() const;
1226
1228 CV_WRAP void queryMemory(size_t& totalMemory, size_t& freeMemory) const;
1229 CV_WRAP size_t freeMemory() const;
1230 CV_WRAP size_t totalMemory() const;
1231
1238 bool supports(FeatureSet feature_set) const;
1239
1245 CV_WRAP bool isCompatible() const;
1246
1247 private:
1248 int device_id_;
1249 };
1250
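A sketch that enumerates the installed devices and prints a few of the DeviceInfo properties declared above, including whether the device can run code from the current build:

#include <iostream>
#include <opencv2/core/cuda.hpp>

int main()
{
    int count = cv::cuda::getCudaEnabledDeviceCount();
    for (int id = 0; id < count; ++id)
    {
        cv::cuda::DeviceInfo info(id);
        std::cout << id << ": " << info.name()
                  << ", compute capability " << info.majorVersion() << "." << info.minorVersion()
                  << ", " << info.totalGlobalMem() / (1024 * 1024) << " MB"
                  << ", compatible with this build: " << info.isCompatible() << std::endl;
    }
    return 0;
}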
1251 CV_EXPORTS_W void printCudaDeviceInfo(int device);
1252 CV_EXPORTS_W void printShortCudaDeviceInfo(int device);
1253
1261 CV_EXPORTS void convertFp16(InputArray _src, OutputArray _dst, Stream& stream = Stream::Null());
1262
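A sketch of the half-precision helper above, assuming it follows the same convention as cv::convertFp16 on the CPU (a CV_32F input yields a CV_16S array holding FP16 bit patterns, and converting that back restores CV_32F):

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

int main()
{
    cv::cuda::GpuMat src(256, 256, CV_32FC1), half, restored;
    src.setTo(cv::Scalar(3.14f));

    cv::cuda::Stream stream;
    cv::cuda::convertFp16(src, half, stream);       // CV_32F -> FP16 (stored as CV_16S)
    cv::cuda::convertFp16(half, restored, stream);  // FP16 -> CV_32F
    stream.waitForCompletion();
    return 0;
}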
1264
1265 }} // namespace cv { namespace cuda {
1266
1267
1268 #include "opencv2/core/cuda.inl.hpp"
1269
1270 #endif /* OPENCV_CORE_CUDA_HPP */