OpenCV 4.5.3(日本語機械翻訳)
全て クラス 名前空間 ファイル 関数 変数 型定義 列挙型 列挙値 フレンド グループ ページ
cuda.inl.hpp
1 /*M///////////////////////////////////////////////////////////////////////////////////////
2 //
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4 //
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
8 //
9 //
10 // License Agreement
11 // For Open Source Computer Vision Library
12 //
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Copyright (C) 2013, OpenCV Foundation, all rights reserved.
16 // Third party copyrights are property of their respective owners.
17 //
18 // Redistribution and use in source and binary forms, with or without modification,
19 // are permitted provided that the following conditions are met:
20 //
21 // * Redistribution's of source code must retain the above copyright notice,
22 // this list of conditions and the following disclaimer.
23 //
24 // * Redistribution's in binary form must reproduce the above copyright notice,
25 // this list of conditions and the following disclaimer in the documentation
26 // and/or other materials provided with the distribution.
27 //
28 // * The name of the copyright holders may not be used to endorse or promote products
29 // derived from this software without specific prior written permission.
30 //
31 // This software is provided by the copyright holders and contributors "as is" and
32 // any express or implied warranties, including, but not limited to, the implied
33 // warranties of merchantability and fitness for a particular purpose are disclaimed.
34 // In no event shall the Intel Corporation or contributors be liable for any direct,
35 // indirect, incidental, special, exemplary, or consequential damages
36 // (including, but not limited to, procurement of substitute goods or services;
37 // loss of use, data, or profits; or business interruption) however caused
38 // and on any theory of liability, whether in contract, strict liability,
39 // or tort (including negligence or otherwise) arising in any way out of
40 // the use of this software, even if advised of the possibility of such damage.
41 //
42 //M*/
43
44 #ifndef OPENCV_CORE_CUDAINL_HPP
45 #define OPENCV_CORE_CUDAINL_HPP
46
47 #include "opencv2/core/cuda.hpp"
48
50
51 namespace cv { namespace cuda {
52
53 //===================================================================================
54 // GpuMat
55 //===================================================================================
56
// Constructs an empty GpuMat header. No device memory is allocated;
// allocator_ is remembered and used by later create() calls.
inline
GpuMat::GpuMat(Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{}

// Allocates a rows_ x cols_ matrix of the given type on the device.
// Allocation is skipped when either dimension is non-positive, leaving
// an empty matrix.
inline
GpuMat::GpuMat(int rows_, int cols_, int type_, Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    if (rows_ > 0 && cols_ > 0)
        create(rows_, cols_, type_);
}

// Same as above with the dimensions packed in a Size
// (width = cols, height = rows).
inline
GpuMat::GpuMat(Size size_, int type_, Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    if (size_.height > 0 && size_.width > 0)
        create(size_.height, size_.width, type_);
}

// Allocates a rows_ x cols_ matrix and fills every element with the
// scalar s_ (via setTo on the default stream).
inline
GpuMat::GpuMat(int rows_, int cols_, int type_, Scalar s_, Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    if (rows_ > 0 && cols_ > 0)
    {
        create(rows_, cols_, type_);
        setTo(s_);
    }
}

// Size-based variant of the fill constructor above.
inline
GpuMat::GpuMat(Size size_, int type_, Scalar s_, Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    if (size_.height > 0 && size_.width > 0)
    {
        create(size_.height, size_.width, type_);
        setTo(s_);
    }
}

// Copy constructor: shallow copy that shares the device buffer with m and
// atomically increments the reference count when the buffer is ref-counted.
inline
GpuMat::GpuMat(const GpuMat& m)
    : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), allocator(m.allocator)
{
    if (refcount)
        CV_XADD(refcount, 1);
}

// Constructs a GpuMat by uploading the contents of the host array arr.
inline
GpuMat::GpuMat(InputArray arr, Allocator* allocator_) :
    flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    upload(arr);
}

// Releases this header's reference to the device buffer (see release()).
inline
GpuMat::~GpuMat()
{
    release();
}

// Copy-and-swap assignment: shares m's buffer (shallow copy) and releases
// the previously held one when the temporary is destroyed. Self-assignment
// is a no-op.
inline
GpuMat& GpuMat::operator =(const GpuMat& m)
{
    if (this != &m)
    {
        GpuMat temp(m);
        swap(temp);
    }

    return *this;
}
132
// Size-based forwarder to create(rows, cols, type).
inline
void GpuMat::create(Size size_, int type_)
{
    create(size_.height, size_.width, type_);
}

// Exchanges all header fields with b. O(1); no device memory is touched.
inline
void GpuMat::swap(GpuMat& b)
{
    std::swap(flags, b.flags);
    std::swap(rows, b.rows);
    std::swap(cols, b.cols);
    std::swap(step, b.step);
    std::swap(data, b.data);
    std::swap(datastart, b.datastart);
    std::swap(dataend, b.dataend);
    std::swap(refcount, b.refcount);
    std::swap(allocator, b.allocator);
}

// Returns a deep copy of the matrix (data duplicated via copyTo).
inline
GpuMat GpuMat::clone() const
{
    GpuMat m;
    copyTo(m);
    return m;
}

// Masked copy using the default (null) stream.
inline
void GpuMat::copyTo(OutputArray dst, InputArray mask) const
{
    copyTo(dst, mask, Stream::Null());
}

// Fills the whole matrix with scalar s on the default stream.
inline
GpuMat& GpuMat::setTo(Scalar s)
{
    return setTo(s, Stream::Null());
}

// Masked fill with scalar s on the default stream.
inline
GpuMat& GpuMat::setTo(Scalar s, InputArray mask)
{
    return setTo(s, mask, Stream::Null());
}

// Type conversion (no scaling) on the default stream.
inline
void GpuMat::convertTo(OutputArray dst, int rtype) const
{
    convertTo(dst, rtype, Stream::Null());
}

// Scaled conversion dst = *this * alpha + beta on the default stream.
inline
void GpuMat::convertTo(OutputArray dst, int rtype, double alpha, double beta) const
{
    convertTo(dst, rtype, alpha, beta, Stream::Null());
}

// Scaled conversion on a user-supplied stream; beta defaults to 0.
inline
void GpuMat::convertTo(OutputArray dst, int rtype, double alpha, Stream& stream) const
{
    convertTo(dst, rtype, alpha, 0.0, stream);
}

// Assigns the header to m when _type is negative (no conversion),
// otherwise converts the data to _type.
inline
void GpuMat::assignTo(GpuMat& m, int _type) const
{
    if (_type < 0)
        m = *this;
    else
        convertTo(m, _type);
}
205
206 inline
207uchar* GpuMat::ptr(int y)
208{
209 CV_DbgAssert( (unsigned)y < (unsigned)rows );
210 return data + step * y;
211}
212
213 inline
214 const uchar* GpuMat::ptr(int y) const
215 {
216 CV_DbgAssert( (unsigned)y < (unsigned)rows );
217 return data + step * y;
218}
219
220 template<typename _Tp> inline
221_Tp* GpuMat::ptr(int y)
222{
223 return (_Tp*)ptr(y);
224}
225
226 template<typename _Tp> inline
227 const _Tp* GpuMat::ptr(int y) const
228 {
229 return (const _Tp*)ptr(y);
230}
231
232 template <class T> inline
233GpuMat::operator PtrStepSz<T>() const
234 {
235 return PtrStepSz<T>(rows, cols, (T*)data, step);
236}
237
238 template <class T> inline
239GpuMat::operator PtrStep<T>() const
240 {
241 return PtrStep<T>((T*)data, step);
242}
243
// Returns a header for the single row y; the data is shared with *this.
inline
GpuMat GpuMat::row(int y) const
{
    return GpuMat(*this, Range(y, y+1), Range::all());
}

// Returns a header for the single column x; the data is shared with *this.
inline
GpuMat GpuMat::col(int x) const
{
    return GpuMat(*this, Range::all(), Range(x, x+1));
}

// Returns a header spanning rows [startrow, endrow); the data is shared.
inline
GpuMat GpuMat::rowRange(int startrow, int endrow) const
{
    return GpuMat(*this, Range(startrow, endrow), Range::all());
}

// Range-based variant of rowRange(int, int).
inline
GpuMat GpuMat::rowRange(Range r) const
{
    return GpuMat(*this, r, Range::all());
}

// Returns a header spanning columns [startcol, endcol); the data is shared.
inline
GpuMat GpuMat::colRange(int startcol, int endcol) const
{
    return GpuMat(*this, Range::all(), Range(startcol, endcol));
}

// Range-based variant of colRange(int, int).
inline
GpuMat GpuMat::colRange(Range r) const
{
    return GpuMat(*this, Range::all(), r);
}

// Extracts the sub-matrix covering the given row and column ranges.
inline
GpuMat GpuMat::operator ()(Range rowRange_, Range colRange_) const
{
    return GpuMat(*this, rowRange_, colRange_);
}

// Extracts the sub-matrix covering the given rectangle.
inline
GpuMat GpuMat::operator ()(Rect roi) const
{
    return GpuMat(*this, roi);
}
291
292 inline
293 bool GpuMat::isContinuous() const
294 {
295 return (flags & Mat::CONTINUOUS_FLAG) != 0;
296}
297
298 inline
299 size_t GpuMat::elemSize() const
300 {
301 return CV_ELEM_SIZE(flags);
302}
303
304 inline
305 size_t GpuMat::elemSize1() const
306 {
307 return CV_ELEM_SIZE1(flags);
308}
309
310 inline
311 int GpuMat::type() const
312 {
313 return CV_MAT_TYPE(flags);
314}
315
316 inline
317 int GpuMat::depth() const
318 {
319 return CV_MAT_DEPTH(flags);
320}
321
322 inline
323 int GpuMat::channels() const
324 {
325 return CV_MAT_CN(flags);
326}
327
328 inline
329 size_t GpuMat::step1() const
330 {
331 return step / elemSize1();
332}
333
334 inline
335Size GpuMat::size() const
336 {
337 return Size(cols, rows);
338}
339
340 inline
341 bool GpuMat::empty() const
342 {
343 return data == 0;
344}
345
346 inline
347 void* GpuMat::cudaPtr() const
348 {
349 return data;
350}
351
// Convenience overload: creates and returns a continuous rows x cols
// matrix of the given type by value.
static inline
GpuMat createContinuous(int rows, int cols, int type)
{
    GpuMat m;
    createContinuous(rows, cols, type, m);
    return m;
}

// Size-based forwarder to createContinuous(rows, cols, type, arr).
static inline
void createContinuous(Size size, int type, OutputArray arr)
{
    createContinuous(size.height, size.width, type, arr);
}

// Size-based convenience overload returning the matrix by value.
static inline
GpuMat createContinuous(Size size, int type)
{
    GpuMat m;
    createContinuous(size, type, m);
    return m;
}

// Size-based forwarder to ensureSizeIsEnough(rows, cols, type, arr).
static inline
void ensureSizeIsEnough(Size size, int type, OutputArray arr)
{
    ensureSizeIsEnough(size.height, size.width, type, arr);
}

// ADL-friendly free-function swap for GpuMat headers.
static inline
void swap(GpuMat& a, GpuMat& b)
{
    a.swap(b);
}
385
386 //===================================================================================
387 // GpuMatND
388 //===================================================================================
389
// Default constructor: empty n-dimensional matrix header, no device
// allocation.
inline
GpuMatND::GpuMatND() :
    flags(0), dims(0), data(nullptr), offset(0)
{
}

// Allocates an n-dimensional array with the given per-dimension sizes
// and element type.
inline
GpuMatND::GpuMatND(SizeArray _size, int _type) :
    flags(0), dims(0), data(nullptr), offset(0)
{
    create(std::move(_size), _type);
}

// O(1) exchange of the two headers via std::swap (relies on GpuMatND's
// move operations; no device memory is copied).
inline
void GpuMatND::swap(GpuMatND& m) noexcept
{
    std::swap(*this, m);
}

// True when the Mat::CONTINUOUS_FLAG bit is set in flags.
inline
bool GpuMatND::isContinuous() const
{
    return (flags & Mat::CONTINUOUS_FLAG) != 0;
}

// True when this header refers to a sub-array of another GpuMatND
// (the Mat::SUBMATRIX_FLAG bit is set).
inline
bool GpuMatND::isSubmatrix() const
{
    return (flags & Mat::SUBMATRIX_FLAG) != 0;
}

// Size of one element in bytes (all channels together).
inline
size_t GpuMatND::elemSize() const
{
    return CV_ELEM_SIZE(flags);
}

// Size of one element channel in bytes.
inline
size_t GpuMatND::elemSize1() const
{
    return CV_ELEM_SIZE1(flags);
}

// True when no device data is associated with this header.
inline
bool GpuMatND::empty() const
{
    return data == nullptr;
}

// True when the header wraps externally owned memory: data is set while
// the internal owning handle data_ holds no reference.
inline
bool GpuMatND::external() const
{
    return !empty() && data_.use_count() == 0;
}

// Device pointer to the first element of this (possibly sub-)array;
// offset is the byte distance from the start of the allocation.
inline
uchar* GpuMatND::getDevicePtr() const
{
    return data + offset;
}

// Total number of elements: the product of all dimension sizes.
inline
size_t GpuMatND::total() const
{
    size_t p = 1;
    for(auto s : size)
        p *= s;
    return p;
}

// Total number of bytes spanned by the array: the outermost dimension's
// extent times its byte step.
inline
size_t GpuMatND::totalMemSize() const
{
    return size[0] * step[0];
}

// Element type (e.g. CV_32FC1), extracted from the flags field.
inline
int GpuMatND::type() const
{
    return CV_MAT_TYPE(flags);
}
471
472 //===================================================================================
473 // HostMem
474 //===================================================================================
475
// Constructs an empty HostMem header; alloc_type_ selects the kind of
// host allocation used by later create() calls.
inline
HostMem::HostMem(AllocType alloc_type_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
}

// Copy constructor: shallow copy that shares the buffer with m and
// atomically increments the reference count when the buffer is ref-counted.
inline
HostMem::HostMem(const HostMem& m)
    : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), alloc_type(m.alloc_type)
{
    if( refcount )
        CV_XADD(refcount, 1);
}

// Allocates a rows_ x cols_ buffer of the given type; skipped when either
// dimension is non-positive, leaving an empty header.
inline
HostMem::HostMem(int rows_, int cols_, int type_, AllocType alloc_type_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
    if (rows_ > 0 && cols_ > 0)
        create(rows_, cols_, type_);
}

// Size-based variant of the allocating constructor above.
inline
HostMem::HostMem(Size size_, int type_, AllocType alloc_type_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
    if (size_.height > 0 && size_.width > 0)
        create(size_.height, size_.width, type_);
}

// Constructs a HostMem holding a copy of the array arr.
inline
HostMem::HostMem(InputArray arr, AllocType alloc_type_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
    arr.getMat().copyTo(*this);
}

// Releases this header's reference to the buffer (see release()).
inline
HostMem::~HostMem()
{
    release();
}

// Copy-and-swap assignment: shares m's buffer and releases the previously
// held one. Self-assignment is a no-op.
inline
HostMem& HostMem::operator =(const HostMem& m)
{
    if (this != &m)
    {
        HostMem temp(m);
        swap(temp);
    }

    return *this;
}

// Exchanges all header fields with b. O(1); no data is copied.
inline
void HostMem::swap(HostMem& b)
{
    std::swap(flags, b.flags);
    std::swap(rows, b.rows);
    std::swap(cols, b.cols);
    std::swap(step, b.step);
    std::swap(data, b.data);
    std::swap(datastart, b.datastart);
    std::swap(dataend, b.dataend);
    std::swap(refcount, b.refcount);
    std::swap(alloc_type, b.alloc_type);
}

// Deep copy: new buffer of the same size/type/alloc_type with the data
// copied through an aliasing Mat header.
inline
HostMem HostMem::clone() const
{
    HostMem m(size(), type(), alloc_type);
    createMatHeader().copyTo(m);
    return m;
}

// Size-based forwarder to create(rows, cols, type).
inline
void HostMem::create(Size size_, int type_)
{
    create(size_.height, size_.width, type_);
}

// Returns a Mat header that aliases this buffer; no data is copied and
// the returned Mat does not own the memory.
inline
Mat HostMem::createMatHeader() const
{
    return Mat(size(), type(), data, step);
}

// True when the Mat::CONTINUOUS_FLAG bit is set in flags.
inline
bool HostMem::isContinuous() const
{
    return (flags & Mat::CONTINUOUS_FLAG) != 0;
}

// Size of one element in bytes (all channels together).
inline
size_t HostMem::elemSize() const
{
    return CV_ELEM_SIZE(flags);
}

// Size of one element channel in bytes.
inline
size_t HostMem::elemSize1() const
{
    return CV_ELEM_SIZE1(flags);
}

// Element type (e.g. CV_8UC3), extracted from the flags field.
inline
int HostMem::type() const
{
    return CV_MAT_TYPE(flags);
}

// Element depth (e.g. CV_8U), extracted from the flags field.
inline
int HostMem::depth() const
{
    return CV_MAT_DEPTH(flags);
}

// Number of channels, extracted from the flags field.
inline
int HostMem::channels() const
{
    return CV_MAT_CN(flags);
}

// Row stride expressed in channel-sized units rather than bytes.
inline
size_t HostMem::step1() const
{
    return step / elemSize1();
}

// Buffer extents as Size(width, height), i.e. Size(cols, rows).
inline
Size HostMem::size() const
{
    return Size(cols, rows);
}

// True when no data is associated with this header.
inline
bool HostMem::empty() const
{
    return data == 0;
}

// ADL-friendly free-function swap for HostMem headers.
static inline
void swap(HostMem& a, HostMem& b)
{
    a.swap(b);
}
624
625 //===================================================================================
626 // Stream
627 //===================================================================================
628
// Wraps an existing implementation handle in a Stream object
// (internal-use constructor; the impl carries the underlying CUDA stream).
inline
Stream::Stream(const Ptr<Impl>& impl)
    : impl_(impl)
{
}

//===================================================================================
// Event
//===================================================================================

// Wraps an existing implementation handle in an Event object
// (internal-use constructor).
inline
Event::Event(const Ptr<Impl>& impl)
    : impl_(impl)
{
}
644
645 //===================================================================================
646 // Initialization & Info
647 //===================================================================================
648
// True when the library was built with PTX or binary (cubin) code for
// exactly this compute capability.
inline
bool TargetArchs::has(int major, int minor)
{
    return hasPtx(major, minor) || hasBin(major, minor);
}

// True when PTX or binary code for this or a greater compute capability
// is available.
inline
bool TargetArchs::hasEqualOrGreater(int major, int minor)
{
    return hasEqualOrGreaterPtx(major, minor) || hasEqualOrGreaterBin(major, minor);
}

// Describes the currently selected CUDA device (cuda::getDevice()).
inline
DeviceInfo::DeviceInfo()
{
    device_id_ = getDevice();
}

// Describes the given device; device_id must lie in
// [0, getCudaEnabledDeviceCount()).
inline
DeviceInfo::DeviceInfo(int device_id)
{
    CV_Assert( device_id >= 0 && device_id < getCudaEnabledDeviceCount() );
    device_id_ = device_id;
}

// System index of this device (as used by cuda::setDevice).
inline
int DeviceInfo::deviceID() const
{
    return device_id_;
}

// Currently free device memory in bytes; re-queried on every call.
inline
size_t DeviceInfo::freeMemory() const
{
    size_t _totalMemory = 0, _freeMemory = 0;
    queryMemory(_totalMemory, _freeMemory);
    return _freeMemory;
}

// Total device memory in bytes.
inline
size_t DeviceInfo::totalMemory() const
{
    size_t _totalMemory = 0, _freeMemory = 0;
    queryMemory(_totalMemory, _freeMemory);
    return _totalMemory;
}

// True when the device's compute capability (encoded as major*10 + minor)
// is at least the capability required by feature_set, which uses the same
// encoding.
inline
bool DeviceInfo::supports(FeatureSet feature_set) const
{
    int version = majorVersion() * 10 + minorVersion();
    return version >= feature_set;
}
702
703
704}} // namespace cv { namespace cuda {
705
706 //===================================================================================
707 // Mat
708 //===================================================================================
709
namespace cv {

// Constructs a host Mat by downloading the contents of a GpuMat
// via GpuMat::download.
inline
Mat::Mat(const cuda::GpuMat& m)
    : flags(0), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), datalimit(0), allocator(0), u(0), size(&rows)
{
    m.download(*this);
}

}
720
722
723 #endif // OPENCV_CORE_CUDAINL_HPP
Mat() CV_NOEXCEPT
CV_WRAP GpuMat(GpuMat::Allocator *allocator=GpuMat::defaultAllocator())
default constructor
void CV_EXPORTS_W copyTo(InputArray src, OutputArray dst, InputArray mask)
This is an overloaded member function, provided for convenience (python) Copies the matrix to another...
#define CV_ELEM_SIZE1(type)
Definition: cvdef.h:484
#define CV_Assert(expr)
Checks a condition at runtime and throws exception if it fails
Definition: base.hpp:342
#define CV_DbgAssert(expr)
Definition: base.hpp:375
CV_EXPORTS void swap(Mat &a, Mat &b)
Swaps two matrices
FeatureSet
Enumeration providing CUDA computing features.
Definition: core/cuda.hpp:989
CV_EXPORTS_W int getDevice()
Returns the current device index set by cuda::setDevice or initialized by default.
CV_EXPORTS_W int getCudaEnabledDeviceCount()
Returns the number of installed CUDA-enabled devices.
CV_EXPORTS_W void createContinuous(int rows, int cols, int type, OutputArray arr)
Creates a continuous matrix.
CV_EXPORTS_W void ensureSizeIsEnough(int rows, int cols, int type, OutputArray arr)
Ensures that the size of a matrix is big enough and the matrix has a proper type.
cv
"black box" representation of the file storage associated with a file on disk.
Definition: aruco.hpp:75