Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/core/src/cuda/gpu_mat.cu
16339 views
1
/*M///////////////////////////////////////////////////////////////////////////////////////
2
//
3
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4
//
5
// By downloading, copying, installing or using the software you agree to this license.
6
// If you do not agree to this license, do not download, install,
7
// copy or use the software.
8
//
9
//
10
// License Agreement
11
// For Open Source Computer Vision Library
12
//
13
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15
// Third party copyrights are property of their respective owners.
16
//
17
// Redistribution and use in source and binary forms, with or without modification,
18
// are permitted provided that the following conditions are met:
19
//
20
// * Redistribution's of source code must retain the above copyright notice,
21
// this list of conditions and the following disclaimer.
22
//
23
// * Redistribution's in binary form must reproduce the above copyright notice,
24
// this list of conditions and the following disclaimer in the documentation
25
// and/or other materials provided with the distribution.
26
//
27
// * The name of the copyright holders may not be used to endorse or promote products
28
// derived from this software without specific prior written permission.
29
//
30
// This software is provided by the copyright holders and contributors "as is" and
31
// any express or implied warranties, including, but not limited to, the implied
32
// warranties of merchantability and fitness for a particular purpose are disclaimed.
33
// In no event shall the Intel Corporation or contributors be liable for any direct,
34
// indirect, incidental, special, exemplary, or consequential damages
35
// (including, but not limited to, procurement of substitute goods or services;
36
// loss of use, data, or profits; or business interruption) however caused
37
// and on any theory of liability, whether in contract, strict liability,
38
// or tort (including negligence or otherwise) arising in any way out of
39
// the use of this software, even if advised of the possibility of such damage.
40
//
41
//M*/
42
43
#include "opencv2/opencv_modules.hpp"
44
45
#ifndef HAVE_OPENCV_CUDEV
46
47
#error "opencv_cudev is required"
48
49
#else
50
51
#include "opencv2/core/cuda.hpp"
52
#include "opencv2/cudev.hpp"
53
#include "opencv2/core/cuda/utility.hpp"
54
55
using namespace cv;
56
using namespace cv::cuda;
57
using namespace cv::cudev;
58
59
// Out-of-line definition of the virtual destructor. Intentionally empty: it
// exists to anchor the class's vtable in this translation unit.
device::ThrustAllocator::~ThrustAllocator()
{
}
namespace
{
    // Built-in Thrust allocator backed directly by cudaMalloc / cudaFree.
    // The methods are compiled for both host and device, but allocation is
    // only meaningful on the host; the device-side paths are stubs.
    class DefaultThrustAllocator: public cv::cuda::device::ThrustAllocator
    {
    public:
        __device__ __host__ uchar* allocate(size_t numBytes) CV_OVERRIDE
        {
#ifdef __CUDA_ARCH__
            // Device compilation pass: no host allocation available here.
            return NULL;
#else
            uchar* ptr;
            CV_CUDEV_SAFE_CALL(cudaMalloc(&ptr, numBytes));
            return ptr;
#endif
        }
        __device__ __host__ void deallocate(uchar* ptr, size_t numBytes) CV_OVERRIDE
        {
            CV_UNUSED(numBytes);
#ifdef __CUDA_ARCH__
            // Device compilation pass: nothing to release.
#else
            CV_CUDEV_SAFE_CALL(cudaFree(ptr));
#endif
        }
    };

    // Process-wide allocator instance and the pointer handed out by
    // ThrustAllocator::getAllocator() (names referenced elsewhere in file).
    DefaultThrustAllocator defaultThrustAllocator;
    cv::cuda::device::ThrustAllocator* g_thrustAllocator = &defaultThrustAllocator;
}
// Returns the currently installed Thrust allocator (the cudaMalloc-based
// default unless a custom one was registered via setAllocator()).
cv::cuda::device::ThrustAllocator& cv::cuda::device::ThrustAllocator::getAllocator()
{
    return *g_thrustAllocator;
}
// Installs a custom Thrust allocator. Passing NULL restores the built-in
// cudaMalloc/cudaFree-based default.
// NOTE(review): not synchronized — presumably meant to be called before any
// concurrent Thrust usage; confirm with callers.
void cv::cuda::device::ThrustAllocator::setAllocator(cv::cuda::device::ThrustAllocator* allocator)
{
    g_thrustAllocator = (allocator == NULL) ? &defaultThrustAllocator : allocator;
}
namespace
{
    // Default GpuMat memory manager: pitched device allocations for genuine
    // 2D matrices, plain linear allocations for single-row/column matrices.
    class DefaultAllocator : public GpuMat::Allocator
    {
    public:
        bool allocate(GpuMat* mat, int rows, int cols, size_t elemSize) CV_OVERRIDE;
        void free(GpuMat* mat) CV_OVERRIDE;
    };

    bool DefaultAllocator::allocate(GpuMat* mat, int rows, int cols, size_t elemSize)
    {
        const size_t rowBytes = elemSize * cols;

        if (rows > 1 && cols > 1)
        {
            // True 2D case: let the driver choose an aligned pitch so each
            // row starts on a coalescing-friendly boundary.
            CV_CUDEV_SAFE_CALL( cudaMallocPitch(&mat->data, &mat->step, rowBytes, rows) );
        }
        else
        {
            // Single row or single column must be continuous
            CV_CUDEV_SAFE_CALL( cudaMalloc(&mat->data, rowBytes * rows) );
            mat->step = rowBytes;
        }

        mat->refcount = (int*) fastMalloc(sizeof(int));

        return true;
    }

    void DefaultAllocator::free(GpuMat* mat)
    {
        // datastart (not data) is the pointer originally returned by the
        // cudaMalloc* call; data may point inside the buffer for ROIs.
        cudaFree(mat->datastart);
        fastFree(mat->refcount);
    }

    // Process-wide allocator instance and the pointer handed out by
    // GpuMat::defaultAllocator() (names referenced elsewhere in file).
    DefaultAllocator cudaDefaultAllocator;
    GpuMat::Allocator* g_defaultAllocator = &cudaDefaultAllocator;
}
// Returns the process-wide default GpuMat allocator (cudaMallocPitch-based
// unless replaced via setDefaultAllocator()).
GpuMat::Allocator* cv::cuda::GpuMat::defaultAllocator()
{
    return g_defaultAllocator;
}
// Replaces the allocator used by newly created GpuMat objects.
// NULL is rejected: there must always be a valid default allocator.
void cv::cuda::GpuMat::setDefaultAllocator(Allocator* allocator)
{
    CV_Assert( allocator != 0 );
    g_defaultAllocator = allocator;
}
/////////////////////////////////////////////////////
152
/// create
153
154
// Allocates storage for a _rows x _cols matrix of type _type, reusing the
// existing buffer when geometry and type already match. Falls back to the
// default allocator if a custom allocator reports failure.
void cv::cuda::GpuMat::create(int _rows, int _cols, int _type)
{
    CV_DbgAssert( _rows >= 0 && _cols >= 0 );

    _type &= Mat::TYPE_MASK;

    // Fast path: current allocation already satisfies the request.
    if (rows == _rows && cols == _cols && type() == _type && data)
        return;

    if (data)
        release();

    if (_rows > 0 && _cols > 0)
    {
        flags = Mat::MAGIC_VAL + _type;
        rows = _rows;
        cols = _cols;

        const size_t esz = elemSize();

        bool allocSuccess = allocator->allocate(this, rows, cols, esz);

        if (!allocSuccess)
        {
            // custom allocator fails, try default allocator
            allocator = defaultAllocator();
            allocSuccess = allocator->allocate(this, rows, cols, esz);
            CV_Assert( allocSuccess );
        }

        // A pitch equal to the row byte size means rows are packed back to
        // back, i.e. the matrix is continuous.
        if (esz * cols == step)
            flags |= Mat::CONTINUOUS_FLAG;

        // Compute the total byte size in 64 bits first so step * rows cannot
        // overflow a 32-bit intermediate.
        int64 _nettosize = static_cast<int64>(step) * rows;
        size_t nettosize = static_cast<size_t>(_nettosize);

        datastart = data;
        dataend = data + nettosize;

        // The allocator may or may not provide a refcount (custom allocators
        // can leave it null); only initialize it when present.
        if (refcount)
            *refcount = 1;
    }
}
/////////////////////////////////////////////////////
199
/// release
200
201
// Drops this matrix's reference to its buffer and resets the header. The
// device memory is freed only when this was the last reference (the atomic
// decrement CV_XADD returns the value *before* the update, hence "== 1").
void cv::cuda::GpuMat::release()
{
    CV_DbgAssert( allocator != 0 );

    if (refcount && CV_XADD(refcount, -1) == 1)
        allocator->free(this);

    dataend = data = datastart = 0;
    step = rows = cols = 0;
    refcount = 0;
}
/////////////////////////////////////////////////////
214
/// upload
215
216
// Synchronous host -> device copy; (re)allocates this matrix to match the
// size and type of arr before copying.
void cv::cuda::GpuMat::upload(InputArray arr)
{
    const Mat src = arr.getMat();

    CV_DbgAssert( !src.empty() );

    create(src.size(), src.type());

    const size_t rowBytes = cols * elemSize();
    CV_CUDEV_SAFE_CALL( cudaMemcpy2D(data, step, src.data, src.step, rowBytes, rows, cudaMemcpyHostToDevice) );
}
// Asynchronous host -> device copy enqueued on _stream.
// NOTE(review): the caller must keep the source host memory alive (and
// ideally pinned) until the stream has completed the copy — confirm callers.
void cv::cuda::GpuMat::upload(InputArray arr, Stream& _stream)
{
    const Mat src = arr.getMat();

    CV_DbgAssert( !src.empty() );

    create(src.size(), src.type());

    const size_t rowBytes = cols * elemSize();
    cudaStream_t stream = StreamAccessor::getStream(_stream);
    CV_CUDEV_SAFE_CALL( cudaMemcpy2DAsync(data, step, src.data, src.step, rowBytes, rows, cudaMemcpyHostToDevice, stream) );
}
/////////////////////////////////////////////////////
240
/// download
241
242
// Synchronous device -> host copy into _dst, which is allocated to match
// this matrix's size and type.
void cv::cuda::GpuMat::download(OutputArray _dst) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    Mat host = _dst.getMat();

    const size_t rowBytes = cols * elemSize();
    CV_CUDEV_SAFE_CALL( cudaMemcpy2D(host.data, host.step, data, step, rowBytes, rows, cudaMemcpyDeviceToHost) );
}
// Asynchronous device -> host copy enqueued on _stream.
// NOTE(review): _dst must not be read on the host until the stream has been
// synchronized.
void cv::cuda::GpuMat::download(OutputArray _dst, Stream& _stream) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    Mat host = _dst.getMat();

    const size_t rowBytes = cols * elemSize();
    cudaStream_t stream = StreamAccessor::getStream(_stream);
    CV_CUDEV_SAFE_CALL( cudaMemcpy2DAsync(host.data, host.step, data, step, rowBytes, rows, cudaMemcpyDeviceToHost, stream) );
}
/////////////////////////////////////////////////////
264
/// copyTo
265
266
// Synchronous device -> device copy into _dst (allocated to match *this).
void cv::cuda::GpuMat::copyTo(OutputArray _dst) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    GpuMat target = _dst.getGpuMat();

    const size_t rowBytes = cols * elemSize();
    CV_CUDEV_SAFE_CALL( cudaMemcpy2D(target.data, target.step, data, step, rowBytes, rows, cudaMemcpyDeviceToDevice) );
}
// Asynchronous device -> device copy enqueued on _stream.
void cv::cuda::GpuMat::copyTo(OutputArray _dst, Stream& _stream) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    GpuMat target = _dst.getGpuMat();

    const size_t rowBytes = cols * elemSize();
    cudaStream_t stream = StreamAccessor::getStream(_stream);
    CV_CUDEV_SAFE_CALL( cudaMemcpy2DAsync(target.data, target.step, data, step, rowBytes, rows, cudaMemcpyDeviceToDevice, stream) );
}
namespace
{
    // Transform policy for masked copies, selected by element byte size:
    // 4- and 8-byte elements override the shift from DefaultTransformPolicy.
    template <size_t size> struct CopyToPolicy : DefaultTransformPolicy
    {
    };
    template <> struct CopyToPolicy<4> : DefaultTransformPolicy
    {
        enum {
            shift = 2
        };
    };
    template <> struct CopyToPolicy<8> : DefaultTransformPolicy
    {
        enum {
            shift = 1
        };
    };

    // Copies src into dst through an identity transform kernel, using the
    // 8-bit mask as a per-element write predicate. The policy is chosen by
    // the size of T's scalar element type.
    template <typename T>
    void copyWithMask(const GpuMat& src, const GpuMat& dst, const GpuMat& mask, Stream& stream)
    {
        gridTransformUnary_< CopyToPolicy<sizeof(typename VecTraits<T>::elem_type)> >(globPtr<T>(src), globPtr<T>(dst), identity<T>(), globPtr<uchar>(mask), stream);
    }
}
// Masked copy: copies *this into _dst where the mask is non-zero. The mask
// must be CV_8U with either one channel or the same channel count as *this.
void cv::cuda::GpuMat::copyTo(OutputArray _dst, InputArray _mask, Stream& stream) const
{
    CV_DbgAssert( !empty() );
    CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );

    GpuMat mask = _mask.getGpuMat();
    CV_DbgAssert( size() == mask.size() && mask.depth() == CV_8U && (mask.channels() == 1 || mask.channels() == channels()) );

    // Remember the old destination buffer so we can detect whether create()
    // had to reallocate it.
    uchar* data0 = _dst.getGpuMat().data;

    _dst.create(size(), type());
    GpuMat dst = _dst.getGpuMat();

    // do not leave dst uninitialized
    if (dst.data != data0)
        dst.setTo(Scalar::all(0), stream);

    // Dispatch table indexed by [elemSize1()][channels - 1]; elemSize1() is
    // 1, 2, 4, or 8, so only those rows are populated.
    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, const GpuMat& mask, Stream& stream);
    static const func_t funcs[9][4] =
    {
        {0,0,0,0},
        {copyWithMask<uchar>, copyWithMask<uchar2>, copyWithMask<uchar3>, copyWithMask<uchar4>},
        {copyWithMask<ushort>, copyWithMask<ushort2>, copyWithMask<ushort3>, copyWithMask<ushort4>},
        {0,0,0,0},
        {copyWithMask<int>, copyWithMask<int2>, copyWithMask<int3>, copyWithMask<int4>},
        {0,0,0,0},
        {0,0,0,0},
        {0,0,0,0},
        {copyWithMask<double>, copyWithMask<double2>, copyWithMask<double3>, copyWithMask<double4>}
    };

    if (mask.channels() == channels())
    {
        // Multi-channel mask: flatten src, dst, and mask to one channel so
        // every channel is masked independently by the 1-channel kernel.
        const func_t func = funcs[elemSize1()][0];
        CV_DbgAssert( func != 0 );
        func(reshape(1), dst.reshape(1), mask.reshape(1), stream);
    }
    else
    {
        // Single-channel mask applied to whole multi-channel elements.
        const func_t func = funcs[elemSize1()][channels() - 1];
        CV_DbgAssert( func != 0 );
        func(*this, dst, mask, stream);
    }
}
/////////////////////////////////////////////////////
358
/// setTo
359
360
namespace
{
    // Fills mat with the given scalar (no mask) by streaming a constant
    // source through an identity transform kernel.
    template <typename T>
    void setToWithOutMask(const GpuMat& mat, Scalar _scalar, Stream& stream)
    {
        Scalar_<typename VecTraits<T>::elem_type> scalar = _scalar;
        gridTransformUnary(constantPtr(VecTraits<T>::make(scalar.val), mat.rows, mat.cols), globPtr<T>(mat), identity<T>(), stream);
    }

    // Same as above, but writes only where the 8-bit mask is non-zero.
    template <typename T>
    void setToWithMask(const GpuMat& mat, const GpuMat& mask, Scalar _scalar, Stream& stream)
    {
        Scalar_<typename VecTraits<T>::elem_type> scalar = _scalar;
        gridTransformUnary(constantPtr(VecTraits<T>::make(scalar.val), mat.rows, mat.cols), globPtr<T>(mat), identity<T>(), globPtr<uchar>(mask), stream);
    }
}
// Sets every element to the given scalar. Fast cudaMemset2D paths cover two
// cases where a byte-wise fill is exact; everything else goes through a
// depth/channel-dispatched fill kernel.
GpuMat& cv::cuda::GpuMat::setTo(Scalar value, Stream& stream)
{
    CV_DbgAssert( !empty() );
    CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );

    if (value[0] == 0.0 && value[1] == 0.0 && value[2] == 0.0 && value[3] == 0.0)
    {
        // Zero fill
        // An all-zero bit pattern represents zero for every supported depth,
        // so a byte-wise memset is exact regardless of type.
        if (stream)
            CV_CUDEV_SAFE_CALL( cudaMemset2DAsync(data, step, 0, cols * elemSize(), rows, StreamAccessor::getStream(stream)) );
        else
            CV_CUDEV_SAFE_CALL( cudaMemset2D(data, step, 0, cols * elemSize(), rows) );

        return *this;
    }

    if (depth() == CV_8U)
    {
        const int cn = channels();

        // 8-bit data with the same value in every channel can also be
        // filled byte-wise.
        if (cn == 1
            || (cn == 2 && value[0] == value[1])
            || (cn == 3 && value[0] == value[1] && value[0] == value[2])
            || (cn == 4 && value[0] == value[1] && value[0] == value[2] && value[0] == value[3]))
        {
            const int val = cv::saturate_cast<uchar>(value[0]);

            if (stream)
                CV_CUDEV_SAFE_CALL( cudaMemset2DAsync(data, step, val, cols * elemSize(), rows, StreamAccessor::getStream(stream)) );
            else
                CV_CUDEV_SAFE_CALL( cudaMemset2D(data, step, val, cols * elemSize(), rows) );

            return *this;
        }
    }

    // General case: dispatch table indexed by [depth()][channels() - 1].
    typedef void (*func_t)(const GpuMat& mat, Scalar scalar, Stream& stream);
    static const func_t funcs[7][4] =
    {
        {setToWithOutMask<uchar>,setToWithOutMask<uchar2>,setToWithOutMask<uchar3>,setToWithOutMask<uchar4>},
        {setToWithOutMask<schar>,setToWithOutMask<char2>,setToWithOutMask<char3>,setToWithOutMask<char4>},
        {setToWithOutMask<ushort>,setToWithOutMask<ushort2>,setToWithOutMask<ushort3>,setToWithOutMask<ushort4>},
        {setToWithOutMask<short>,setToWithOutMask<short2>,setToWithOutMask<short3>,setToWithOutMask<short4>},
        {setToWithOutMask<int>,setToWithOutMask<int2>,setToWithOutMask<int3>,setToWithOutMask<int4>},
        {setToWithOutMask<float>,setToWithOutMask<float2>,setToWithOutMask<float3>,setToWithOutMask<float4>},
        {setToWithOutMask<double>,setToWithOutMask<double2>,setToWithOutMask<double3>,setToWithOutMask<double4>}
    };

    funcs[depth()][channels() - 1](*this, value, stream);

    return *this;
}
// Sets elements to the given scalar where the CV_8UC1 mask is non-zero.
// An empty mask degenerates to the unmasked setTo overload.
GpuMat& cv::cuda::GpuMat::setTo(Scalar value, InputArray _mask, Stream& stream)
{
    CV_DbgAssert( !empty() );
    CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );

    GpuMat mask = _mask.getGpuMat();

    if (mask.empty())
    {
        return setTo(value, stream);
    }

    CV_DbgAssert( size() == mask.size() && mask.type() == CV_8UC1 );

    // Dispatch table indexed by [depth()][channels() - 1].
    typedef void (*func_t)(const GpuMat& mat, const GpuMat& mask, Scalar scalar, Stream& stream);
    static const func_t funcs[7][4] =
    {
        {setToWithMask<uchar>,setToWithMask<uchar2>,setToWithMask<uchar3>,setToWithMask<uchar4>},
        {setToWithMask<schar>,setToWithMask<char2>,setToWithMask<char3>,setToWithMask<char4>},
        {setToWithMask<ushort>,setToWithMask<ushort2>,setToWithMask<ushort3>,setToWithMask<ushort4>},
        {setToWithMask<short>,setToWithMask<short2>,setToWithMask<short3>,setToWithMask<short4>},
        {setToWithMask<int>,setToWithMask<int2>,setToWithMask<int3>,setToWithMask<int4>},
        {setToWithMask<float>,setToWithMask<float2>,setToWithMask<float3>,setToWithMask<float4>},
        {setToWithMask<double>,setToWithMask<double2>,setToWithMask<double3>,setToWithMask<double4>}
    };

    funcs[depth()][channels() - 1](*this, mask, value, stream);

    return *this;
}
/////////////////////////////////////////////////////
463
/// convertTo
464
465
namespace
{
    // Transform policy for conversions: work involving double uses a
    // smaller shift than DefaultTransformPolicy provides.
    template <typename T> struct ConvertToPolicy : DefaultTransformPolicy
    {
    };
    template <> struct ConvertToPolicy<double> : DefaultTransformPolicy
    {
        enum {
            shift = 1
        };
    };

    // Element-wise saturating conversion T -> D with no scaling.
    template <typename T, typename D>
    void convertToNoScale(const GpuMat& src, const GpuMat& dst, Stream& stream)
    {
        typedef typename VecTraits<T>::elem_type src_elem_type;
        typedef typename VecTraits<D>::elem_type dst_elem_type;
        // NOTE(review): larger_elem_type is declared but never used here
        // (same in the other helpers below); kept as-is.
        typedef typename LargerType<src_elem_type, float>::type larger_elem_type;
        // scalar_type is float, or double for double destinations; it only
        // selects the transform policy.
        typedef typename LargerType<float, dst_elem_type>::type scalar_type;

        gridTransformUnary_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), saturate_cast_func<T, D>(), stream);
    }

    // Device functor computing saturate_cast<D>(alpha * src + beta).
    template <typename T, typename D, typename S> struct Convertor : unary_function<T, D>
    {
        S alpha;
        S beta;

        __device__ __forceinline__ D operator ()(typename TypeTraits<T>::parameter_type src) const
        {
            return cudev::saturate_cast<D>(alpha * src + beta);
        }
    };

    // Element-wise conversion T -> D with linear scaling alpha*x + beta,
    // computed in scalar_type precision.
    template <typename T, typename D>
    void convertToScale(const GpuMat& src, const GpuMat& dst, double alpha, double beta, Stream& stream)
    {
        typedef typename VecTraits<T>::elem_type src_elem_type;
        typedef typename VecTraits<D>::elem_type dst_elem_type;
        typedef typename LargerType<src_elem_type, float>::type larger_elem_type;
        typedef typename LargerType<float, dst_elem_type>::type scalar_type;

        Convertor<T, D, scalar_type> op;
        op.alpha = cv::saturate_cast<scalar_type>(alpha);
        op.beta = cv::saturate_cast<scalar_type>(beta);

        gridTransformUnary_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), op, stream);
    }

    // Half-precision conversion kernel launcher; used by convertFp16() with
    // <float, short> (fp32 -> fp16 stored as CV_16S) and <short, float>.
    template <typename T, typename D>
    void convertScaleHalf(const GpuMat& src, const GpuMat& dst, Stream& stream)
    {
        typedef typename VecTraits<T>::elem_type src_elem_type;
        typedef typename VecTraits<D>::elem_type dst_elem_type;
        typedef typename LargerType<src_elem_type, float>::type larger_elem_type;
        typedef typename LargerType<float, dst_elem_type>::type scalar_type;

        gridTransformUnary_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), saturate_cast_fp16_func<T,D>(), stream);
    }
}
// Converts this matrix to the depth of rtype (channel count is preserved)
// without scaling. Same-depth requests degenerate to a plain copy.
void cv::cuda::GpuMat::convertTo(OutputArray _dst, int rtype, Stream& stream) const
{
    // rtype < 0 means "keep the current type"; otherwise only the depth of
    // rtype is honored and the channel count is forced to match *this.
    if (rtype < 0)
        rtype = type();
    else
        rtype = CV_MAKE_TYPE(CV_MAT_DEPTH(rtype), channels());

    const int sdepth = depth();
    const int ddepth = CV_MAT_DEPTH(rtype);
    if (sdepth == ddepth)
    {
        if (stream)
            copyTo(_dst, stream);
        else
            copyTo(_dst);

        return;
    }

    CV_DbgAssert( sdepth <= CV_64F && ddepth <= CV_64F );

    // Extra reference to our buffer — presumably guards against _dst
    // aliasing *this, so _dst.create() cannot free the source data.
    // TODO(review): confirm this aliasing rationale.
    GpuMat src = *this;

    _dst.create(size(), rtype);
    GpuMat dst = _dst.getGpuMat();

    // Dispatch table indexed by [sdepth][ddepth]; the diagonal is unused
    // because equal depths were handled above.
    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream);
    static const func_t funcs[7][7] =
    {
        {0, convertToNoScale<uchar, schar>, convertToNoScale<uchar, ushort>, convertToNoScale<uchar, short>, convertToNoScale<uchar, int>, convertToNoScale<uchar, float>, convertToNoScale<uchar, double>},
        {convertToNoScale<schar, uchar>, 0, convertToNoScale<schar, ushort>, convertToNoScale<schar, short>, convertToNoScale<schar, int>, convertToNoScale<schar, float>, convertToNoScale<schar, double>},
        {convertToNoScale<ushort, uchar>, convertToNoScale<ushort, schar>, 0, convertToNoScale<ushort, short>, convertToNoScale<ushort, int>, convertToNoScale<ushort, float>, convertToNoScale<ushort, double>},
        {convertToNoScale<short, uchar>, convertToNoScale<short, schar>, convertToNoScale<short, ushort>, 0, convertToNoScale<short, int>, convertToNoScale<short, float>, convertToNoScale<short, double>},
        {convertToNoScale<int, uchar>, convertToNoScale<int, schar>, convertToNoScale<int, ushort>, convertToNoScale<int, short>, 0, convertToNoScale<int, float>, convertToNoScale<int, double>},
        {convertToNoScale<float, uchar>, convertToNoScale<float, schar>, convertToNoScale<float, ushort>, convertToNoScale<float, short>, convertToNoScale<float, int>, 0, convertToNoScale<float, double>},
        {convertToNoScale<double, uchar>, convertToNoScale<double, schar>, convertToNoScale<double, ushort>, convertToNoScale<double, short>, convertToNoScale<double, int>, convertToNoScale<double, float>, 0}
    };

    // Multi-channel matrices are flattened to one channel for the kernel.
    funcs[sdepth][ddepth](reshape(1), dst.reshape(1), stream);
}
// Converts this matrix to the depth of rtype applying the linear map
// alpha*x + beta. Unlike the no-scale overload, same-depth conversions go
// through the kernel too, since the scaling must still be applied.
void cv::cuda::GpuMat::convertTo(OutputArray _dst, int rtype, double alpha, double beta, Stream& stream) const
{
    // rtype < 0 means "keep the current type"; otherwise only the depth of
    // rtype is honored and the channel count is forced to match *this.
    if (rtype < 0)
        rtype = type();
    else
        rtype = CV_MAKETYPE(CV_MAT_DEPTH(rtype), channels());

    const int sdepth = depth();
    const int ddepth = CV_MAT_DEPTH(rtype);

    // Extra reference to our buffer — presumably guards against _dst
    // aliasing *this, so _dst.create() cannot free the source data.
    // TODO(review): confirm this aliasing rationale.
    GpuMat src = *this;

    _dst.create(size(), rtype);
    GpuMat dst = _dst.getGpuMat();

    // Dispatch table indexed by [sdepth][ddepth]; fully populated because
    // the diagonal (equal depths) is valid here.
    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, double alpha, double beta, Stream& stream);
    static const func_t funcs[7][7] =
    {
        {convertToScale<uchar, uchar>, convertToScale<uchar, schar>, convertToScale<uchar, ushort>, convertToScale<uchar, short>, convertToScale<uchar, int>, convertToScale<uchar, float>, convertToScale<uchar, double>},
        {convertToScale<schar, uchar>, convertToScale<schar, schar>, convertToScale<schar, ushort>, convertToScale<schar, short>, convertToScale<schar, int>, convertToScale<schar, float>, convertToScale<schar, double>},
        {convertToScale<ushort, uchar>, convertToScale<ushort, schar>, convertToScale<ushort, ushort>, convertToScale<ushort, short>, convertToScale<ushort, int>, convertToScale<ushort, float>, convertToScale<ushort, double>},
        {convertToScale<short, uchar>, convertToScale<short, schar>, convertToScale<short, ushort>, convertToScale<short, short>, convertToScale<short, int>, convertToScale<short, float>, convertToScale<short, double>},
        {convertToScale<int, uchar>, convertToScale<int, schar>, convertToScale<int, ushort>, convertToScale<int, short>, convertToScale<int, int>, convertToScale<int, float>, convertToScale<int, double>},
        {convertToScale<float, uchar>, convertToScale<float, schar>, convertToScale<float, ushort>, convertToScale<float, short>, convertToScale<float, int>, convertToScale<float, float>, convertToScale<float, double>},
        {convertToScale<double, uchar>, convertToScale<double, schar>, convertToScale<double, ushort>, convertToScale<double, short>, convertToScale<double, int>, convertToScale<double, float>, convertToScale<double, double>}
    };

    // Multi-channel matrices are flattened to one channel for the kernel.
    funcs[sdepth][ddepth](reshape(1), dst.reshape(1), alpha, beta, stream);
}
// Converts between FP32 and half precision: CV_32F input produces half
// values stored in a CV_16S matrix, and CV_16S (half) input produces CV_32F.
// Any other input depth raises StsUnsupportedFormat.
void cv::cuda::convertFp16(InputArray _src, OutputArray _dst, Stream& stream)
{
    GpuMat src = _src.getGpuMat();
    int ddepth = 0;

    switch(src.depth())
    {
    case CV_32F:
        ddepth = CV_16S;
        break;
    case CV_16S:
        ddepth = CV_32F;
        break;
    default:
        CV_Error(Error::StsUnsupportedFormat, "Unsupported input depth");
        return;
    }
    int type = CV_MAKE_TYPE(CV_MAT_DEPTH(ddepth), src.channels());
    _dst.create(src.size(), type);
    GpuMat dst = _dst.getGpuMat();

    // Dispatch indexed by *destination* depth: funcs[CV_16S] converts
    // float -> half, funcs[CV_32F] converts half -> float; every other slot
    // is unreachable given the switch above.
    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream);
    static const func_t funcs[] =
    {
        0, 0, 0,
        convertScaleHalf<float, short>, 0, convertScaleHalf<short, float>,
        0, 0,
    };

    // Multi-channel matrices are flattened to one channel for the kernel.
    funcs[ddepth](src.reshape(1), dst.reshape(1), stream);
}
#endif
630
631