Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/stitching/src/blenders.cpp
16337 views
1
/*M///////////////////////////////////////////////////////////////////////////////////////
2
//
3
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4
//
5
// By downloading, copying, installing or using the software you agree to this license.
6
// If you do not agree to this license, do not download, install,
7
// copy or use the software.
8
//
9
//
10
// License Agreement
11
// For Open Source Computer Vision Library
12
//
13
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15
// Third party copyrights are property of their respective owners.
16
//
17
// Redistribution and use in source and binary forms, with or without modification,
18
// are permitted provided that the following conditions are met:
19
//
20
// * Redistribution's of source code must retain the above copyright notice,
21
// this list of conditions and the following disclaimer.
22
//
23
// * Redistribution's in binary form must reproduce the above copyright notice,
24
// this list of conditions and the following disclaimer in the documentation
25
// and/or other materials provided with the distribution.
26
//
27
// * The name of the copyright holders may not be used to endorse or promote products
28
// derived from this software without specific prior written permission.
29
//
30
// This software is provided by the copyright holders and contributors "as is" and
31
// any express or implied warranties, including, but not limited to, the implied
32
// warranties of merchantability and fitness for a particular purpose are disclaimed.
33
// In no event shall the Intel Corporation or contributors be liable for any direct,
34
// indirect, incidental, special, exemplary, or consequential damages
35
// (including, but not limited to, procurement of substitute goods or services;
36
// loss of use, data, or profits; or business interruption) however caused
37
// and on any theory of liability, whether in contract, strict liability,
38
// or tort (including negligence or otherwise) arising in any way out of
39
// the use of this software, even if advised of the possibility of such damage.
40
//
41
//M*/
42
43
#include "precomp.hpp"
44
#include "opencl_kernels_stitching.hpp"
45
46
#ifdef HAVE_CUDA
47
namespace cv { namespace cuda { namespace device
48
{
49
namespace blend
50
{
51
void addSrcWeightGpu16S(const PtrStep<short> src, const PtrStep<short> src_weight,
52
PtrStep<short> dst, PtrStep<short> dst_weight, cv::Rect &rc);
53
void addSrcWeightGpu32F(const PtrStep<short> src, const PtrStepf src_weight,
54
PtrStep<short> dst, PtrStepf dst_weight, cv::Rect &rc);
55
void normalizeUsingWeightMapGpu16S(const PtrStep<short> weight, PtrStep<short> src,
56
const int width, const int height);
57
void normalizeUsingWeightMapGpu32F(const PtrStepf weight, PtrStep<short> src,
58
const int width, const int height);
59
}
60
}}}
61
#endif
62
63
namespace cv {
64
namespace detail {
65
66
static const float WEIGHT_EPS = 1e-5f;
67
68
Ptr<Blender> Blender::createDefault(int type, bool try_gpu)
{
    // Factory: dispatch on the requested blending strategy.
    // try_gpu is only meaningful for the multi-band blender.
    switch (type)
    {
    case NO:
        return makePtr<Blender>();
    case FEATHER:
        return makePtr<FeatherBlender>();
    case MULTI_BAND:
        return makePtr<MultiBandBlender>(try_gpu);
    default:
        CV_Error(Error::StsBadArg, "unsupported blending method");
    }
}
78
79
80
void Blender::prepare(const std::vector<Point> &corners, const std::vector<Size> &sizes)
81
{
82
prepare(resultRoi(corners, sizes));
83
}
84
85
86
void Blender::prepare(Rect dst_roi)
{
    // Allocate and zero the destination panorama (16-bit signed BGR) and its
    // 8-bit coverage mask; remember the ROI so feed() can translate
    // image coordinates into destination coordinates.
    dst_roi_ = dst_roi;

    dst_.create(dst_roi.size(), CV_16SC3);
    dst_mask_.create(dst_roi.size(), CV_8U);

    dst_.setTo(Scalar::all(0));
    dst_mask_.setTo(Scalar::all(0));
}
94
95
96
void Blender::feed(InputArray _img, InputArray _mask, Point tl)
{
    // Trivial blending: masked source pixels simply overwrite the destination,
    // and the mask is OR-ed into the accumulated coverage mask.
    Mat img = _img.getMat();
    Mat mask = _mask.getMat();
    Mat dst = dst_.getMat(ACCESS_RW);
    Mat dst_mask = dst_mask_.getMat(ACCESS_RW);

    CV_Assert(img.type() == CV_16SC3);
    CV_Assert(mask.type() == CV_8U);

    // Offset of the image's top-left corner inside the destination ROI.
    const int ox = tl.x - dst_roi_.x;
    const int oy = tl.y - dst_roi_.y;

    for (int r = 0; r < img.rows; ++r)
    {
        const Point3_<short> *src_px = img.ptr<Point3_<short> >(r);
        const uchar *m = mask.ptr<uchar>(r);
        // Hoist the horizontal offset into the row pointers.
        Point3_<short> *dst_px = dst.ptr<Point3_<short> >(oy + r) + ox;
        uchar *dst_m = dst_mask.ptr<uchar>(oy + r) + ox;

        for (int c = 0; c < img.cols; ++c)
        {
            if (m[c])
                dst_px[c] = src_px[c];
            dst_m[c] |= m[c];
        }
    }
}
123
124
125
void Blender::blend(InputOutputArray dst, InputOutputArray dst_mask)
{
    // Zero every pixel that no source image ever covered, hand the result and
    // its mask to the caller, then release the internal buffers so the
    // blender can be prepared again.
    UMat uncovered;
    compare(dst_mask_, 0, uncovered, CMP_EQ);
    dst_.setTo(Scalar::all(0), uncovered);

    dst.assign(dst_);
    dst_mask.assign(dst_mask_);

    dst_.release();
    dst_mask_.release();
}
135
136
137
void FeatherBlender::prepare(Rect dst_roi)
{
    // Base-class allocation plus a float accumulator for the per-pixel sum of
    // feathering weights, normalized later in blend().
    Blender::prepare(dst_roi);

    dst_weight_map_.create(dst_roi.size(), CV_32F);
    dst_weight_map_.setTo(0);
}
143
144
145
void FeatherBlender::feed(InputArray _img, InputArray mask, Point tl)
{
    // Accumulate the weighted source image into dst_ and its weights into
    // dst_weight_map_; the division by total weight happens in blend().
    Mat img = _img.getMat();
    Mat dst = dst_.getMat(ACCESS_RW);

    CV_Assert(img.type() == CV_16SC3);
    CV_Assert(mask.type() == CV_8U);

    // Per-pixel feathering weights derived from the mask's distance transform.
    createWeightMap(mask, sharpness_, weight_map_);
    Mat weight_map = weight_map_.getMat(ACCESS_READ);
    Mat dst_weight_map = dst_weight_map_.getMat(ACCESS_RW);

    // Offset of the image's top-left corner inside the destination ROI.
    const int ox = tl.x - dst_roi_.x;
    const int oy = tl.y - dst_roi_.y;

    for (int r = 0; r < img.rows; ++r)
    {
        const Point3_<short>* src_px = img.ptr<Point3_<short> >(r);
        const float* w = weight_map.ptr<float>(r);
        Point3_<short>* dst_px = dst.ptr<Point3_<short> >(oy + r) + ox;
        float* dst_w = dst_weight_map.ptr<float>(oy + r) + ox;

        for (int c = 0; c < img.cols; ++c)
        {
            const float wc = w[c];
            dst_px[c].x += static_cast<short>(src_px[c].x * wc);
            dst_px[c].y += static_cast<short>(src_px[c].y * wc);
            dst_px[c].z += static_cast<short>(src_px[c].z * wc);
            dst_w[c] += wc;
        }
    }
}
176
177
178
void FeatherBlender::blend(InputOutputArray dst, InputOutputArray dst_mask)
{
    // Divide the accumulated image by the accumulated weights, mark every
    // pixel with non-negligible total weight as valid, then let the base
    // class emit the result and release internal state.
    normalizeUsingWeightMap(dst_weight_map_, dst_);
    compare(dst_weight_map_, WEIGHT_EPS, dst_mask_, CMP_GT);
    Blender::blend(dst, dst_mask);
}
184
185
186
Rect FeatherBlender::createWeightMaps(const std::vector<UMat> &masks, const std::vector<Point> &corners,
                                      std::vector<UMat> &weight_maps)
{
    // Build one feathering weight map per mask, then normalize the maps so
    // that weights of overlapping images sum to 1 at every destination pixel.
    // Returns the bounding ROI of all images.
    const size_t count = masks.size();
    weight_maps.resize(count);
    for (size_t i = 0; i < count; ++i)
        createWeightMap(masks[i], sharpness_, weight_maps[i]);

    const Rect dst_roi = resultRoi(corners, masks);

    // Accumulate the total weight contributed at every destination pixel.
    Mat weights_sum(dst_roi.size(), CV_32F);
    weights_sum.setTo(0);
    for (size_t i = 0; i < count; ++i)
    {
        const Rect roi(corners[i].x - dst_roi.x, corners[i].y - dst_roi.y,
                       weight_maps[i].cols, weight_maps[i].rows);
        add(weights_sum(roi), weight_maps[i], weights_sum(roi));
    }

    // Divide each map by the total weight. Where the total is ~0 (no image
    // contributes) the divisor is clamped to 1 to avoid division by zero.
    for (size_t i = 0; i < count; ++i)
    {
        const Rect roi(corners[i].x - dst_roi.x, corners[i].y - dst_roi.y,
                       weight_maps[i].cols, weight_maps[i].rows);
        Mat divisor = weights_sum(roi);
        divisor.setTo(1, divisor < std::numeric_limits<float>::epsilon());
        divide(weight_maps[i], divisor, weight_maps[i]);
    }

    return dst_roi;
}
215
216
217
// Construct a multi-band (Laplacian pyramid) blender.
// try_gpu     - request the CUDA implementation (honored only when the CUDA
//               modules are compiled in and a device is available).
// num_bands   - requested number of pyramid bands; the effective count is
//               clamped later in prepare() based on the destination size.
// weight_type - CV_32F (float weights) or CV_16S (fixed-point weights).
MultiBandBlender::MultiBandBlender(int try_gpu, int num_bands, int weight_type)
{
    num_bands_ = 0;
    setNumBands(num_bands);

#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
    // Use CUDA only if requested AND a CUDA-capable device is actually present.
    can_use_gpu_ = try_gpu && cuda::getCudaEnabledDeviceCount();
    gpu_feed_idx_ = 0;
#else
    CV_UNUSED(try_gpu);
    can_use_gpu_ = false;
#endif

    CV_Assert(weight_type == CV_32F || weight_type == CV_16S);
    weight_type_ = weight_type;
}
233
234
235
// Allocate the destination Laplacian pyramid and band-weight accumulators.
// The working ROI is padded so its sides are multiples of (1 << num_bands_);
// blend() later crops the result back to dst_roi_final_.
void MultiBandBlender::prepare(Rect dst_roi)
{
    dst_roi_final_ = dst_roi;

    // Crop unnecessary bands: more than log2(max side) levels cannot add detail.
    double max_len = static_cast<double>(std::max(dst_roi.width, dst_roi.height));
    num_bands_ = std::min(actual_num_bands_, static_cast<int>(ceil(std::log(max_len) / std::log(2.0))));

    // Add border to the final image, to ensure sizes are divided by (1 << num_bands_)
    dst_roi.width += ((1 << num_bands_) - dst_roi.width % (1 << num_bands_)) % (1 << num_bands_);
    dst_roi.height += ((1 << num_bands_) - dst_roi.height % (1 << num_bands_)) % (1 << num_bands_);

    Blender::prepare(dst_roi);

#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
    if (can_use_gpu_)
    {
        // Reset all per-run GPU state; per-image pyramids are rebuilt lazily
        // by feed() (gpu_initialized_ becomes true after the first blend()).
        gpu_initialized_ = false;
        gpu_feed_idx_ = 0;

        gpu_tl_points_.clear();
        gpu_weight_pyr_gauss_vec_.clear();
        gpu_src_pyr_laplace_vec_.clear();
        gpu_ups_.clear();
        gpu_imgs_with_border_.clear();

        // Destination Laplacian pyramid: num_bands_ + 1 levels, zero-filled.
        gpu_dst_pyr_laplace_.resize(num_bands_ + 1);
        gpu_dst_pyr_laplace_[0].create(dst_roi.size(), CV_16SC3);
        gpu_dst_pyr_laplace_[0].setTo(Scalar::all(0));

        // Matching per-level accumulated band weights.
        gpu_dst_band_weights_.resize(num_bands_ + 1);
        gpu_dst_band_weights_[0].create(dst_roi.size(), weight_type_);
        gpu_dst_band_weights_[0].setTo(0);

        // Each coarser level is half the size (rounded up) of the previous one.
        for (int i = 1; i <= num_bands_; ++i)
        {
            gpu_dst_pyr_laplace_[i].create((gpu_dst_pyr_laplace_[i - 1].rows + 1) / 2,
                (gpu_dst_pyr_laplace_[i - 1].cols + 1) / 2, CV_16SC3);
            gpu_dst_band_weights_[i].create((gpu_dst_band_weights_[i - 1].rows + 1) / 2,
                (gpu_dst_band_weights_[i - 1].cols + 1) / 2, weight_type_);
            gpu_dst_pyr_laplace_[i].setTo(Scalar::all(0));
            gpu_dst_band_weights_[i].setTo(0);
        }
    }
    else
#endif
    {
        // CPU path: level 0 of the pyramid shares storage with dst_.
        dst_pyr_laplace_.resize(num_bands_ + 1);
        dst_pyr_laplace_[0] = dst_;

        dst_band_weights_.resize(num_bands_ + 1);
        dst_band_weights_[0].create(dst_roi.size(), weight_type_);
        dst_band_weights_[0].setTo(0);

        // Each coarser level is half the size (rounded up) of the previous one.
        for (int i = 1; i <= num_bands_; ++i)
        {
            dst_pyr_laplace_[i].create((dst_pyr_laplace_[i - 1].rows + 1) / 2,
                                       (dst_pyr_laplace_[i - 1].cols + 1) / 2, CV_16SC3);
            dst_band_weights_[i].create((dst_band_weights_[i - 1].rows + 1) / 2,
                                        (dst_band_weights_[i - 1].cols + 1) / 2, weight_type_);
            dst_pyr_laplace_[i].setTo(Scalar::all(0));
            dst_band_weights_[i].setTo(0);
        }
    }
}
300
301
#ifdef HAVE_OPENCL
302
// Run the OpenCL "feed" kernel that adds a weighted source layer into the
// destination layer and its weight accumulator. Returns false when the kernel
// cannot be built or launched, so the caller can fall back to the CPU path.
static bool ocl_MultiBandBlender_feed(InputArray _src, InputArray _weight,
                                      InputOutputArray _dst, InputOutputArray _dst_weight)
{
    String buildOptions = "-D DEFINE_feed";
    ocl::buildOptionsAddMatrixDescription(buildOptions, "src", _src);
    ocl::buildOptionsAddMatrixDescription(buildOptions, "weight", _weight);
    ocl::buildOptionsAddMatrixDescription(buildOptions, "dst", _dst);
    ocl::buildOptionsAddMatrixDescription(buildOptions, "dstWeight", _dst_weight);

    ocl::Kernel kernel("feed", ocl::stitching::multibandblend_oclsrc, buildOptions);
    if (kernel.empty())
        return false;

    UMat src = _src.getUMat();

    kernel.args(ocl::KernelArg::ReadOnly(src),
                ocl::KernelArg::ReadOnly(_weight.getUMat()),
                ocl::KernelArg::ReadWrite(_dst.getUMat()),
                ocl::KernelArg::ReadWrite(_dst_weight.getUMat()));

    // One work item per source pixel.
    size_t globalsize[2] = { (size_t)src.cols, (size_t)src.rows };
    return kernel.run(2, globalsize, NULL, false);
}
325
#endif
326
327
// Add one source image (CV_16SC3 or CV_8UC3) with its CV_8U mask into the
// destination pyramids. tl is the image's top-left corner in panorama
// coordinates. Three execution paths: CUDA (when enabled and available),
// OpenCL (per pyramid level, with CPU fallback), and plain CPU loops.
void MultiBandBlender::feed(InputArray _img, InputArray mask, Point tl)
{
#if ENABLE_LOG
    int64 t = getTickCount();
#endif

    UMat img;

#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
    // If using gpu save the top left coordinate when running first time after prepare
    if (can_use_gpu_)
    {
        if (!gpu_initialized_)
        {
            gpu_tl_points_.push_back(tl);
        }
        else
        {
            // On subsequent runs images must arrive in the same order; reuse
            // the coordinates recorded during the first pass.
            tl = gpu_tl_points_[gpu_feed_idx_];
        }
    }
    // If _img is not a GpuMat get it as UMat from the InputArray object.
    // If it is GpuMat make a dummy object with right dimensions but no data and
    // get _img as a GpuMat
    if (!_img.isGpuMat())
#endif
    {
        img = _img.getUMat();
    }
#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
    else
    {
        gpu_img_ = _img.getGpuMat();
        // Dummy UMat only carries rows/cols/type for the geometry code below.
        img = UMat(gpu_img_.rows, gpu_img_.cols, gpu_img_.type());
    }
#endif

    CV_Assert(img.type() == CV_16SC3 || img.type() == CV_8UC3);
    CV_Assert(mask.type() == CV_8U);

    // Keep source image in memory with small border
    int gap = 3 * (1 << num_bands_);
    Point tl_new(std::max(dst_roi_.x, tl.x - gap),
                 std::max(dst_roi_.y, tl.y - gap));
    Point br_new(std::min(dst_roi_.br().x, tl.x + img.cols + gap),
                 std::min(dst_roi_.br().y, tl.y + img.rows + gap));

    // Ensure coordinates of top-left, bottom-right corners are divided by (1 << num_bands_).
    // After that scale between layers is exactly 2.
    //
    // We do it to avoid interpolation problems when keeping sub-images only. There is no such problem when
    // image is bordered to have size equal to the final image size, but this is too memory hungry approach.
    tl_new.x = dst_roi_.x + (((tl_new.x - dst_roi_.x) >> num_bands_) << num_bands_);
    tl_new.y = dst_roi_.y + (((tl_new.y - dst_roi_.y) >> num_bands_) << num_bands_);
    int width = br_new.x - tl_new.x;
    int height = br_new.y - tl_new.y;
    width += ((1 << num_bands_) - width % (1 << num_bands_)) % (1 << num_bands_);
    height += ((1 << num_bands_) - height % (1 << num_bands_)) % (1 << num_bands_);
    br_new.x = tl_new.x + width;
    br_new.y = tl_new.y + height;
    // If the padded rectangle ran past the destination, shift it back inside.
    int dy = std::max(br_new.y - dst_roi_.br().y, 0);
    int dx = std::max(br_new.x - dst_roi_.br().x, 0);
    tl_new.x -= dx; br_new.x -= dx;
    tl_new.y -= dy; br_new.y -= dy;

    // Border widths to grow the source image to [tl_new, br_new).
    int top = tl.y - tl_new.y;
    int left = tl.x - tl_new.x;
    int bottom = br_new.y - tl.y - img.rows;
    int right = br_new.x - tl.x - img.cols;

#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
    if (can_use_gpu_)
    {
        // First pass: allocate per-image GPU buffers (reused on later passes).
        if (!gpu_initialized_)
        {
            gpu_imgs_with_border_.push_back(cuda::GpuMat());
            gpu_weight_pyr_gauss_vec_.push_back(std::vector<cuda::GpuMat>(num_bands_+1));
            gpu_src_pyr_laplace_vec_.push_back(std::vector<cuda::GpuMat>(num_bands_+1));
            gpu_ups_.push_back(std::vector<cuda::GpuMat>(num_bands_));
        }

        // If _img is not GpuMat upload it to gpu else gpu_img_ was set already
        if (!_img.isGpuMat())
        {
            gpu_img_.upload(img);
        }

        // Create the source image Laplacian pyramid
        cuda::copyMakeBorder(gpu_img_, gpu_imgs_with_border_[gpu_feed_idx_], top, bottom,
                             left, right, BORDER_REFLECT);
        gpu_imgs_with_border_[gpu_feed_idx_].convertTo(gpu_src_pyr_laplace_vec_[gpu_feed_idx_][0], CV_16S);
        for (int i = 0; i < num_bands_; ++i)
            cuda::pyrDown(gpu_src_pyr_laplace_vec_[gpu_feed_idx_][i],
                          gpu_src_pyr_laplace_vec_[gpu_feed_idx_][i + 1]);
        // Laplacian level i = Gaussian level i minus upsampled level i+1.
        for (int i = 0; i < num_bands_; ++i)
        {
            cuda::pyrUp(gpu_src_pyr_laplace_vec_[gpu_feed_idx_][i + 1], gpu_ups_[gpu_feed_idx_][i]);
            cuda::subtract(gpu_src_pyr_laplace_vec_[gpu_feed_idx_][i],
                           gpu_ups_[gpu_feed_idx_][i],
                           gpu_src_pyr_laplace_vec_[gpu_feed_idx_][i]);
        }

        // Create the weight map Gaussian pyramid only if not yet initialized
        if (!gpu_initialized_)
        {
            if (mask.isGpuMat())
            {
                gpu_mask_ = mask.getGpuMat();
            }
            else
            {
                gpu_mask_.upload(mask);
            }

            if (weight_type_ == CV_32F)
            {
                // Float weights in [0, 1].
                gpu_mask_.convertTo(gpu_weight_map_, CV_32F, 1. / 255.);
            }
            else // weight_type_ == CV_16S
            {
                // Fixed-point weights: mask values 255 become 256 (+1 on
                // non-zero pixels), matching the >> 8 in the add kernels.
                gpu_mask_.convertTo(gpu_weight_map_, CV_16S);
                cuda::compare(gpu_mask_, 0, gpu_add_mask_, CMP_NE);
                cuda::add(gpu_weight_map_, Scalar::all(1), gpu_weight_map_, gpu_add_mask_);
            }
            cuda::copyMakeBorder(gpu_weight_map_, gpu_weight_pyr_gauss_vec_[gpu_feed_idx_][0], top,
                                 bottom, left, right, BORDER_CONSTANT);
            for (int i = 0; i < num_bands_; ++i)
                cuda::pyrDown(gpu_weight_pyr_gauss_vec_[gpu_feed_idx_][i],
                              gpu_weight_pyr_gauss_vec_[gpu_feed_idx_][i + 1]);
        }

        // Destination-relative bounds of this image's padded rectangle.
        int y_tl = tl_new.y - dst_roi_.y;
        int y_br = br_new.y - dst_roi_.y;
        int x_tl = tl_new.x - dst_roi_.x;
        int x_br = br_new.x - dst_roi_.x;

        // Add weighted layer of the source image to the final Laplacian pyramid layer
        for (int i = 0; i <= num_bands_; ++i)
        {
            Rect rc(x_tl, y_tl, x_br - x_tl, y_br - y_tl);
            cuda::GpuMat &_src_pyr_laplace = gpu_src_pyr_laplace_vec_[gpu_feed_idx_][i];
            cuda::GpuMat _dst_pyr_laplace = gpu_dst_pyr_laplace_[i](rc);
            cuda::GpuMat &_weight_pyr_gauss = gpu_weight_pyr_gauss_vec_[gpu_feed_idx_][i];
            cuda::GpuMat _dst_band_weights = gpu_dst_band_weights_[i](rc);

            using namespace cv::cuda::device::blend;
            if (weight_type_ == CV_32F)
            {
                addSrcWeightGpu32F(_src_pyr_laplace, _weight_pyr_gauss, _dst_pyr_laplace, _dst_band_weights, rc);
            }
            else
            {
                addSrcWeightGpu16S(_src_pyr_laplace, _weight_pyr_gauss, _dst_pyr_laplace, _dst_band_weights, rc);
            }
            // Halve the rectangle for the next (coarser) pyramid level.
            x_tl /= 2; y_tl /= 2;
            x_br /= 2; y_br /= 2;
        }
        ++gpu_feed_idx_;
        return;
    }
#endif

    // Create the source image Laplacian pyramid
    UMat img_with_border;
    copyMakeBorder(_img, img_with_border, top, bottom, left, right,
                   BORDER_REFLECT);
    LOGLN(" Add border to the source image, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
#if ENABLE_LOG
    t = getTickCount();
#endif

    std::vector<UMat> src_pyr_laplace;
    createLaplacePyr(img_with_border, num_bands_, src_pyr_laplace);

    LOGLN(" Create the source image Laplacian pyramid, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
#if ENABLE_LOG
    t = getTickCount();
#endif

    // Create the weight map Gaussian pyramid
    UMat weight_map;
    std::vector<UMat> weight_pyr_gauss(num_bands_ + 1);

    if (weight_type_ == CV_32F)
    {
        // Float weights in [0, 1].
        mask.getUMat().convertTo(weight_map, CV_32F, 1./255.);
    }
    else // weight_type_ == CV_16S
    {
        // Fixed-point weights: mask values 255 become 256 (+1 on non-zero
        // pixels), matching the >> 8 in the accumulation loop below.
        mask.getUMat().convertTo(weight_map, CV_16S);
        UMat add_mask;
        compare(mask, 0, add_mask, CMP_NE);
        add(weight_map, Scalar::all(1), weight_map, add_mask);
    }

    copyMakeBorder(weight_map, weight_pyr_gauss[0], top, bottom, left, right, BORDER_CONSTANT);

    for (int i = 0; i < num_bands_; ++i)
        pyrDown(weight_pyr_gauss[i], weight_pyr_gauss[i + 1]);

    LOGLN(" Create the weight map Gaussian pyramid, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
#if ENABLE_LOG
    t = getTickCount();
#endif

    // Destination-relative bounds of this image's padded rectangle.
    int y_tl = tl_new.y - dst_roi_.y;
    int y_br = br_new.y - dst_roi_.y;
    int x_tl = tl_new.x - dst_roi_.x;
    int x_br = br_new.x - dst_roi_.x;

    // Add weighted layer of the source image to the final Laplacian pyramid layer
    for (int i = 0; i <= num_bands_; ++i)
    {
        Rect rc(x_tl, y_tl, x_br - x_tl, y_br - y_tl);
#ifdef HAVE_OPENCL
        // Try the OpenCL kernel first; fall through to CPU loops on failure.
        if ( !cv::ocl::isOpenCLActivated() ||
            !ocl_MultiBandBlender_feed(src_pyr_laplace[i], weight_pyr_gauss[i],
                dst_pyr_laplace_[i](rc), dst_band_weights_[i](rc)) )
#endif
        {
            Mat _src_pyr_laplace = src_pyr_laplace[i].getMat(ACCESS_READ);
            Mat _dst_pyr_laplace = dst_pyr_laplace_[i](rc).getMat(ACCESS_RW);
            Mat _weight_pyr_gauss = weight_pyr_gauss[i].getMat(ACCESS_READ);
            Mat _dst_band_weights = dst_band_weights_[i](rc).getMat(ACCESS_RW);
            if (weight_type_ == CV_32F)
            {
                for (int y = 0; y < rc.height; ++y)
                {
                    const Point3_<short>* src_row = _src_pyr_laplace.ptr<Point3_<short> >(y);
                    Point3_<short>* dst_row = _dst_pyr_laplace.ptr<Point3_<short> >(y);
                    const float* weight_row = _weight_pyr_gauss.ptr<float>(y);
                    float* dst_weight_row = _dst_band_weights.ptr<float>(y);

                    for (int x = 0; x < rc.width; ++x)
                    {
                        dst_row[x].x += static_cast<short>(src_row[x].x * weight_row[x]);
                        dst_row[x].y += static_cast<short>(src_row[x].y * weight_row[x]);
                        dst_row[x].z += static_cast<short>(src_row[x].z * weight_row[x]);
                        dst_weight_row[x] += weight_row[x];
                    }
                }
            }
            else // weight_type_ == CV_16S
            {
                // Note: y_br - y_tl == rc.height and x_br - x_tl == rc.width here.
                for (int y = 0; y < y_br - y_tl; ++y)
                {
                    const Point3_<short>* src_row = _src_pyr_laplace.ptr<Point3_<short> >(y);
                    Point3_<short>* dst_row = _dst_pyr_laplace.ptr<Point3_<short> >(y);
                    const short* weight_row = _weight_pyr_gauss.ptr<short>(y);
                    short* dst_weight_row = _dst_band_weights.ptr<short>(y);

                    for (int x = 0; x < x_br - x_tl; ++x)
                    {
                        // Fixed-point multiply: weights are scaled by 256, so
                        // the product is shifted right by 8.
                        dst_row[x].x += short((src_row[x].x * weight_row[x]) >> 8);
                        dst_row[x].y += short((src_row[x].y * weight_row[x]) >> 8);
                        dst_row[x].z += short((src_row[x].z * weight_row[x]) >> 8);
                        dst_weight_row[x] += weight_row[x];
                    }
                }
            }
        }
#ifdef HAVE_OPENCL
        else
        {
            CV_IMPL_ADD(CV_IMPL_OCL);
        }
#endif

        // Halve the rectangle for the next (coarser) pyramid level.
        x_tl /= 2; y_tl /= 2;
        x_br /= 2; y_br /= 2;
    }

    LOGLN(" Add weighted layer of the source image to the final Laplacian pyramid layer, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
}
601
602
603
// Finalize blending: normalize each pyramid level by its accumulated band
// weights, collapse the Laplacian pyramid, crop to the caller's ROI, and emit
// the result plus a validity mask. On the CUDA path, the destination pyramids
// are zeroed afterwards so the same prepared state can blend another set of
// images (gpu_initialized_ is set true to enable buffer reuse in feed()).
void MultiBandBlender::blend(InputOutputArray dst, InputOutputArray dst_mask)
{
    // ROI of the caller-requested result inside the padded working area.
    Rect dst_rc(0, 0, dst_roi_final_.width, dst_roi_final_.height);
#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
    if (can_use_gpu_)
    {
        if (!gpu_initialized_)
        {
            // Scratch buffers for the pyramid collapse below.
            gpu_ups_.push_back(std::vector<cuda::GpuMat>(num_bands_+1));
        }

        // Normalize each level by its accumulated weights.
        for (int i = 0; i <= num_bands_; ++i)
        {
            cuda::GpuMat dst_i = gpu_dst_pyr_laplace_[i];
            cuda::GpuMat weight_i = gpu_dst_band_weights_[i];

            using namespace ::cv::cuda::device::blend;
            if (weight_type_ == CV_32F)
            {
                normalizeUsingWeightMapGpu32F(weight_i, dst_i, weight_i.cols, weight_i.rows);
            }
            else
            {
                normalizeUsingWeightMapGpu16S(weight_i, dst_i, weight_i.cols, weight_i.rows);
            }
        }

        // Restore image from Laplacian pyramid
        for (size_t i = num_bands_; i > 0; --i)
        {
            cuda::pyrUp(gpu_dst_pyr_laplace_[i], gpu_ups_[gpu_ups_.size()-1][num_bands_-i]);
            cuda::add(gpu_ups_[gpu_ups_.size()-1][num_bands_-i],
                      gpu_dst_pyr_laplace_[i - 1],
                      gpu_dst_pyr_laplace_[i - 1]);
        }

        // If dst is GpuMat do masking on gpu and return dst as a GpuMat
        // else download the image to cpu and return it as an ordinary Mat
        if (dst.isGpuMat())
        {
            cuda::GpuMat &gpu_dst = dst.getGpuMatRef();

            // Valid pixels are those with non-negligible accumulated weight.
            cuda::compare(gpu_dst_band_weights_[0](dst_rc), WEIGHT_EPS, gpu_dst_mask_, CMP_GT);

            cuda::compare(gpu_dst_mask_, 0, gpu_mask_, CMP_EQ);

            // Zero uncovered pixels, then emit the cropped result.
            gpu_dst_pyr_laplace_[0](dst_rc).setTo(Scalar::all(0), gpu_mask_);
            gpu_dst_pyr_laplace_[0](dst_rc).convertTo(gpu_dst, CV_16S);

        }
        else
        {
            gpu_dst_pyr_laplace_[0](dst_rc).download(dst_);
            Mat dst_band_weights_0;
            gpu_dst_band_weights_[0].download(dst_band_weights_0);

            compare(dst_band_weights_0(dst_rc), WEIGHT_EPS, dst_mask_, CMP_GT);
            // Base class zeroes uncovered pixels and releases dst_/dst_mask_.
            Blender::blend(dst, dst_mask);
        }

        // Set destination Mats to 0 so new image can be blended
        for (size_t i = 0; i < (size_t)(num_bands_ + 1); ++i)
        {
            gpu_dst_band_weights_[i].setTo(0);
            gpu_dst_pyr_laplace_[i].setTo(Scalar::all(0));
        }
        gpu_feed_idx_ = 0;
        gpu_initialized_ = true;
    }
    else
#endif
    {
        cv::UMat dst_band_weights_0;

        for (int i = 0; i <= num_bands_; ++i)
            normalizeUsingWeightMap(dst_band_weights_[i], dst_pyr_laplace_[i]);

        restoreImageFromLaplacePyr(dst_pyr_laplace_);

        // Crop to the final ROI; keep level-0 weights for the validity mask.
        dst_ = dst_pyr_laplace_[0](dst_rc);
        dst_band_weights_0 = dst_band_weights_[0];

        dst_pyr_laplace_.clear();
        dst_band_weights_.clear();

        compare(dst_band_weights_0(dst_rc), WEIGHT_EPS, dst_mask_, CMP_GT);

        // Base class zeroes uncovered pixels and releases dst_/dst_mask_.
        Blender::blend(dst, dst_mask);
    }
}
693
694
695
//////////////////////////////////////////////////////////////////////////////
696
// Auxiliary functions
697
698
#ifdef HAVE_OPENCL
699
// Run the OpenCL normalization kernel (divide each pixel by its accumulated
// weight). Returns false when the kernel cannot be built or launched, letting
// the caller fall back to the CPU implementation.
static bool ocl_normalizeUsingWeightMap(InputArray _weight, InputOutputArray _mat)
{
    String buildOptions = "-D DEFINE_normalizeUsingWeightMap";
    ocl::buildOptionsAddMatrixDescription(buildOptions, "mat", _mat);
    ocl::buildOptionsAddMatrixDescription(buildOptions, "weight", _weight);

    ocl::Kernel kernel("normalizeUsingWeightMap", ocl::stitching::multibandblend_oclsrc, buildOptions);
    if (kernel.empty())
        return false;

    UMat mat = _mat.getUMat();

    kernel.args(ocl::KernelArg::ReadWrite(mat),
                ocl::KernelArg::ReadOnly(_weight.getUMat()));

    // One work item per pixel of the image being normalized.
    size_t globalsize[2] = { (size_t)mat.cols, (size_t)mat.rows };
    return kernel.run(2, globalsize, NULL, false);
}
717
#endif
718
719
// Divide each pixel of the CV_16SC3 image _src by its accumulated weight from
// _weight (CV_32FC1 or CV_16SC1), completing a weighted-average blend.
// Prefers the OpenCL kernel; falls back to CPU loops when it is unavailable.
void normalizeUsingWeightMap(InputArray _weight, InputOutputArray _src)
{
    Mat src;
    Mat weight;

#ifdef HAVE_OPENCL
    if ( !cv::ocl::isOpenCLActivated() ||
        !ocl_normalizeUsingWeightMap(_weight, _src) )
#endif
    {
        src = _src.getMat();
        weight = _weight.getMat();

        CV_Assert(src.type() == CV_16SC3);

        if (weight.type() == CV_32FC1)
        {
            // Float weights: divide each channel by the weight; WEIGHT_EPS
            // prevents division by zero where nothing was accumulated.
            for (int y = 0; y < src.rows; ++y)
            {
                Point3_<short> *row = src.ptr<Point3_<short> >(y);
                const float *weight_row = weight.ptr<float>(y);

                for (int x = 0; x < src.cols; ++x)
                {
                    row[x].x = static_cast<short>(row[x].x / (weight_row[x] + WEIGHT_EPS));
                    row[x].y = static_cast<short>(row[x].y / (weight_row[x] + WEIGHT_EPS));
                    row[x].z = static_cast<short>(row[x].z / (weight_row[x] + WEIGHT_EPS));
                }
            }
        }
        else
        {
            CV_Assert(weight.type() == CV_16SC1);

            // Fixed-point weights: values were accumulated with products
            // shifted right by 8 (see MultiBandBlender::feed), so shift the
            // channels left by 8 before dividing; +1 avoids division by zero.
            for (int y = 0; y < src.rows; ++y)
            {
                const short *weight_row = weight.ptr<short>(y);
                Point3_<short> *row = src.ptr<Point3_<short> >(y);

                for (int x = 0; x < src.cols; ++x)
                {
                    int w = weight_row[x] + 1;
                    row[x].x = static_cast<short>((row[x].x << 8) / w);
                    row[x].y = static_cast<short>((row[x].y << 8) / w);
                    row[x].z = static_cast<short>((row[x].z << 8) / w);
                }
            }
        }
    }
#ifdef HAVE_OPENCL
    else
    {
        CV_IMPL_ADD(CV_IMPL_OCL);
    }
#endif
}
775
776
777
void createWeightMap(InputArray mask, float sharpness, InputOutputArray weight)
{
    // Feathering weight: L1 distance to the nearest mask edge, scaled by
    // 'sharpness' and saturated at 1, so weights ramp up linearly from the
    // border of the mask and plateau in its interior.
    CV_Assert(mask.type() == CV_8U);
    distanceTransform(mask, weight, DIST_L1, 3);

    UMat scaled;
    multiply(weight, sharpness, scaled);
    threshold(scaled, weight, 1.f, 1.f, THRESH_TRUNC);
}
785
786
787
// Build a Laplacian pyramid with num_levels+1 entries: pyr[0..num_levels-1]
// hold band-pass (difference) images in CV_16S, pyr[num_levels] holds the
// coarsest Gaussian level.
void createLaplacePyr(InputArray img, int num_levels, std::vector<UMat> &pyr)
{
    pyr.resize(num_levels + 1);

    if(img.depth() == CV_8U)
    {
        // 8-bit input: differences are computed directly into CV_16S to keep
        // sign and precision, rotating 'current'/'downNext' buffers so each
        // Gaussian level is downsampled only once.
        if(num_levels == 0)
        {
            img.getUMat().convertTo(pyr[0], CV_16S);
            return;
        }

        UMat downNext;
        UMat current = img.getUMat();
        pyrDown(img, downNext);

        for(int i = 1; i < num_levels; ++i)
        {
            UMat lvl_up;
            UMat lvl_down;

            // Next Gaussian level, and the expansion of the current one.
            pyrDown(downNext, lvl_down);
            pyrUp(downNext, lvl_up, current.size());
            // Band-pass: current Gaussian minus upsampled coarser level.
            subtract(current, lvl_up, pyr[i-1], noArray(), CV_16S);

            current = downNext;
            downNext = lvl_down;
        }

        {
            // Last band-pass level plus the coarsest Gaussian residual.
            UMat lvl_up;
            pyrUp(downNext, lvl_up, current.size());
            subtract(current, lvl_up, pyr[num_levels-1], noArray(), CV_16S);

            downNext.convertTo(pyr[num_levels], CV_16S);
        }
    }
    else
    {
        // Non-8U input (CV_16S in this file): build the full Gaussian pyramid
        // first, then subtract the upsampled next level from each level in place.
        pyr[0] = img.getUMat();
        for (int i = 0; i < num_levels; ++i)
            pyrDown(pyr[i], pyr[i + 1]);
        UMat tmp;
        for (int i = 0; i < num_levels; ++i)
        {
            pyrUp(pyr[i + 1], tmp, pyr[i].size());
            subtract(pyr[i], tmp, pyr[i]);
        }
    }
}
837
838
839
void createLaplacePyrGpu(InputArray img, int num_levels, std::vector<UMat> &pyr)
{
#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
    // Build the Laplacian pyramid on the GPU, downloading each finished level.
    pyr.resize(num_levels + 1);

    std::vector<cuda::GpuMat> device_pyr(num_levels + 1);
    device_pyr[0].upload(img);
    for (int lvl = 0; lvl < num_levels; ++lvl)
        cuda::pyrDown(device_pyr[lvl], device_pyr[lvl + 1]);

    cuda::GpuMat upsampled;
    for (int lvl = 0; lvl < num_levels; ++lvl)
    {
        // Band-pass level = Gaussian level minus the upsampled coarser level.
        cuda::pyrUp(device_pyr[lvl + 1], upsampled);
        cuda::subtract(device_pyr[lvl], upsampled, device_pyr[lvl]);
        device_pyr[lvl].download(pyr[lvl]);
    }

    // The coarsest level keeps the plain Gaussian residual.
    device_pyr[num_levels].download(pyr[num_levels]);
#else
    CV_UNUSED(img);
    CV_UNUSED(num_levels);
    CV_UNUSED(pyr);
    CV_Error(Error::StsNotImplemented, "CUDA optimization is unavailable");
#endif
}
865
866
867
void restoreImageFromLaplacePyr(std::vector<UMat> &pyr)
868
{
869
if (pyr.empty())
870
return;
871
UMat tmp;
872
for (size_t i = pyr.size() - 1; i > 0; --i)
873
{
874
pyrUp(pyr[i], tmp, pyr[i - 1].size());
875
add(tmp, pyr[i - 1], pyr[i - 1]);
876
}
877
}
878
879
880
void restoreImageFromLaplacePyrGpu(std::vector<UMat> &pyr)
881
{
882
#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
883
if (pyr.empty())
884
return;
885
886
std::vector<cuda::GpuMat> gpu_pyr(pyr.size());
887
for (size_t i = 0; i < pyr.size(); ++i)
888
gpu_pyr[i].upload(pyr[i]);
889
890
cuda::GpuMat tmp;
891
for (size_t i = pyr.size() - 1; i > 0; --i)
892
{
893
cuda::pyrUp(gpu_pyr[i], tmp);
894
cuda::add(tmp, gpu_pyr[i - 1], gpu_pyr[i - 1]);
895
}
896
897
gpu_pyr[0].download(pyr[0]);
898
#else
899
CV_UNUSED(pyr);
900
CV_Error(Error::StsNotImplemented, "CUDA optimization is unavailable");
901
#endif
902
}
903
904
} // namespace detail
905
} // namespace cv
906
907