Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/gapi/src/backends/cpu/gcpubackend.cpp
16344 views
1
// This file is part of OpenCV project.
2
// It is subject to the license terms in the LICENSE file found in the top-level directory
3
// of this distribution and at http://opencv.org/license.html.
4
//
5
// Copyright (C) 2018 Intel Corporation
6
7
8
#include "precomp.hpp"
9
10
#include <functional>
11
#include <unordered_set>
12
13
#include <ade/util/algorithm.hpp>
14
15
#include <ade/util/range.hpp>
16
#include <ade/util/zip_range.hpp>
17
#include <ade/util/chain_range.hpp>
18
19
#include <ade/typed_graph.hpp>
20
21
#include "opencv2/gapi/gcommon.hpp"
22
#include "opencv2/gapi/util/any.hpp"
23
#include "opencv2/gapi/gtype_traits.hpp"
24
25
#include "compiler/gobjref.hpp"
26
#include "compiler/gmodel.hpp"
27
28
#include "backends/cpu/gcpubackend.hpp"
29
#include "backends/cpu/gcpuimgproc.hpp"
30
#include "backends/cpu/gcpucore.hpp"
31
32
#include "api/gbackend_priv.hpp" // FIXME: Make it part of Backend SDK!
33
34
// FIXME: Is there a way to take a typed graph (our GModel),
35
// and create a new typed graph _ATOP_ of that (by extending with a couple of
36
// new types?).
37
// Alternatively, is there a way to compose types graphs?
38
//
39
// If not, we need to introduce that!
40
using GCPUModel = ade::TypedGraph
41
< cv::gimpl::Unit
42
, cv::gimpl::Protocol
43
>;
44
45
// FIXME: Same issue with Typed and ConstTyped
46
using GConstGCPUModel = ade::ConstTypedGraph
47
< cv::gimpl::Unit
48
, cv::gimpl::Protocol
49
>;
50
51
namespace
52
{
53
class GCPUBackendImpl final: public cv::gapi::GBackend::Priv
54
{
55
virtual void unpackKernel(ade::Graph &graph,
56
const ade::NodeHandle &op_node,
57
const cv::GKernelImpl &impl) override
58
{
59
GCPUModel gm(graph);
60
auto cpu_impl = cv::util::any_cast<cv::GCPUKernel>(impl.opaque);
61
gm.metadata(op_node).set(cv::gimpl::Unit{cpu_impl});
62
}
63
64
virtual EPtr compile(const ade::Graph &graph,
65
const cv::GCompileArgs &,
66
const std::vector<ade::NodeHandle> &nodes) const override
67
{
68
return EPtr{new cv::gimpl::GCPUExecutable(graph, nodes)};
69
}
70
};
71
}
72
73
// Return the process-wide CPU backend handle.
cv::gapi::GBackend cv::gapi::cpu::backend()
{
    // Meyers singleton: the backend object is constructed on first call only.
    static cv::gapi::GBackend s_backend(std::make_shared<GCPUBackendImpl>());
    return s_backend;
}
78
79
// GCPUExcecutable implementation //////////////////////////////////////////////
80
cv::gimpl::GCPUExecutable::GCPUExecutable(const ade::Graph &g,
81
const std::vector<ade::NodeHandle> &nodes)
82
: m_g(g), m_gm(m_g)
83
{
84
// Convert list of operations (which is topologically sorted already)
85
// into an execution script.
86
for (auto &nh : nodes)
87
{
88
switch (m_gm.metadata(nh).get<NodeType>().t)
89
{
90
case NodeType::OP: m_script.push_back({nh, GModel::collectOutputMeta(m_gm, nh)}); break;
91
case NodeType::DATA:
92
{
93
m_dataNodes.push_back(nh);
94
const auto &desc = m_gm.metadata(nh).get<Data>();
95
if (desc.storage == Data::Storage::CONST)
96
{
97
auto rc = RcDesc{desc.rc, desc.shape, desc.ctor};
98
magazine::bindInArg(m_res, rc, m_gm.metadata(nh).get<ConstValue>().arg);
99
}
100
//preallocate internal Mats in advance
101
if (desc.storage == Data::Storage::INTERNAL && desc.shape == GShape::GMAT)
102
{
103
const auto mat_desc = util::get<cv::GMatDesc>(desc.meta);
104
const auto type = CV_MAKETYPE(mat_desc.depth, mat_desc.chan);
105
m_res.slot<cv::gapi::own::Mat>()[desc.rc].create(mat_desc.size, type);
106
}
107
break;
108
}
109
default: util::throw_error(std::logic_error("Unsupported NodeType type"));
110
}
111
}
112
}
113
114
// FIXME: Document what it does
115
cv::GArg cv::gimpl::GCPUExecutable::packArg(const GArg &arg)
116
{
117
// No API placeholders allowed at this point
118
// FIXME: this check has to be done somewhere in compilation stage.
119
GAPI_Assert( arg.kind != cv::detail::ArgKind::GMAT
120
&& arg.kind != cv::detail::ArgKind::GSCALAR
121
&& arg.kind != cv::detail::ArgKind::GARRAY);
122
123
if (arg.kind != cv::detail::ArgKind::GOBJREF)
124
{
125
// All other cases - pass as-is, with no transformations to GArg contents.
126
return arg;
127
}
128
GAPI_Assert(arg.kind == cv::detail::ArgKind::GOBJREF);
129
130
// Wrap associated CPU object (either host or an internal one)
131
// FIXME: object can be moved out!!! GExecutor faced that.
132
const cv::gimpl::RcDesc &ref = arg.get<cv::gimpl::RcDesc>();
133
switch (ref.shape)
134
{
135
case GShape::GMAT: return GArg(m_res.slot<cv::gapi::own::Mat>() [ref.id]);
136
case GShape::GSCALAR: return GArg(m_res.slot<cv::gapi::own::Scalar>()[ref.id]);
137
// Note: .at() is intentional for GArray as object MUST be already there
138
// (and constructed by either bindIn/Out or resetInternal)
139
case GShape::GARRAY: return GArg(m_res.slot<cv::detail::VectorRef>().at(ref.id));
140
default:
141
util::throw_error(std::logic_error("Unsupported GShape type"));
142
break;
143
}
144
}
145
146
void cv::gimpl::GCPUExecutable::run(std::vector<InObj> &&input_objs,
147
std::vector<OutObj> &&output_objs)
148
{
149
// Update resources with run-time information - what this Island
150
// has received from user (or from another Island, or mix...)
151
// FIXME: Check input/output objects against GIsland protocol
152
153
for (auto& it : input_objs) magazine::bindInArg (m_res, it.first, it.second);
154
for (auto& it : output_objs) magazine::bindOutArg(m_res, it.first, it.second);
155
156
// Initialize (reset) internal data nodes with user structures
157
// before processing a frame (no need to do it for external data structures)
158
GModel::ConstGraph gm(m_g);
159
for (auto nh : m_dataNodes)
160
{
161
const auto &desc = gm.metadata(nh).get<Data>();
162
163
if ( desc.storage == Data::Storage::INTERNAL
164
&& !util::holds_alternative<util::monostate>(desc.ctor))
165
{
166
// FIXME: Note that compile-time constant data objects (like
167
// a value-initialized GArray<T>) also satisfy this condition
168
// and should be excluded, but now we just don't support it
169
magazine::resetInternalData(m_res, desc);
170
}
171
}
172
173
// OpenCV backend execution is not a rocket science at all.
174
// Simply invoke our kernels in the proper order.
175
GConstGCPUModel gcm(m_g);
176
for (auto &op_info : m_script)
177
{
178
const auto &op = m_gm.metadata(op_info.nh).get<Op>();
179
180
// Obtain our real execution unit
181
// TODO: Should kernels be copyable?
182
GCPUKernel k = gcm.metadata(op_info.nh).get<Unit>().k;
183
184
// Initialize kernel's execution context:
185
// - Input parameters
186
GCPUContext context;
187
context.m_args.reserve(op.args.size());
188
189
using namespace std::placeholders;
190
ade::util::transform(op.args,
191
std::back_inserter(context.m_args),
192
std::bind(&GCPUExecutable::packArg, this, _1));
193
194
// - Output parameters.
195
// FIXME: pre-allocate internal Mats, etc, according to the known meta
196
for (const auto &out_it : ade::util::indexed(op.outs))
197
{
198
// FIXME: Can the same GArg type resolution mechanism be reused here?
199
const auto out_port = ade::util::index(out_it);
200
const auto out_desc = ade::util::value(out_it);
201
context.m_results[out_port] = magazine::getObjPtr(m_res, out_desc);
202
}
203
204
// Now trigger the executable unit
205
k.apply(context);
206
207
//As Kernels are forbidden to allocate memory for (Mat) outputs,
208
//this code seems redundant, at least for Mats
209
//FIXME: unify with cv::detail::ensure_out_mats_not_reallocated
210
for (const auto &out_it : ade::util::indexed(op_info.expected_out_metas))
211
{
212
const auto out_index = ade::util::index(out_it);
213
const auto expected_meta = ade::util::value(out_it);
214
const auto out_meta = descr_of(context.m_results[out_index]);
215
216
if (expected_meta != out_meta)
217
{
218
util::throw_error
219
(std::logic_error
220
("Output meta doesn't "
221
"coincide with the generated meta\n"
222
"Expected: " + ade::util::to_string(expected_meta) + "\n"
223
"Actual : " + ade::util::to_string(out_meta)));
224
}
225
}
226
} // for(m_script)
227
228
for (auto &it : output_objs) magazine::writeBack(m_res, it.first, it.second);
229
}
230
231