GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/dnn/src/layers/layers_common.simd.hpp
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "opencv2/core/hal/intrin.hpp"

namespace cv {
namespace dnn {
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN

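// Kernels declared here (definitions below are compiled only for the AVX/AVX-512
// dispatch targets):
//  - fastConv:   convolution inner loop over an im2row-transformed buffer, with
//                optional bias initialization and fused (leaky) ReLU activation;
//  - fastGEMM1T: matrix-vector product, dst = vec * weights^T + bias;
//  - fastGEMM:   general matrix-matrix product C = A * B.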
void fastConv( const float* weights, size_t wstep, const float* bias,
               const float* rowbuf, float* output, const int* outShape,
               int blockSize, int vecsize, int vecsize_aligned,
               const float* relu, bool initOutput );
void fastGEMM1T( const float* vec, const float* weights,
                 size_t wstep, const float* bias,
                 float* dst, int nvecs, int vecsize );
void fastGEMM( const float* aptr, size_t astep, const float* bptr,
               size_t bstep, float* cptr, size_t cstep,
               int ma, int na, int nb );

#if !defined(CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY) && CV_AVX

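// Without FMA3, emulate the fused multiply-add with a separate multiply and add.
// The emulated form rounds the intermediate product, so results can differ
// slightly from true FMA hardware.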
#if !CV_FMA3 // AVX workaround
#undef _mm256_fmadd_ps
#define _mm256_fmadd_ps(a, b, c) _mm256_add_ps(c, _mm256_mul_ps(a, b))
#endif

void fastConv( const float* weights, size_t wstep, const float* bias,
               const float* rowbuf, float* output, const int* outShape,
               int blockSize, int vecsize, int vecsize_aligned,
               const float* relu, bool initOutput )
{
    int outCn = outShape[1];
    size_t outPlaneSize = outShape[2]*outShape[3];
    float r0 = 1.f, r1 = 1.f, r2 = 1.f;
    __m128 vr0 = _mm_set1_ps(1.f), vr1 = vr0, vr2 = vr0, z = _mm_setzero_ps();

    // now compute dot product of the weights
    // and im2row-transformed part of the tensor
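    // Register blocking: each outer iteration produces 3 output channels
    // (weight rows i, i+1, i+2), and the inner j-loop produces 4 output
    // positions at a time. Each dot product is vecsize elements long;
    // consecutive im2row columns in rowbuf are spaced vecsize_aligned floats apart.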
    for( int i = 0; i < outCn; i += 3 )
    {
        const float* wptr0 = weights + i*wstep;
        const float* wptr1 = wptr0 + wstep;
        const float* wptr2 = wptr1 + wstep;
        float* outptr0 = output + i*outPlaneSize;
        float* outptr1 = outptr0 + outPlaneSize;
        float* outptr2 = outptr1 + outPlaneSize;
        float bias0 = bias[i], bias1 = bias[i+1], bias2 = bias[i+2];

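        // When outCn is not a multiple of 3, alias the out-of-range channel(s)
        // onto the last valid one, so the final iteration redundantly recomputes
        // an existing row instead of reading or writing past the end of the arrays.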
        if( i+2 >= outCn )
        {
            wptr2 = wptr1;
            outptr2 = outptr1;
            bias2 = bias1;
            if( i+1 >= outCn )
            {
                wptr2 = wptr1 = wptr0;
                outptr2 = outptr1 = outptr0;
                bias2 = bias1 = bias0;
            }
        }

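        // If an activation is fused, relu[] holds one negative-side slope per
        // output channel (leaky ReLU / per-channel PReLU); broadcast the slopes
        // for the current three channels into SSE registers.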
        if( relu )
        {
            r0 = relu[i]; r1 = relu[i+1]; r2 = relu[i+2];
            if( i+2 >= outCn )
            {
                r2 = r1;
                if( i+1 >= outCn )
                    r2 = r1 = r0;
            }
            vr0 = _mm_set1_ps(r0);
            vr1 = _mm_set1_ps(r1);
            vr2 = _mm_set1_ps(r2);
        }

        int j = 0;
        for( ; j <= blockSize - 4; j += 4 )
        {
            int k = 0;
            const float* rptr = rowbuf + j*vecsize_aligned;

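            // 12 independent accumulators: 3 output channels x 4 output positions.
            // Keeping them separate helps hide FMA latency across loop iterations.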
            __m256 vs00 = _mm256_setzero_ps(), vs01 = _mm256_setzero_ps(),
                   vs02 = _mm256_setzero_ps(), vs03 = _mm256_setzero_ps(),
                   vs10 = _mm256_setzero_ps(), vs11 = _mm256_setzero_ps(),
                   vs12 = _mm256_setzero_ps(), vs13 = _mm256_setzero_ps(),
                   vs20 = _mm256_setzero_ps(), vs21 = _mm256_setzero_ps(),
                   vs22 = _mm256_setzero_ps(), vs23 = _mm256_setzero_ps();

#if CV_AVX512_SKX // AVX512VL is necessary to avoid register spilling
            if (vecsize >= 32)
            {
                __m512 vs00_5 = _mm512_setzero_ps(), vs01_5 = _mm512_setzero_ps(),
                       vs02_5 = _mm512_setzero_ps(), vs03_5 = _mm512_setzero_ps(),
                       vs10_5 = _mm512_setzero_ps(), vs11_5 = _mm512_setzero_ps(),
                       vs12_5 = _mm512_setzero_ps(), vs13_5 = _mm512_setzero_ps(),
                       vs20_5 = _mm512_setzero_ps(), vs21_5 = _mm512_setzero_ps(),
                       vs22_5 = _mm512_setzero_ps(), vs23_5 = _mm512_setzero_ps();

                for (; k <= vecsize - 16; k += 16, rptr += 16)
                {
                    __m512 w0 = _mm512_loadu_ps(wptr0 + k);
                    __m512 w1 = _mm512_loadu_ps(wptr1 + k);
                    __m512 w2 = _mm512_loadu_ps(wptr2 + k);
                    __m512 r0 = _mm512_loadu_ps(rptr);

                    vs00_5 = _mm512_fmadd_ps(w0, r0, vs00_5);
                    vs10_5 = _mm512_fmadd_ps(w1, r0, vs10_5);
                    vs20_5 = _mm512_fmadd_ps(w2, r0, vs20_5);

                    r0 = _mm512_loadu_ps(rptr + vecsize_aligned);
                    vs01_5 = _mm512_fmadd_ps(w0, r0, vs01_5);
                    vs11_5 = _mm512_fmadd_ps(w1, r0, vs11_5);
                    vs21_5 = _mm512_fmadd_ps(w2, r0, vs21_5);

                    r0 = _mm512_loadu_ps(rptr + vecsize_aligned*2);
                    vs02_5 = _mm512_fmadd_ps(w0, r0, vs02_5);
                    vs12_5 = _mm512_fmadd_ps(w1, r0, vs12_5);
                    vs22_5 = _mm512_fmadd_ps(w2, r0, vs22_5);

                    r0 = _mm512_loadu_ps(rptr + vecsize_aligned*3);
                    vs03_5 = _mm512_fmadd_ps(w0, r0, vs03_5);
                    vs13_5 = _mm512_fmadd_ps(w1, r0, vs13_5);
                    vs23_5 = _mm512_fmadd_ps(w2, r0, vs23_5);
                }
                /*
                 * now fold the 512 bit accumulator vectors into 256 bit vectors so that the AVX2 code can finish
                 * the tail of the vector
                 */
                vs00 = _mm256_add_ps( _mm512_extractf32x8_ps(vs00_5, 0), _mm512_extractf32x8_ps(vs00_5, 1));
                vs10 = _mm256_add_ps( _mm512_extractf32x8_ps(vs10_5, 0), _mm512_extractf32x8_ps(vs10_5, 1));
                vs20 = _mm256_add_ps( _mm512_extractf32x8_ps(vs20_5, 0), _mm512_extractf32x8_ps(vs20_5, 1));

                vs01 = _mm256_add_ps( _mm512_extractf32x8_ps(vs01_5, 0), _mm512_extractf32x8_ps(vs01_5, 1));
                vs11 = _mm256_add_ps( _mm512_extractf32x8_ps(vs11_5, 0), _mm512_extractf32x8_ps(vs11_5, 1));
                vs21 = _mm256_add_ps( _mm512_extractf32x8_ps(vs21_5, 0), _mm512_extractf32x8_ps(vs21_5, 1));

                vs02 = _mm256_add_ps( _mm512_extractf32x8_ps(vs02_5, 0), _mm512_extractf32x8_ps(vs02_5, 1));
                vs12 = _mm256_add_ps( _mm512_extractf32x8_ps(vs12_5, 0), _mm512_extractf32x8_ps(vs12_5, 1));
                vs22 = _mm256_add_ps( _mm512_extractf32x8_ps(vs22_5, 0), _mm512_extractf32x8_ps(vs22_5, 1));

                vs03 = _mm256_add_ps( _mm512_extractf32x8_ps(vs03_5, 0), _mm512_extractf32x8_ps(vs03_5, 1));
                vs13 = _mm256_add_ps( _mm512_extractf32x8_ps(vs13_5, 0), _mm512_extractf32x8_ps(vs13_5, 1));
                vs23 = _mm256_add_ps( _mm512_extractf32x8_ps(vs23_5, 0), _mm512_extractf32x8_ps(vs23_5, 1));
            }
#endif

            for (; k < vecsize; k += 8, rptr += 8 )
            {
                __m256 w0 = _mm256_load_ps(wptr0 + k);
                __m256 w1 = _mm256_load_ps(wptr1 + k);
                __m256 w2 = _mm256_load_ps(wptr2 + k);
                __m256 r0 = _mm256_load_ps(rptr);

                vs00 = _mm256_fmadd_ps(w0, r0, vs00);
                vs10 = _mm256_fmadd_ps(w1, r0, vs10);
                vs20 = _mm256_fmadd_ps(w2, r0, vs20);

                r0 = _mm256_load_ps(rptr + vecsize_aligned);
                vs01 = _mm256_fmadd_ps(w0, r0, vs01);
                vs11 = _mm256_fmadd_ps(w1, r0, vs11);
                vs21 = _mm256_fmadd_ps(w2, r0, vs21);

                r0 = _mm256_load_ps(rptr + vecsize_aligned*2);
                vs02 = _mm256_fmadd_ps(w0, r0, vs02);
                vs12 = _mm256_fmadd_ps(w1, r0, vs12);
                vs22 = _mm256_fmadd_ps(w2, r0, vs22);

                r0 = _mm256_load_ps(rptr + vecsize_aligned*3);
                vs03 = _mm256_fmadd_ps(w0, r0, vs03);
                vs13 = _mm256_fmadd_ps(w1, r0, vs13);
                vs23 = _mm256_fmadd_ps(w2, r0, vs23);
            }

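            // Horizontally reduce each group of four 8-lane accumulators:
            // after the two hadd steps and the cross-lane add, the low 128 bits
            // of t0/t1/t2 hold the four per-position sums for one output channel.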
            __m256 t0 = _mm256_hadd_ps(_mm256_hadd_ps(vs00, vs01), _mm256_hadd_ps(vs02, vs03));
            __m256 t1 = _mm256_hadd_ps(_mm256_hadd_ps(vs10, vs11), _mm256_hadd_ps(vs12, vs13));
            __m256 t2 = _mm256_hadd_ps(_mm256_hadd_ps(vs20, vs21), _mm256_hadd_ps(vs22, vs23));

            t0 = _mm256_add_ps(t0, _mm256_permute2f128_ps(t0, t0, 1));
            t1 = _mm256_add_ps(t1, _mm256_permute2f128_ps(t1, t1, 1));
            t2 = _mm256_add_ps(t2, _mm256_permute2f128_ps(t2, t2, 1));

            __m128 s0, s1, s2;

            if( initOutput )
            {
                s0 = _mm_set1_ps(bias0);
                s1 = _mm_set1_ps(bias1);
                s2 = _mm_set1_ps(bias2);
            }
            else
            {
                s0 = _mm_loadu_ps(outptr0 + j);
                s1 = _mm_loadu_ps(outptr1 + j);
                s2 = _mm_loadu_ps(outptr2 + j);
            }

            s0 = _mm_add_ps(s0, _mm256_castps256_ps128(t0));
            s1 = _mm_add_ps(s1, _mm256_castps256_ps128(t1));
            s2 = _mm_add_ps(s2, _mm256_castps256_ps128(t2));

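            // Branch-free leaky ReLU on the four packed outputs:
            //   m = (s > 0);  s = m ? s : s*slope
            // implemented with the xor/andnot blend identity sel(m, a, b) = a ^ (~m & (b ^ a)).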
            if( relu )
            {
                __m128 m0 = _mm_cmp_ps(s0, z, _CMP_GT_OS);
                __m128 m1 = _mm_cmp_ps(s1, z, _CMP_GT_OS);
                __m128 m2 = _mm_cmp_ps(s2, z, _CMP_GT_OS);
                s0 = _mm_xor_ps(s0, _mm_andnot_ps(m0, _mm_xor_ps(_mm_mul_ps(s0, vr0), s0)));
                s1 = _mm_xor_ps(s1, _mm_andnot_ps(m1, _mm_xor_ps(_mm_mul_ps(s1, vr1), s1)));
                s2 = _mm_xor_ps(s2, _mm_andnot_ps(m2, _mm_xor_ps(_mm_mul_ps(s2, vr2), s2)));
            }

            _mm_storeu_ps(outptr0 + j, s0);
            _mm_storeu_ps(outptr1 + j, s1);
            _mm_storeu_ps(outptr2 + j, s2);
        }

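        // Scalar tail: process the remaining (blockSize % 4) output positions one at a time.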
        for( ; j < blockSize; j++ )
        {
            const float* rptr = rowbuf + j*vecsize_aligned;
            float s00, s10, s20;

            if( initOutput )
            {
                s00 = bias0;
                s10 = bias1;
                s20 = bias2;
            }
            else
            {
                s00 = outptr0[j];
                s10 = outptr1[j];
                s20 = outptr2[j];
            }

            for( int k = 0; k < vecsize; k++ )
            {
                float r0 = rptr[k];
                s00 += wptr0[k]*r0;
                s10 += wptr1[k]*r0;
                s20 += wptr2[k]*r0;
            }

            if( relu )
            {
                s00 = s00 > 0.f ? s00 : s00*r0;
                s10 = s10 > 0.f ? s10 : s10*r1;
                s20 = s20 > 0.f ? s20 : s20*r2;
            }

            outptr0[j] = s00;
            outptr1[j] = s10;
            outptr2[j] = s20;
        }
    }
    _mm256_zeroupper();
}

// dst = vec * weights^t + bias
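// The main loop below processes 8 weight rows per iteration using aligned
// 256-bit loads and has no remainder handling, so vec and each weights row are
// expected to be 32-byte aligned with vecsize padded to a multiple of 8.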
void fastGEMM1T( const float* vec, const float* weights,
                 size_t wstep, const float* bias,
                 float* dst, int nvecs, int vecsize )
{
    int i = 0;

    for( ; i <= nvecs - 8; i += 8 )
    {
        const float* wptr = weights + i*wstep;
        __m256 vs0 = _mm256_setzero_ps(), vs1 = _mm256_setzero_ps(),
               vs2 = _mm256_setzero_ps(), vs3 = _mm256_setzero_ps(),
               vs4 = _mm256_setzero_ps(), vs5 = _mm256_setzero_ps(),
               vs6 = _mm256_setzero_ps(), vs7 = _mm256_setzero_ps();

        for( int k = 0; k < vecsize; k += 8, wptr += 8 )
        {
            __m256 v = _mm256_load_ps(vec + k);

            vs0 = _mm256_fmadd_ps(_mm256_load_ps(wptr), v, vs0);
            vs1 = _mm256_fmadd_ps(_mm256_load_ps(wptr + wstep), v, vs1);
            vs2 = _mm256_fmadd_ps(_mm256_load_ps(wptr + wstep*2), v, vs2);
            vs3 = _mm256_fmadd_ps(_mm256_load_ps(wptr + wstep*3), v, vs3);
            vs4 = _mm256_fmadd_ps(_mm256_load_ps(wptr + wstep*4), v, vs4);
            vs5 = _mm256_fmadd_ps(_mm256_load_ps(wptr + wstep*5), v, vs5);
            vs6 = _mm256_fmadd_ps(_mm256_load_ps(wptr + wstep*6), v, vs6);
            vs7 = _mm256_fmadd_ps(_mm256_load_ps(wptr + wstep*7), v, vs7);
        }

        __m256 s0 = _mm256_hadd_ps(_mm256_hadd_ps(vs0, vs1), _mm256_hadd_ps(vs2, vs3));
        __m256 s1 = _mm256_hadd_ps(_mm256_hadd_ps(vs4, vs5), _mm256_hadd_ps(vs6, vs7));

        s0 = _mm256_add_ps(s0, _mm256_permute2f128_ps(s0, s0, 1));
        s1 = _mm256_add_ps(s1, _mm256_permute2f128_ps(s1, s1, 1));

        s0 = _mm256_add_ps(s0, _mm256_castps128_ps256(_mm_loadu_ps(bias + i)));
        s1 = _mm256_add_ps(s1, _mm256_castps128_ps256(_mm_loadu_ps(bias + i + 4)));

        _mm_storeu_ps(dst + i, _mm256_castps256_ps128(s0));
        _mm_storeu_ps(dst + i + 4, _mm256_castps256_ps128(s1));
    }

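    // Remaining rows (nvecs % 8): one dot product at a time, reduced to a scalar.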
    float temp = 0.f;
    for( ; i < nvecs; i++ )
    {
        const float* wptr = weights + i*wstep;
        __m256 vs0 = _mm256_setzero_ps();

        for( int k = 0; k < vecsize; k += 8, wptr += 8 )
        {
            __m256 v = _mm256_load_ps(vec + k);
            vs0 = _mm256_fmadd_ps(_mm256_load_ps(wptr), v, vs0);
        }

        __m256 s0 = _mm256_hadd_ps(_mm256_hadd_ps(vs0, vs0), vs0);
        s0 = _mm256_add_ps(s0, _mm256_permute2f128_ps(s0, s0, 1));
        _mm_store_ss(&temp, _mm256_castps256_ps128(s0));
        dst[i] = temp + bias[i];
    }

    _mm256_zeroupper();
}


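// C(ma x nb) = A(ma x na) * B(na x nb); astep, bstep and cstep are row strides in floats.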
void fastGEMM( const float* aptr, size_t astep, const float* bptr,
               size_t bstep, float* cptr, size_t cstep,
               int ma, int na, int nb )
{
    int n = 0;

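    // AVX-512 path: compute a 4 x 32 tile of C per iteration with 8 zmm accumulators.
    // Row indices are clamped to ma-1, so a partial last tile redundantly recomputes
    // the final row of C instead of reading or writing out of bounds.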
#if CV_AVX512_SKX // AVX512VL is necessary to avoid register spilling
    for( ; n <= nb - 32; n += 32 )
    {
        for( int m = 0; m < ma; m += 4 )
        {
            const float* aptr0 = aptr + astep*m;
            const float* aptr1 = aptr + astep*std::min(m+1, ma-1);
            const float* aptr2 = aptr + astep*std::min(m+2, ma-1);
            const float* aptr3 = aptr + astep*std::min(m+3, ma-1);

            float* cptr0 = cptr + cstep*m;
            float* cptr1 = cptr + cstep*std::min(m+1, ma-1);
            float* cptr2 = cptr + cstep*std::min(m+2, ma-1);
            float* cptr3 = cptr + cstep*std::min(m+3, ma-1);

            __m512 d00 = _mm512_setzero_ps(), d01 = _mm512_setzero_ps();
            __m512 d10 = _mm512_setzero_ps(), d11 = _mm512_setzero_ps();
            __m512 d20 = _mm512_setzero_ps(), d21 = _mm512_setzero_ps();
            __m512 d30 = _mm512_setzero_ps(), d31 = _mm512_setzero_ps();

            for( int k = 0; k < na; k++ )
            {
                __m512 a0 = _mm512_set1_ps(aptr0[k]);
                __m512 a1 = _mm512_set1_ps(aptr1[k]);
                __m512 a2 = _mm512_set1_ps(aptr2[k]);
                __m512 a3 = _mm512_set1_ps(aptr3[k]);
                __m512 b0 = _mm512_loadu_ps(bptr + k*bstep + n);
                __m512 b1 = _mm512_loadu_ps(bptr + k*bstep + n + 16);
                d00 = _mm512_fmadd_ps(a0, b0, d00);
                d01 = _mm512_fmadd_ps(a0, b1, d01);
                d10 = _mm512_fmadd_ps(a1, b0, d10);
                d11 = _mm512_fmadd_ps(a1, b1, d11);
                d20 = _mm512_fmadd_ps(a2, b0, d20);
                d21 = _mm512_fmadd_ps(a2, b1, d21);
                d30 = _mm512_fmadd_ps(a3, b0, d30);
                d31 = _mm512_fmadd_ps(a3, b1, d31);
            }

            _mm512_storeu_ps(cptr0 + n, d00);
            _mm512_storeu_ps(cptr0 + n + 16, d01);
            _mm512_storeu_ps(cptr1 + n, d10);
            _mm512_storeu_ps(cptr1 + n + 16, d11);
            _mm512_storeu_ps(cptr2 + n, d20);
            _mm512_storeu_ps(cptr2 + n + 16, d21);
            _mm512_storeu_ps(cptr3 + n, d30);
            _mm512_storeu_ps(cptr3 + n + 16, d31);
        }
    }
#endif

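    // AVX path: compute a 4 x 16 tile of C per iteration with 8 ymm accumulators;
    // it also finishes any columns left over from the AVX-512 loop above.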
    for( ; n <= nb - 16; n += 16 )
    {
        for( int m = 0; m < ma; m += 4 )
        {
            const float* aptr0 = aptr + astep*m;
            const float* aptr1 = aptr + astep*std::min(m+1, ma-1);
            const float* aptr2 = aptr + astep*std::min(m+2, ma-1);
            const float* aptr3 = aptr + astep*std::min(m+3, ma-1);

            float* cptr0 = cptr + cstep*m;
            float* cptr1 = cptr + cstep*std::min(m+1, ma-1);
            float* cptr2 = cptr + cstep*std::min(m+2, ma-1);
            float* cptr3 = cptr + cstep*std::min(m+3, ma-1);

            __m256 d00 = _mm256_setzero_ps(), d01 = _mm256_setzero_ps();
            __m256 d10 = _mm256_setzero_ps(), d11 = _mm256_setzero_ps();
            __m256 d20 = _mm256_setzero_ps(), d21 = _mm256_setzero_ps();
            __m256 d30 = _mm256_setzero_ps(), d31 = _mm256_setzero_ps();

            for( int k = 0; k < na; k++ )
            {
                __m256 a0 = _mm256_set1_ps(aptr0[k]);
                __m256 a1 = _mm256_set1_ps(aptr1[k]);
                __m256 a2 = _mm256_set1_ps(aptr2[k]);
                __m256 a3 = _mm256_set1_ps(aptr3[k]);
                __m256 b0 = _mm256_loadu_ps(bptr + k*bstep + n);
                __m256 b1 = _mm256_loadu_ps(bptr + k*bstep + n + 8);
                d00 = _mm256_fmadd_ps(a0, b0, d00);
                d01 = _mm256_fmadd_ps(a0, b1, d01);
                d10 = _mm256_fmadd_ps(a1, b0, d10);
                d11 = _mm256_fmadd_ps(a1, b1, d11);
                d20 = _mm256_fmadd_ps(a2, b0, d20);
                d21 = _mm256_fmadd_ps(a2, b1, d21);
                d30 = _mm256_fmadd_ps(a3, b0, d30);
                d31 = _mm256_fmadd_ps(a3, b1, d31);
            }

            _mm256_storeu_ps(cptr0 + n, d00);
            _mm256_storeu_ps(cptr0 + n + 8, d01);
            _mm256_storeu_ps(cptr1 + n, d10);
            _mm256_storeu_ps(cptr1 + n + 8, d11);
            _mm256_storeu_ps(cptr2 + n, d20);
            _mm256_storeu_ps(cptr2 + n + 8, d21);
            _mm256_storeu_ps(cptr3 + n, d30);
            _mm256_storeu_ps(cptr3 + n + 8, d31);
        }
    }

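    // Scalar tail for the remaining columns of C.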
    for( ; n < nb; n++ )
    {
        for( int m = 0; m < ma; m++ )
        {
            const float* aptr0 = aptr + astep*m;
            float* cptr0 = cptr + cstep*m;
            float d0 = 0.f;

            for( int k = 0; k < na; k++ )
                d0 += aptr0[k]*bptr[k*bstep + n];

            cptr0[n] = d0;
        }
    }
    _mm256_zeroupper();
}

#endif // CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY

CV_CPU_OPTIMIZATION_NAMESPACE_END
}} // namespace