/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Copyright (C) 2014-2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"
#include "opencv2/core/hal/intrin.hpp"  // universal intrinsics (v_float32, vx_load, ...)

#define CV_GEMM_BASELINE_ONLY
#define CV_MAHALANOBIS_BASELINE_ONLY
#define CV_MULTRANSPOSED_BASELINE_ONLY

namespace cv
{

// forward declarations
typedef void (*TransformFunc)(const uchar* src, uchar* dst, const uchar* m, int len, int scn, int dcn);
typedef void (*ScaleAddFunc)(const uchar* src1, const uchar* src2, uchar* dst, int len, const void* alpha);
typedef void (*MulTransposedFunc)(const Mat& src, const/*preallocated*/ Mat& dst, const Mat& delta, double scale);
typedef double (*MahalanobisImplFunc)(const Mat& v1, const Mat& v2, const Mat& icovar, double *diff_buffer /*[len]*/, int len /*=v1.total()*/);

CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN

// forward declarations

void gemm32f(const float* src1, size_t src1_step, const float* src2, size_t src2_step,
             float alpha, const float* src3, size_t src3_step, float beta, float* dst, size_t dst_step,
             int m_a, int n_a, int n_d, int flags);
void gemm64f(const double* src1, size_t src1_step, const double* src2, size_t src2_step,
             double alpha, const double* src3, size_t src3_step, double beta, double* dst, size_t dst_step,
             int m_a, int n_a, int n_d, int flags);
void gemm32fc(const float* src1, size_t src1_step, const float* src2, size_t src2_step,
              float alpha, const float* src3, size_t src3_step, float beta, float* dst, size_t dst_step,
              int m_a, int n_a, int n_d, int flags);
void gemm64fc(const double* src1, size_t src1_step, const double* src2, size_t src2_step,
              double alpha, const double* src3, size_t src3_step, double beta, double* dst, size_t dst_step,
              int m_a, int n_a, int n_d, int flags);

TransformFunc getTransformFunc(int depth);
TransformFunc getDiagTransformFunc(int depth);
TransformFunc getPerspectiveTransform(int depth);

ScaleAddFunc getScaleAddFunc(int depth);

MahalanobisImplFunc getMahalanobisImplFunc(int depth);

MulTransposedFunc getMulTransposedFunc(int stype, int dtype, bool ata);

double dotProd_8u(const uchar* src1, const uchar* src2, int len);
double dotProd_8s(const schar* src1, const schar* src2, int len);
double dotProd_16u(const ushort* src1, const ushort* src2, int len);
double dotProd_16s(const short* src1, const short* src2, int len);
double dotProd_32s(const int* src1, const int* src2, int len);
double dotProd_32f(const float* src1, const float* src2, int len);
double dotProd_64f(const double* src1, const double* src2, int len);
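
// Note: like the other *.simd.hpp kernels, everything below is compiled once
// per CPU baseline/dispatch target; the dispatch layer (matmul.dispatch.cpp)
// is expected to pick the best available version at runtime through the
// getters and entry points declared above.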

#ifndef CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY

#if !defined(CV_GEMM_BASELINE_ONLY) || defined(CV_CPU_BASELINE_MODE)
/****************************************************************************************\
*                                         GEMM                                           *
\****************************************************************************************/

static void
GEMM_CopyBlock( const uchar* src, size_t src_step,
                uchar* dst, size_t dst_step,
                Size size, size_t pix_size )
{
    int j;
    size.width *= (int)(pix_size / sizeof(int));

    for( ; size.height--; src += src_step, dst += dst_step )
    {
        j = 0;
        #if CV_ENABLE_UNROLLED
        for( ; j <= size.width - 4; j += 4 )
        {
            int t0 = ((const int*)src)[j];
            int t1 = ((const int*)src)[j+1];
            ((int*)dst)[j] = t0;
            ((int*)dst)[j+1] = t1;
            t0 = ((const int*)src)[j+2];
            t1 = ((const int*)src)[j+3];
            ((int*)dst)[j+2] = t0;
            ((int*)dst)[j+3] = t1;
        }
        #endif
        for( ; j < size.width; j++ )
            ((int*)dst)[j] = ((const int*)src)[j];
    }
}

static void
GEMM_TransposeBlock( const uchar* src, size_t src_step,
                     uchar* dst, size_t dst_step,
                     Size size, size_t pix_size )
{
    int i, j;
    for( i = 0; i < size.width; i++, dst += dst_step, src += pix_size )
    {
        const uchar* _src = src;
        switch( pix_size )
        {
        case sizeof(int):
            for( j = 0; j < size.height; j++, _src += src_step )
                ((int*)dst)[j] = ((int*)_src)[0];
            break;
        case sizeof(int)*2:
            for( j = 0; j < size.height*2; j += 2, _src += src_step )
            {
                int t0 = ((int*)_src)[0];
                int t1 = ((int*)_src)[1];
                ((int*)dst)[j] = t0;
                ((int*)dst)[j+1] = t1;
            }
            break;
        case sizeof(int)*4:
            for( j = 0; j < size.height*4; j += 4, _src += src_step )
            {
                int t0 = ((int*)_src)[0];
                int t1 = ((int*)_src)[1];
                ((int*)dst)[j] = t0;
                ((int*)dst)[j+1] = t1;
                t0 = ((int*)_src)[2];
                t1 = ((int*)_src)[3];
                ((int*)dst)[j+2] = t0;
                ((int*)dst)[j+3] = t1;
            }
            break;
        default:
            assert(0);
            return;
        }
    }
}
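
// GEMMSingleMul below computes D = alpha*op(A)*op(B) + beta*op(C) in a single
// pass, picking one of four strategies: an outer-product loop when the inner
// dimension is 1, a row-dot-product loop when B is transposed, a 4-column
// register-blocked loop for narrow destinations (rows of at most 1600 bytes),
// and an accumulate-into-a-row-buffer loop for wide ones.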

template<typename T, typename WT> static void
GEMMSingleMul( const T* a_data, size_t a_step,
               const T* b_data, size_t b_step,
               const T* c_data, size_t c_step,
               T* d_data, size_t d_step,
               Size a_size, Size d_size,
               double alpha, double beta, int flags )
{
    int i, j, k, n = a_size.width, m = d_size.width, drows = d_size.height;
    const T *_a_data = a_data, *_b_data = b_data, *_c_data = c_data;
    cv::AutoBuffer<T> _a_buf;
    T* a_buf = 0;
    size_t a_step0, a_step1, c_step0, c_step1, t_step;

    a_step /= sizeof(a_data[0]);
    b_step /= sizeof(b_data[0]);
    c_step /= sizeof(c_data[0]);
    d_step /= sizeof(d_data[0]);
    a_step0 = a_step;
    a_step1 = 1;

    if( !c_data )
        c_step0 = c_step1 = 0;
    else if( !(flags & GEMM_3_T) )
        c_step0 = c_step, c_step1 = 1;
    else
        c_step0 = 1, c_step1 = c_step;

    if( flags & GEMM_1_T )
    {
        CV_SWAP( a_step0, a_step1, t_step );
        n = a_size.height;
        if( a_step > 1 && n > 1 )
        {
            _a_buf.allocate(n);
            a_buf = _a_buf.data();
        }
    }

    if( n == 1 ) /* outer product */
    {
        cv::AutoBuffer<T> _b_buf;
        T* b_buf = 0;

        if( a_step > 1 && a_size.height > 1 )
        {
            _a_buf.allocate(drows);
            a_buf = _a_buf.data();
            for( k = 0; k < drows; k++ )
                a_buf[k] = a_data[a_step*k];
            a_data = a_buf;
        }

        if( b_step > 1 )
        {
            _b_buf.allocate(d_size.width);
            b_buf = _b_buf.data();
            for( j = 0; j < d_size.width; j++ )
                b_buf[j] = b_data[j*b_step];
            b_data = b_buf;
        }

        for( i = 0; i < drows; i++, _c_data += c_step0, d_data += d_step )
        {
            WT al = WT(a_data[i])*alpha;
            c_data = _c_data;
            for( j = 0; j <= d_size.width - 2; j += 2, c_data += 2*c_step1 )
            {
                WT s0 = al*WT(b_data[j]);
                WT s1 = al*WT(b_data[j+1]);
                if( !c_data )
                {
                    d_data[j] = T(s0);
                    d_data[j+1] = T(s1);
                }
                else
                {
                    d_data[j] = T(s0 + WT(c_data[0])*beta);
                    d_data[j+1] = T(s1 + WT(c_data[c_step1])*beta);
                }
            }

            for( ; j < d_size.width; j++, c_data += c_step1 )
            {
                WT s0 = al*WT(b_data[j]);
                if( !c_data )
                    d_data[j] = T(s0);
                else
                    d_data[j] = T(s0 + WT(c_data[0])*beta);
            }
        }
    }
    else if( flags & GEMM_2_T ) /* A * Bt */
    {
        for( i = 0; i < drows; i++, _a_data += a_step0, _c_data += c_step0, d_data += d_step )
        {
            a_data = _a_data;
            b_data = _b_data;
            c_data = _c_data;

            if( a_buf )
            {
                for( k = 0; k < n; k++ )
                    a_buf[k] = a_data[a_step1*k];
                a_data = a_buf;
            }

            for( j = 0; j < d_size.width; j++, b_data += b_step,
                                               c_data += c_step1 )
            {
                WT s0(0), s1(0), s2(0), s3(0);
                k = 0;
                #if CV_ENABLE_UNROLLED
                for( ; k <= n - 4; k += 4 )
                {
                    s0 += WT(a_data[k])*WT(b_data[k]);
                    s1 += WT(a_data[k+1])*WT(b_data[k+1]);
                    s2 += WT(a_data[k+2])*WT(b_data[k+2]);
                    s3 += WT(a_data[k+3])*WT(b_data[k+3]);
                }
                #endif
                for( ; k < n; k++ )
                    s0 += WT(a_data[k])*WT(b_data[k]);
                s0 = (s0+s1+s2+s3)*alpha;

                if( !c_data )
                    d_data[j] = T(s0);
                else
                    d_data[j] = T(s0 + WT(c_data[0])*beta);
            }
        }
    }
    else if( d_size.width*sizeof(d_data[0]) <= 1600 )
    {
        for( i = 0; i < drows; i++, _a_data += a_step0,
                                    _c_data += c_step0,
                                    d_data += d_step )
        {
            a_data = _a_data, c_data = _c_data;

            if( a_buf )
            {
                for( k = 0; k < n; k++ )
                    a_buf[k] = a_data[a_step1*k];
                a_data = a_buf;
            }

            for( j = 0; j <= m - 4; j += 4, c_data += 4*c_step1 )
            {
                const T* b = _b_data + j;
                WT s0(0), s1(0), s2(0), s3(0);

                for( k = 0; k < n; k++, b += b_step )
                {
                    WT a(a_data[k]);
                    s0 += a * WT(b[0]); s1 += a * WT(b[1]);
                    s2 += a * WT(b[2]); s3 += a * WT(b[3]);
                }

                if( !c_data )
                {
                    d_data[j] = T(s0*alpha);
                    d_data[j+1] = T(s1*alpha);
                    d_data[j+2] = T(s2*alpha);
                    d_data[j+3] = T(s3*alpha);
                }
                else
                {
                    s0 = s0*alpha; s1 = s1*alpha;
                    s2 = s2*alpha; s3 = s3*alpha;
                    d_data[j] = T(s0 + WT(c_data[0])*beta);
                    d_data[j+1] = T(s1 + WT(c_data[c_step1])*beta);
                    d_data[j+2] = T(s2 + WT(c_data[c_step1*2])*beta);
                    d_data[j+3] = T(s3 + WT(c_data[c_step1*3])*beta);
                }
            }

            for( ; j < m; j++, c_data += c_step1 )
            {
                const T* b = _b_data + j;
                WT s0(0);

                for( k = 0; k < n; k++, b += b_step )
                    s0 += WT(a_data[k]) * WT(b[0]);

                s0 = s0*alpha;
                if( !c_data )
                    d_data[j] = T(s0);
                else
                    d_data[j] = T(s0 + WT(c_data[0])*beta);
            }
        }
    }
    else
    {
        cv::AutoBuffer<WT> _d_buf(m);
        WT* d_buf = _d_buf.data();

        for( i = 0; i < drows; i++, _a_data += a_step0, _c_data += c_step0, d_data += d_step )
        {
            a_data = _a_data;
            b_data = _b_data;
            c_data = _c_data;

            if( a_buf )
            {
                for( k = 0; k < n; k++ )
                    a_buf[k] = _a_data[a_step1*k];
                a_data = a_buf;
            }

            for( j = 0; j < m; j++ )
                d_buf[j] = WT(0);

            for( k = 0; k < n; k++, b_data += b_step )
            {
                WT al(a_data[k]);
                j = 0;
                #if CV_ENABLE_UNROLLED
                for(; j <= m - 4; j += 4 )
                {
                    WT t0 = d_buf[j] + WT(b_data[j])*al;
                    WT t1 = d_buf[j+1] + WT(b_data[j+1])*al;
                    d_buf[j] = t0; d_buf[j+1] = t1;
                    t0 = d_buf[j+2] + WT(b_data[j+2])*al;
                    t1 = d_buf[j+3] + WT(b_data[j+3])*al;
                    d_buf[j+2] = t0; d_buf[j+3] = t1;
                }
                #endif
                for( ; j < m; j++ )
                    d_buf[j] += WT(b_data[j])*al;
            }

            if( !c_data )
                for( j = 0; j < m; j++ )
                    d_data[j] = T(d_buf[j]*alpha);
            else
                for( j = 0; j < m; j++, c_data += c_step1 )
                {
                    WT t = d_buf[j]*alpha;
                    d_data[j] = T(t + WT(c_data[0])*beta);
                }
        }
    }
}

template<typename T, typename WT> static void
GEMMBlockMul( const T* a_data, size_t a_step,
              const T* b_data, size_t b_step,
              WT* d_data, size_t d_step,
              Size a_size, Size d_size, int flags )
{
    int i, j, k, n = a_size.width, m = d_size.width;
    const T *_a_data = a_data, *_b_data = b_data;
    cv::AutoBuffer<T> _a_buf;
    T* a_buf = 0;
    size_t a_step0, a_step1, t_step;
    int do_acc = flags & 16;

    a_step /= sizeof(a_data[0]);
    b_step /= sizeof(b_data[0]);
    d_step /= sizeof(d_data[0]);

    a_step0 = a_step;
    a_step1 = 1;

    if( flags & GEMM_1_T )
    {
        CV_SWAP( a_step0, a_step1, t_step );
        n = a_size.height;
        _a_buf.allocate(n);
        a_buf = _a_buf.data();
    }

    if( flags & GEMM_2_T )
    {
        /* second operand is transposed */
        for( i = 0; i < d_size.height; i++, _a_data += a_step0, d_data += d_step )
        {
            a_data = _a_data; b_data = _b_data;

            if( a_buf )
            {
                for( k = 0; k < n; k++ )
                    a_buf[k] = a_data[a_step1*k];
                a_data = a_buf;
            }

            for( j = 0; j < d_size.width; j++, b_data += b_step )
            {
                WT s0 = do_acc ? d_data[j]:WT(0), s1(0);
                for( k = 0; k <= n - 2; k += 2 )
                {
                    s0 += WT(a_data[k])*WT(b_data[k]);
                    s1 += WT(a_data[k+1])*WT(b_data[k+1]);
                }

                for( ; k < n; k++ )
                    s0 += WT(a_data[k])*WT(b_data[k]);

                d_data[j] = s0 + s1;
            }
        }
    }
    else
    {
        for( i = 0; i < d_size.height; i++, _a_data += a_step0, d_data += d_step )
        {
            a_data = _a_data, b_data = _b_data;

            if( a_buf )
            {
                for( k = 0; k < n; k++ )
                    a_buf[k] = a_data[a_step1*k];
                a_data = a_buf;
            }

            for( j = 0; j <= m - 4; j += 4 )
            {
                WT s0, s1, s2, s3;
                const T* b = b_data + j;

                if( do_acc )
                {
                    s0 = d_data[j]; s1 = d_data[j+1];
                    s2 = d_data[j+2]; s3 = d_data[j+3];
                }
                else
                    s0 = s1 = s2 = s3 = WT(0);

                for( k = 0; k < n; k++, b += b_step )
                {
                    WT a(a_data[k]);
                    s0 += a * WT(b[0]); s1 += a * WT(b[1]);
                    s2 += a * WT(b[2]); s3 += a * WT(b[3]);
                }

                d_data[j] = s0; d_data[j+1] = s1;
                d_data[j+2] = s2; d_data[j+3] = s3;
            }

            for( ; j < m; j++ )
            {
                const T* b = b_data + j;
                WT s0 = do_acc ? d_data[j] : WT(0);

                for( k = 0; k < n; k++, b += b_step )
                    s0 += WT(a_data[k]) * WT(b[0]);

                d_data[j] = s0;
            }
        }
    }
}

template<typename T, typename WT> static void
GEMMStore( const T* c_data, size_t c_step,
           const WT* d_buf, size_t d_buf_step,
           T* d_data, size_t d_step, Size d_size,
           double alpha, double beta, int flags )
{
    const T* _c_data = c_data;
    int j;
    size_t c_step0, c_step1;

    c_step /= sizeof(c_data[0]);
    d_buf_step /= sizeof(d_buf[0]);
    d_step /= sizeof(d_data[0]);

    if( !c_data )
        c_step0 = c_step1 = 0;
    else if( !(flags & GEMM_3_T) )
        c_step0 = c_step, c_step1 = 1;
    else
        c_step0 = 1, c_step1 = c_step;

    for( ; d_size.height--; _c_data += c_step0, d_buf += d_buf_step, d_data += d_step )
    {
        if( _c_data )
        {
            c_data = _c_data;
            j = 0;
            #if CV_ENABLE_UNROLLED
            for(; j <= d_size.width - 4; j += 4, c_data += 4*c_step1 )
            {
                WT t0 = alpha*d_buf[j];
                WT t1 = alpha*d_buf[j+1];
                t0 += beta*WT(c_data[0]);
                t1 += beta*WT(c_data[c_step1]);
                d_data[j] = T(t0);
                d_data[j+1] = T(t1);
                t0 = alpha*d_buf[j+2];
                t1 = alpha*d_buf[j+3];
                t0 += beta*WT(c_data[c_step1*2]);
                t1 += beta*WT(c_data[c_step1*3]);
                d_data[j+2] = T(t0);
                d_data[j+3] = T(t1);
            }
            #endif
            for( ; j < d_size.width; j++, c_data += c_step1 )
            {
                WT t0 = alpha*d_buf[j];
                d_data[j] = T(t0 + WT(c_data[0])*beta);
            }
        }
        else
        {
            j = 0;
            #if CV_ENABLE_UNROLLED
            for( ; j <= d_size.width - 4; j += 4 )
            {
                WT t0 = alpha*d_buf[j];
                WT t1 = alpha*d_buf[j+1];
                d_data[j] = T(t0);
                d_data[j+1] = T(t1);
                t0 = alpha*d_buf[j+2];
                t1 = alpha*d_buf[j+3];
                d_data[j+2] = T(t0);
                d_data[j+3] = T(t1);
            }
            #endif
            for( ; j < d_size.width; j++ )
                d_data[j] = T(alpha*d_buf[j]);
        }
    }
}

typedef void (*GEMMSingleMulFunc)( const void* src1, size_t step1,
                   const void* src2, size_t step2, const void* src3, size_t step3,
                   void* dst, size_t dststep, Size srcsize, Size dstsize,
                   double alpha, double beta, int flags );

typedef void (*GEMMBlockMulFunc)( const void* src1, size_t step1,
                   const void* src2, size_t step2, void* dst, size_t dststep,
                   Size srcsize, Size dstsize, int flags );

typedef void (*GEMMStoreFunc)( const void* src1, size_t step1,
                   const void* src2, size_t step2, void* dst, size_t dststep,
                   Size dstsize, double alpha, double beta, int flags );

static void GEMMSingleMul_32f( const float* a_data, size_t a_step,
                               const float* b_data, size_t b_step,
                               const float* c_data, size_t c_step,
                               float* d_data, size_t d_step,
                               Size a_size, Size d_size,
                               double alpha, double beta, int flags )
{
    GEMMSingleMul<float,double>(a_data, a_step, b_data, b_step, c_data,
                                c_step, d_data, d_step, a_size, d_size,
                                alpha, beta, flags);
}

static void GEMMSingleMul_64f( const double* a_data, size_t a_step,
                               const double* b_data, size_t b_step,
                               const double* c_data, size_t c_step,
                               double* d_data, size_t d_step,
                               Size a_size, Size d_size,
                               double alpha, double beta, int flags )
{
    GEMMSingleMul<double,double>(a_data, a_step, b_data, b_step, c_data,
                                 c_step, d_data, d_step, a_size, d_size,
                                 alpha, beta, flags);
}

static void GEMMSingleMul_32fc( const Complexf* a_data, size_t a_step,
                                const Complexf* b_data, size_t b_step,
                                const Complexf* c_data, size_t c_step,
                                Complexf* d_data, size_t d_step,
                                Size a_size, Size d_size,
                                double alpha, double beta, int flags )
{
    GEMMSingleMul<Complexf,Complexd>(a_data, a_step, b_data, b_step, c_data,
                                     c_step, d_data, d_step, a_size, d_size,
                                     alpha, beta, flags);
}

static void GEMMSingleMul_64fc( const Complexd* a_data, size_t a_step,
                                const Complexd* b_data, size_t b_step,
                                const Complexd* c_data, size_t c_step,
                                Complexd* d_data, size_t d_step,
                                Size a_size, Size d_size,
                                double alpha, double beta, int flags )
{
    GEMMSingleMul<Complexd,Complexd>(a_data, a_step, b_data, b_step, c_data,
                                     c_step, d_data, d_step, a_size, d_size,
                                     alpha, beta, flags);
}

static void GEMMBlockMul_32f( const float* a_data, size_t a_step,
                              const float* b_data, size_t b_step,
                              double* d_data, size_t d_step,
                              Size a_size, Size d_size, int flags )
{
    GEMMBlockMul(a_data, a_step, b_data, b_step, d_data, d_step, a_size, d_size, flags);
}

static void GEMMBlockMul_64f( const double* a_data, size_t a_step,
                              const double* b_data, size_t b_step,
                              double* d_data, size_t d_step,
                              Size a_size, Size d_size, int flags )
{
    GEMMBlockMul(a_data, a_step, b_data, b_step, d_data, d_step, a_size, d_size, flags);
}

static void GEMMBlockMul_32fc( const Complexf* a_data, size_t a_step,
                               const Complexf* b_data, size_t b_step,
                               Complexd* d_data, size_t d_step,
                               Size a_size, Size d_size, int flags )
{
    GEMMBlockMul(a_data, a_step, b_data, b_step, d_data, d_step, a_size, d_size, flags);
}

static void GEMMBlockMul_64fc( const Complexd* a_data, size_t a_step,
                               const Complexd* b_data, size_t b_step,
                               Complexd* d_data, size_t d_step,
                               Size a_size, Size d_size, int flags )
{
    GEMMBlockMul(a_data, a_step, b_data, b_step, d_data, d_step, a_size, d_size, flags);
}

static void GEMMStore_32f( const float* c_data, size_t c_step,
                           const double* d_buf, size_t d_buf_step,
                           float* d_data, size_t d_step, Size d_size,
                           double alpha, double beta, int flags )
{
    GEMMStore(c_data, c_step, d_buf, d_buf_step, d_data, d_step, d_size, alpha, beta, flags);
}

static void GEMMStore_64f( const double* c_data, size_t c_step,
                           const double* d_buf, size_t d_buf_step,
                           double* d_data, size_t d_step, Size d_size,
                           double alpha, double beta, int flags )
{
    GEMMStore(c_data, c_step, d_buf, d_buf_step, d_data, d_step, d_size, alpha, beta, flags);
}

static void GEMMStore_32fc( const Complexf* c_data, size_t c_step,
                            const Complexd* d_buf, size_t d_buf_step,
                            Complexf* d_data, size_t d_step, Size d_size,
                            double alpha, double beta, int flags )
{
    GEMMStore(c_data, c_step, d_buf, d_buf_step, d_data, d_step, d_size, alpha, beta, flags);
}

static void GEMMStore_64fc( const Complexd* c_data, size_t c_step,
                            const Complexd* d_buf, size_t d_buf_step,
                            Complexd* d_data, size_t d_step, Size d_size,
                            double alpha, double beta, int flags )
{
    GEMMStore(c_data, c_step, d_buf, d_buf_step, d_data, d_step, d_size, alpha, beta, flags);
}
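
// The typed wrappers above exist so gemmImpl can hold them through the
// type-erased GEMMSingleMulFunc/GEMMBlockMulFunc/GEMMStoreFunc pointers;
// the casts below assume all element types share the same argument layout,
// and each wrapper is only ever invoked with the pointer types it was
// instantiated for.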

static void gemmImpl( Mat A, Mat B, double alpha,
                      Mat C, double beta, Mat D, int flags )
{
    CV_INSTRUMENT_REGION();

    const int block_lin_size = 128;
    const int block_size = block_lin_size * block_lin_size;

    static double zero[] = {0,0,0,0};
    static float zerof[] = {0,0,0,0};

    Size a_size = A.size(), d_size;
    int i, len = 0, type = A.type();

    switch( flags & (GEMM_1_T|GEMM_2_T) )
    {
    case 0:
        d_size = Size( B.cols, a_size.height );
        len = B.rows;
        break;
    case 1:
        d_size = Size( B.cols, a_size.width );
        len = B.rows;
        break;
    case 2:
        d_size = Size( B.rows, a_size.height );
        len = B.cols;
        break;
    case 3:
        d_size = Size( B.rows, a_size.width );
        len = B.cols;
        break;
    }

    if( flags == 0 && 2 <= len && len <= 4 && (len == d_size.width || len == d_size.height) )
    {
        if( type == CV_32F )
        {
            float* d = D.ptr<float>();
            const float *a = A.ptr<float>(),
                        *b = B.ptr<float>(),
                        *c = (const float*)C.data;
            size_t d_step = D.step/sizeof(d[0]),
                   a_step = A.step/sizeof(a[0]),
                   b_step = B.step/sizeof(b[0]),
                   c_step = C.data ? C.step/sizeof(c[0]) : 0;

            if( !c )
                c = zerof;

            switch( len )
            {
            case 2:
                if( len == d_size.width && b != d )
                {
                    for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
                    {
                        float t0 = a[0]*b[0] + a[1]*b[b_step];
                        float t1 = a[0]*b[1] + a[1]*b[b_step+1];
                        d[0] = (float)(t0*alpha + c[0]*beta);
                        d[1] = (float)(t1*alpha + c[1]*beta);
                    }
                }
                else if( a != d )
                {
                    int c_step0 = 1;
                    if( c == zerof )
                    {
                        c_step0 = 0;
                        c_step = 1;
                    }

                    for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
                    {
                        float t0 = a[0]*b[0] + a[1]*b[b_step];
                        float t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step];
                        d[0] = (float)(t0*alpha + c[0]*beta);
                        d[d_step] = (float)(t1*alpha + c[c_step]*beta);
                    }
                }
                else
                    break;
                return;
            case 3:
                if( len == d_size.width && b != d )
                {
                    for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
                    {
                        float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2];
                        float t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1];
                        float t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2];
                        d[0] = (float)(t0*alpha + c[0]*beta);
                        d[1] = (float)(t1*alpha + c[1]*beta);
                        d[2] = (float)(t2*alpha + c[2]*beta);
                    }
                }
                else if( a != d )
                {
                    int c_step0 = 1;
                    if( c == zerof )
                    {
                        c_step0 = 0;
                        c_step = 1;
                    }

                    for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
                    {
                        float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2];
                        float t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] + a[a_step+2]*b[b_step*2];
                        float t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] + a[a_step*2+2]*b[b_step*2];

                        d[0] = (float)(t0*alpha + c[0]*beta);
                        d[d_step] = (float)(t1*alpha + c[c_step]*beta);
                        d[d_step*2] = (float)(t2*alpha + c[c_step*2]*beta);
                    }
                }
                else
                    break;
                return;
            case 4:
                if( len == d_size.width && b != d )
                {
                    for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
                    {
                        float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3];
                        float t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1] + a[3]*b[b_step*3+1];
                        float t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2] + a[3]*b[b_step*3+2];
                        float t3 = a[0]*b[3] + a[1]*b[b_step+3] + a[2]*b[b_step*2+3] + a[3]*b[b_step*3+3];
                        d[0] = (float)(t0*alpha + c[0]*beta);
                        d[1] = (float)(t1*alpha + c[1]*beta);
                        d[2] = (float)(t2*alpha + c[2]*beta);
                        d[3] = (float)(t3*alpha + c[3]*beta);
                    }
                }
                else if( len <= 16 && a != d )
                {
                    int c_step0 = 1;
                    if( c == zerof )
                    {
                        c_step0 = 0;
                        c_step = 1;
                    }

                    for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
                    {
                        float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3];
                        float t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] +
                                   a[a_step+2]*b[b_step*2] + a[a_step+3]*b[b_step*3];
                        float t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] +
                                   a[a_step*2+2]*b[b_step*2] + a[a_step*2+3]*b[b_step*3];
                        float t3 = a[a_step*3]*b[0] + a[a_step*3+1]*b[b_step] +
                                   a[a_step*3+2]*b[b_step*2] + a[a_step*3+3]*b[b_step*3];
                        d[0] = (float)(t0*alpha + c[0]*beta);
                        d[d_step] = (float)(t1*alpha + c[c_step]*beta);
                        d[d_step*2] = (float)(t2*alpha + c[c_step*2]*beta);
                        d[d_step*3] = (float)(t3*alpha + c[c_step*3]*beta);
                    }
                }
                else
                    break;
                return;
            }
        }

        if( type == CV_64F )
        {
            double* d = D.ptr<double>();
            const double *a = A.ptr<double>(),
                         *b = B.ptr<double>(),
                         *c = (const double*)C.data;
            size_t d_step = D.step/sizeof(d[0]),
                   a_step = A.step/sizeof(a[0]),
                   b_step = B.step/sizeof(b[0]),
                   c_step = C.data ? C.step/sizeof(c[0]) : 0;

            if( !c )
                c = zero;

            switch( len )
            {
            case 2:
                if( len == d_size.width && b != d )
                {
                    for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
                    {
                        double t0 = a[0]*b[0] + a[1]*b[b_step];
                        double t1 = a[0]*b[1] + a[1]*b[b_step+1];
                        d[0] = t0*alpha + c[0]*beta;
                        d[1] = t1*alpha + c[1]*beta;
                    }
                }
                else if( a != d )
                {
                    int c_step0 = 1;
                    if( c == zero )
                    {
                        c_step0 = 0;
                        c_step = 1;
                    }

                    for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
                    {
                        double t0 = a[0]*b[0] + a[1]*b[b_step];
                        double t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step];
                        d[0] = t0*alpha + c[0]*beta;
                        d[d_step] = t1*alpha + c[c_step]*beta;
                    }
                }
                else
                    break;
                return;
            case 3:
                if( len == d_size.width && b != d )
                {
                    for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
                    {
                        double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2];
                        double t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1];
                        double t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2];
                        d[0] = t0*alpha + c[0]*beta;
                        d[1] = t1*alpha + c[1]*beta;
                        d[2] = t2*alpha + c[2]*beta;
                    }
                }
                else if( a != d )
                {
                    int c_step0 = 1;
                    if( c == zero )
                    {
                        c_step0 = 0;
                        c_step = 1;
                    }

                    for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
                    {
                        double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2];
                        double t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] + a[a_step+2]*b[b_step*2];
                        double t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] + a[a_step*2+2]*b[b_step*2];

                        d[0] = t0*alpha + c[0]*beta;
                        d[d_step] = t1*alpha + c[c_step]*beta;
                        d[d_step*2] = t2*alpha + c[c_step*2]*beta;
                    }
                }
                else
                    break;
                return;
            case 4:
                if( len == d_size.width && b != d )
                {
                    for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
                    {
                        double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3];
                        double t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1] + a[3]*b[b_step*3+1];
                        double t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2] + a[3]*b[b_step*3+2];
                        double t3 = a[0]*b[3] + a[1]*b[b_step+3] + a[2]*b[b_step*2+3] + a[3]*b[b_step*3+3];
                        d[0] = t0*alpha + c[0]*beta;
                        d[1] = t1*alpha + c[1]*beta;
                        d[2] = t2*alpha + c[2]*beta;
                        d[3] = t3*alpha + c[3]*beta;
                    }
                }
                else if( d_size.width <= 16 && a != d )
                {
                    int c_step0 = 1;
                    if( c == zero )
                    {
                        c_step0 = 0;
                        c_step = 1;
                    }

                    for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
                    {
                        double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3];
                        double t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] +
                                    a[a_step+2]*b[b_step*2] + a[a_step+3]*b[b_step*3];
                        double t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] +
                                    a[a_step*2+2]*b[b_step*2] + a[a_step*2+3]*b[b_step*3];
                        double t3 = a[a_step*3]*b[0] + a[a_step*3+1]*b[b_step] +
                                    a[a_step*3+2]*b[b_step*2] + a[a_step*3+3]*b[b_step*3];
                        d[0] = t0*alpha + c[0]*beta;
                        d[d_step] = t1*alpha + c[c_step]*beta;
                        d[d_step*2] = t2*alpha + c[c_step*2]*beta;
                        d[d_step*3] = t3*alpha + c[c_step*3]*beta;
                    }
                }
                else
                    break;
                return;
            }
        }
    }

    size_t b_step = B.step;
    GEMMSingleMulFunc singleMulFunc;
    GEMMBlockMulFunc blockMulFunc;
    GEMMStoreFunc storeFunc;
    Mat* matD = &D;
    const uchar* Cdata = C.data;
    size_t Cstep = C.data ? (size_t)C.step : 0;
    AutoBuffer<uchar> buf;

    if( type == CV_32FC1 )
    {
        singleMulFunc = (GEMMSingleMulFunc)GEMMSingleMul_32f;
        blockMulFunc = (GEMMBlockMulFunc)GEMMBlockMul_32f;
        storeFunc = (GEMMStoreFunc)GEMMStore_32f;
    }
    else if( type == CV_64FC1 )
    {
        singleMulFunc = (GEMMSingleMulFunc)GEMMSingleMul_64f;
        blockMulFunc = (GEMMBlockMulFunc)GEMMBlockMul_64f;
        storeFunc = (GEMMStoreFunc)GEMMStore_64f;
    }
    else if( type == CV_32FC2 )
    {
        singleMulFunc = (GEMMSingleMulFunc)GEMMSingleMul_32fc;
        blockMulFunc = (GEMMBlockMulFunc)GEMMBlockMul_32fc;
        storeFunc = (GEMMStoreFunc)GEMMStore_32fc;
    }
    else
    {
        CV_Assert( type == CV_64FC2 );
        singleMulFunc = (GEMMSingleMulFunc)GEMMSingleMul_64fc;
        blockMulFunc = (GEMMBlockMulFunc)GEMMBlockMul_64fc;
        storeFunc = (GEMMStoreFunc)GEMMStore_64fc;
    }

    if( (d_size.width == 1 || len == 1) && !(flags & GEMM_2_T) && B.isContinuous() )
    {
        b_step = d_size.width == 1 ? 0 : CV_ELEM_SIZE(type);
        flags |= GEMM_2_T;
    }

    /*if( (d_size.width | d_size.height | len) >= 16 && icvBLAS_GEMM_32f_p != 0 )
    {
        blas_func = type == CV_32FC1 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_32f_p :
                    type == CV_64FC1 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_64f_p :
                    type == CV_32FC2 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_32fc_p :
                    type == CV_64FC2 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_64fc_p : 0;
    }

    if( blas_func )
    {
        const char* transa = flags & GEMM_1_T ? "t" : "n";
        const char* transb = flags & GEMM_2_T ? "t" : "n";
        int lda, ldb, ldd;

        if( C->data.ptr )
        {
            if( C->data.ptr != D->data.ptr )
            {
                if( !(flags & GEMM_3_T) )
                    cvCopy( C, D );
                else
                    cvTranspose( C, D );
            }
        }

        if( CV_MAT_DEPTH(type) == CV_32F )
        {
            Complex32f _alpha, _beta;

            lda = A->step/sizeof(float);
            ldb = b_step/sizeof(float);
            ldd = D->step/sizeof(float);
            _alpha.re = (float)alpha;
            _alpha.im = 0;
            _beta.re = C->data.ptr ? (float)beta : 0;
            _beta.im = 0;
            if( CV_MAT_CN(type) == 2 )
                lda /= 2, ldb /= 2, ldd /= 2;

            blas_func( transb, transa, &d_size.width, &d_size.height, &len,
                   &_alpha, B->data.ptr, &ldb, A->data.ptr, &lda,
                   &_beta, D->data.ptr, &ldd );
        }
        else
        {
            CvComplex64f _alpha, _beta;

            lda = A->step/sizeof(double);
            ldb = b_step/sizeof(double);
            ldd = D->step/sizeof(double);
            _alpha.re = alpha;
            _alpha.im = 0;
            _beta.re = C->data.ptr ? beta : 0;
            _beta.im = 0;
            if( CV_MAT_CN(type) == 2 )
                lda /= 2, ldb /= 2, ldd /= 2;

            blas_func( transb, transa, &d_size.width, &d_size.height, &len,
                   &_alpha, B->data.ptr, &ldb, A->data.ptr, &lda,
                   &_beta, D->data.ptr, &ldd );
        }
    }
    else*/ if( ((d_size.height <= block_lin_size/2 || d_size.width <= block_lin_size/2) &&
        len <= 10000) || len <= 10 ||
        (d_size.width <= block_lin_size &&
        d_size.height <= block_lin_size && len <= block_lin_size) )
    {
        singleMulFunc( A.ptr(), A.step, B.ptr(), b_step, Cdata, Cstep,
                       matD->ptr(), matD->step, a_size, d_size, alpha, beta, flags );
    }
    else
    {
        int is_a_t = flags & GEMM_1_T;
        int is_b_t = flags & GEMM_2_T;
        int elem_size = CV_ELEM_SIZE(type);
        int dk0_1, dk0_2;
        size_t a_buf_size = 0, b_buf_size, d_buf_size;
        uchar* a_buf = 0;
        uchar* b_buf = 0;
        uchar* d_buf = 0;
        int j, k, di = 0, dj = 0, dk = 0;
        int dm0, dn0, dk0;
        size_t a_step0, a_step1, b_step0, b_step1, c_step0, c_step1;
        int work_elem_size = elem_size << (CV_MAT_DEPTH(type) == CV_32F ? 1 : 0);

        if( !is_a_t )
            a_step0 = A.step, a_step1 = elem_size;
        else
            a_step0 = elem_size, a_step1 = A.step;

        if( !is_b_t )
            b_step0 = b_step, b_step1 = elem_size;
        else
            b_step0 = elem_size, b_step1 = b_step;

        if( !Cdata )
        {
            c_step0 = c_step1 = 0;
            flags &= ~GEMM_3_T;
        }
        else if( !(flags & GEMM_3_T) )
            c_step0 = C.step, c_step1 = elem_size;
        else
            c_step0 = elem_size, c_step1 = C.step;

        dm0 = std::min( block_lin_size, d_size.height );
        dn0 = std::min( block_lin_size, d_size.width );
        dk0_1 = block_size / dm0;
        dk0_2 = block_size / dn0;
        dk0 = std::min( dk0_1, dk0_2 );
        dk0 = std::min( dk0, len );
        if( dk0*dm0 > block_size )
            dm0 = block_size / dk0;
        if( dk0*dn0 > block_size )
            dn0 = block_size / dk0;

        dk0_1 = (dn0+dn0/8+2) & -2;
        b_buf_size = (size_t)(dk0+dk0/8+1)*dk0_1*elem_size;
        d_buf_size = (size_t)(dk0+dk0/8+1)*dk0_1*work_elem_size;

        if( is_a_t )
        {
            a_buf_size = (size_t)(dm0+dm0/8+1)*((dk0+dk0/8+2)&-2)*elem_size;
            flags &= ~GEMM_1_T;
        }

        buf.allocate(d_buf_size + b_buf_size + a_buf_size);
        d_buf = buf.data();
        b_buf = d_buf + d_buf_size;

        if( is_a_t )
            a_buf = b_buf + b_buf_size;

        for( i = 0; i < d_size.height; i += di )
        {
            di = dm0;
            if( i + di >= d_size.height || 8*(i + di) + di > 8*d_size.height )
                di = d_size.height - i;

            for( j = 0; j < d_size.width; j += dj )
            {
                uchar* _d = matD->ptr() + i*matD->step + j*elem_size;
                const uchar* _c = Cdata + i*c_step0 + j*c_step1;
                size_t _d_step = matD->step;
                dj = dn0;

                if( j + dj >= d_size.width || 8*(j + dj) + dj > 8*d_size.width )
                    dj = d_size.width - j;

                flags &= 15;  // clear the accumulation flag (bit 16) for the new tile
                if( dk0 < len )
                {
                    _d = d_buf;
                    _d_step = dj*work_elem_size;
                }

                for( k = 0; k < len; k += dk )
                {
                    const uchar* _a = A.ptr() + i*a_step0 + k*a_step1;
                    size_t _a_step = A.step;
                    const uchar* _b = B.ptr() + k*b_step0 + j*b_step1;
                    size_t _b_step = b_step;
                    Size a_bl_size;

                    dk = dk0;
                    if( k + dk >= len || 8*(k + dk) + dk > 8*len )
                        dk = len - k;

                    if( !is_a_t )
                        a_bl_size.width = dk, a_bl_size.height = di;
                    else
                        a_bl_size.width = di, a_bl_size.height = dk;

                    if( a_buf && is_a_t )
                    {
                        _a_step = dk*elem_size;
                        GEMM_TransposeBlock( _a, A.step, a_buf, _a_step, a_bl_size, elem_size );
                        std::swap( a_bl_size.width, a_bl_size.height );
                        _a = a_buf;
                    }

                    if( dj < d_size.width )
                    {
                        Size b_size;
                        if( !is_b_t )
                            b_size.width = dj, b_size.height = dk;
                        else
                            b_size.width = dk, b_size.height = dj;

                        _b_step = b_size.width*elem_size;
                        GEMM_CopyBlock( _b, b_step, b_buf, _b_step, b_size, elem_size );
                        _b = b_buf;
                    }

                    if( dk0 < len )
                        blockMulFunc( _a, _a_step, _b, _b_step, _d, _d_step,
                                      a_bl_size, Size(dj,di), flags );
                    else
                        singleMulFunc( _a, _a_step, _b, _b_step, _c, Cstep,
                                       _d, _d_step, a_bl_size, Size(dj,di), alpha, beta, flags );
                    flags |= 16;  // subsequent k-slices accumulate into the tile
                }

                if( dk0 < len )
                    storeFunc( _c, Cstep, _d, _d_step,
                               matD->ptr(i) + j*elem_size,
                               matD->step, Size(dj,di), alpha, beta, flags );
            }
        }
    }
}
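
// A note on the blocked path above: the destination is processed in tiles of
// at most block_lin_size x block_lin_size (128x128) elements so that the
// working set stays cache-resident. Each k-slice of a tile is multiplied by
// GEMMBlockMul into a wider-precision scratch buffer (bit 16 of `flags`
// requests accumulation across successive k-slices), and GEMMStore finally
// applies alpha/beta (and C) once per tile.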

template <typename fptype> inline static void
callGemmImpl(const fptype *src1, size_t src1_step, const fptype *src2, size_t src2_step, fptype alpha,
             const fptype *src3, size_t src3_step, fptype beta, fptype *dst, size_t dst_step, int m_a, int n_a, int n_d, int flags, int type)
{
    CV_StaticAssert(GEMM_1_T == CV_HAL_GEMM_1_T, "Incompatible GEMM_1_T flag in HAL");
    CV_StaticAssert(GEMM_2_T == CV_HAL_GEMM_2_T, "Incompatible GEMM_2_T flag in HAL");
    CV_StaticAssert(GEMM_3_T == CV_HAL_GEMM_3_T, "Incompatible GEMM_3_T flag in HAL");

    int b_m, b_n, c_m, c_n, m_d;

    if(flags & GEMM_2_T)
    {
        b_m = n_d;
        if(flags & GEMM_1_T )
        {
            b_n = m_a;
            m_d = n_a;
        }
        else
        {
            b_n = n_a;
            m_d = m_a;
        }
    }
    else
    {
        b_n = n_d;
        if(flags & GEMM_1_T )
        {
            b_m = m_a;
            m_d = n_a;
        }
        else
        {
            b_m = n_a;
            m_d = m_a;
        }
    }

    if(flags & GEMM_3_T)
    {
        c_m = n_d;
        c_n = m_d;
    }
    else
    {
        c_m = m_d;
        c_n = n_d;
    }

    Mat A, B, C;
    A = Mat(m_a, n_a, type, (void*)src1, src1_step);
    B = Mat(b_m, b_n, type, (void*)src2, src2_step);
    if(src3 != NULL && beta != 0.0)
        C = Mat(c_m, c_n, type, (void*)src3, src3_step);
    Mat D(m_d, n_d, type, (void*)dst, dst_step);

    gemmImpl(A, B, alpha, C, beta, D, flags);
}

void gemm32f(const float* src1, size_t src1_step, const float* src2, size_t src2_step,
             float alpha, const float* src3, size_t src3_step, float beta, float* dst, size_t dst_step,
             int m_a, int n_a, int n_d, int flags)
{
    CV_INSTRUMENT_REGION();
    callGemmImpl(src1, src1_step, src2, src2_step, alpha, src3, src3_step, beta, dst, dst_step, m_a, n_a, n_d, flags, CV_32F);
}

void gemm64f(const double* src1, size_t src1_step, const double* src2, size_t src2_step,
             double alpha, const double* src3, size_t src3_step, double beta, double* dst, size_t dst_step,
             int m_a, int n_a, int n_d, int flags)
{
    CV_INSTRUMENT_REGION();
    callGemmImpl(src1, src1_step, src2, src2_step, alpha, src3, src3_step, beta, dst, dst_step, m_a, n_a, n_d, flags, CV_64F);
}

void gemm32fc(const float* src1, size_t src1_step, const float* src2, size_t src2_step,
              float alpha, const float* src3, size_t src3_step, float beta, float* dst, size_t dst_step,
              int m_a, int n_a, int n_d, int flags)
{
    CV_INSTRUMENT_REGION();
    callGemmImpl(src1, src1_step, src2, src2_step, alpha, src3, src3_step, beta, dst, dst_step, m_a, n_a, n_d, flags, CV_32FC2);
}

void gemm64fc(const double* src1, size_t src1_step, const double* src2, size_t src2_step,
              double alpha, const double* src3, size_t src3_step, double beta, double* dst, size_t dst_step,
              int m_a, int n_a, int n_d, int flags)
{
    CV_INSTRUMENT_REGION();
    callGemmImpl(src1, src1_step, src2, src2_step, alpha, src3, src3_step, beta, dst, dst_step, m_a, n_a, n_d, flags, CV_64FC2);
}
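
// Usage sketch (illustrative, not part of this translation unit): these entry
// points normally sit underneath the public cv::gemm() from core.hpp, which
// uses the same GEMM_*_T flags, e.g.
//
//     cv::Mat A(4, 3, CV_32F), B(4, 3, CV_32F), D;
//     cv::gemm(A, B, 1.0, cv::noArray(), 0.0, D, cv::GEMM_1_T);  // D = A^T * B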

#endif // !defined(CV_GEMM_BASELINE_ONLY) || defined(CV_CPU_BASELINE_MODE)

/****************************************************************************************\
*                                       Transform                                        *
\****************************************************************************************/

template<typename T, typename WT> static void
transform_( const T* src, T* dst, const WT* m, int len, int scn, int dcn )
{
    int x;

    if( scn == 2 && dcn == 2 )
    {
        for( x = 0; x < len*2; x += 2 )
        {
            WT v0 = src[x], v1 = src[x+1];
            T t0 = saturate_cast<T>(m[0]*v0 + m[1]*v1 + m[2]);
            T t1 = saturate_cast<T>(m[3]*v0 + m[4]*v1 + m[5]);
            dst[x] = t0; dst[x+1] = t1;
        }
    }
    else if( scn == 3 && dcn == 3 )
    {
        for( x = 0; x < len*3; x += 3 )
        {
            WT v0 = src[x], v1 = src[x+1], v2 = src[x+2];
            T t0 = saturate_cast<T>(m[0]*v0 + m[1]*v1 + m[2]*v2 + m[3]);
            T t1 = saturate_cast<T>(m[4]*v0 + m[5]*v1 + m[6]*v2 + m[7]);
            T t2 = saturate_cast<T>(m[8]*v0 + m[9]*v1 + m[10]*v2 + m[11]);
            dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2;
        }
    }
    else if( scn == 3 && dcn == 1 )
    {
        for( x = 0; x < len; x++, src += 3 )
            dst[x] = saturate_cast<T>(m[0]*src[0] + m[1]*src[1] + m[2]*src[2] + m[3]);
    }
    else if( scn == 4 && dcn == 4 )
    {
        for( x = 0; x < len*4; x += 4 )
        {
            WT v0 = src[x], v1 = src[x+1], v2 = src[x+2], v3 = src[x+3];
            T t0 = saturate_cast<T>(m[0]*v0 + m[1]*v1 + m[2]*v2 + m[3]*v3 + m[4]);
            T t1 = saturate_cast<T>(m[5]*v0 + m[6]*v1 + m[7]*v2 + m[8]*v3 + m[9]);
            dst[x] = t0; dst[x+1] = t1;
            t0 = saturate_cast<T>(m[10]*v0 + m[11]*v1 + m[12]*v2 + m[13]*v3 + m[14]);
            t1 = saturate_cast<T>(m[15]*v0 + m[16]*v1 + m[17]*v2 + m[18]*v3 + m[19]);
            dst[x+2] = t0; dst[x+3] = t1;
        }
    }
    else
    {
        for( x = 0; x < len; x++, src += scn, dst += dcn )
        {
            const WT* _m = m;
            int j, k;
            for( j = 0; j < dcn; j++, _m += scn + 1 )
            {
                WT s = _m[scn];
                for( k = 0; k < scn; k++ )
                    s += _m[k]*src[k];
                dst[j] = saturate_cast<T>(s);
            }
        }
    }
}
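
// transform_ applies dst = M * [src; 1] per element, where M is a dcn x (scn+1)
// matrix stored row-major in m; for output channel j:
//     dst[j] = m[j*(scn+1)]*src[0] + ... + m[j*(scn+1)+scn-1]*src[scn-1] + m[j*(scn+1)+scn]
// The depth-specific versions below only add SIMD fast paths for common
// channel counts and otherwise fall back to this generic loop.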

static void
transform_8u( const uchar* src, uchar* dst, const float* m, int len, int scn, int dcn )
{
#if CV_SIMD
    const int BITS = 10, SCALE = 1 << BITS;
    const float MAX_M = (float)(1 << (15 - BITS));

    if( scn == 3 && dcn == 3 &&
        std::abs(m[0]) < MAX_M && std::abs(m[1]) < MAX_M && std::abs(m[ 2]) < MAX_M*256 && std::abs(m[ 3]) < MAX_M*256 &&
        std::abs(m[4]) < MAX_M && std::abs(m[5]) < MAX_M && std::abs(m[ 6]) < MAX_M*256 && std::abs(m[ 7]) < MAX_M*256 &&
        std::abs(m[8]) < MAX_M && std::abs(m[9]) < MAX_M && std::abs(m[10]) < MAX_M*256 && std::abs(m[11]) < MAX_M*256 )
    {
        const int nChannels = 3;

        union
        {
            short s[6];
            int p[3];
        } m16;
        m16.s[0] = saturate_cast<short>(m[0] * SCALE); m16.s[1] = saturate_cast<short>(m[1] * SCALE);
        m16.s[2] = saturate_cast<short>(m[4] * SCALE); m16.s[3] = saturate_cast<short>(m[5] * SCALE);
        m16.s[4] = saturate_cast<short>(m[8] * SCALE); m16.s[5] = saturate_cast<short>(m[9] * SCALE);
        int m32[] = {saturate_cast<int>(m[ 2] * SCALE), saturate_cast<int>(m[ 3] * SCALE),
                     saturate_cast<int>(m[ 6] * SCALE), saturate_cast<int>(m[ 7] * SCALE),
                     saturate_cast<int>(m[10] * SCALE), saturate_cast<int>(m[11] * SCALE)};
        v_int16 m01 = v_reinterpret_as_s16(vx_setall_s32(m16.p[0]));
        v_int32 m2 = vx_setall_s32(m32[0]);
        v_int32 m3 = vx_setall_s32(m32[1]);
        v_int16 m45 = v_reinterpret_as_s16(vx_setall_s32(m16.p[1]));
        v_int32 m6 = vx_setall_s32(m32[2]);
        v_int32 m7 = vx_setall_s32(m32[3]);
        v_int16 m89 = v_reinterpret_as_s16(vx_setall_s32(m16.p[2]));
        v_int32 m10 = vx_setall_s32(m32[4]);
        v_int32 m11 = vx_setall_s32(m32[5]);
        int x = 0;
        for (; x <= (len - v_uint8::nlanes) * nChannels; x += v_uint8::nlanes * nChannels)
        {
            v_uint8 b, g, r;
            v_load_deinterleave(src + x, b, g, r);
            v_uint8 bgl, bgh;
            v_zip(b, g, bgl, bgh);
            v_uint16 rl, rh;
            v_expand(r, rl, rh);

            v_int16 dbl, dbh, dgl, dgh, drl, drh;
            v_uint16 p0, p2;
            v_int32 p1, p3;
            v_expand(bgl, p0, p2);
            v_expand(v_reinterpret_as_s16(rl), p1, p3);
            dbl = v_rshr_pack<BITS>(v_dotprod(v_reinterpret_as_s16(p0), m01) + p1 * m2 + m3,
                                    v_dotprod(v_reinterpret_as_s16(p2), m01) + p3 * m2 + m3);
            dgl = v_rshr_pack<BITS>(v_dotprod(v_reinterpret_as_s16(p0), m45) + p1 * m6 + m7,
                                    v_dotprod(v_reinterpret_as_s16(p2), m45) + p3 * m6 + m7);
            drl = v_rshr_pack<BITS>(v_dotprod(v_reinterpret_as_s16(p0), m89) + p1 * m10 + m11,
                                    v_dotprod(v_reinterpret_as_s16(p2), m89) + p3 * m10 + m11);
            v_expand(bgh, p0, p2);
            v_expand(v_reinterpret_as_s16(rh), p1, p3);
            dbh = v_rshr_pack<BITS>(v_dotprod(v_reinterpret_as_s16(p0), m01) + p1 * m2 + m3,
                                    v_dotprod(v_reinterpret_as_s16(p2), m01) + p3 * m2 + m3);
            dgh = v_rshr_pack<BITS>(v_dotprod(v_reinterpret_as_s16(p0), m45) + p1 * m6 + m7,
                                    v_dotprod(v_reinterpret_as_s16(p2), m45) + p3 * m6 + m7);
            drh = v_rshr_pack<BITS>(v_dotprod(v_reinterpret_as_s16(p0), m89) + p1 * m10 + m11,
                                    v_dotprod(v_reinterpret_as_s16(p2), m89) + p3 * m10 + m11);
            v_store_interleave(dst + x, v_pack_u(dbl, dbh), v_pack_u(dgl, dgh), v_pack_u(drl, drh));
        }
        m32[1] = saturate_cast<int>((m[3] + 0.5f)*SCALE);
        m32[3] = saturate_cast<int>((m[7] + 0.5f)*SCALE);
        m32[5] = saturate_cast<int>((m[11] + 0.5f)*SCALE);
        for( ; x < len * nChannels; x += nChannels )
        {
            int v0 = src[x], v1 = src[x+1], v2 = src[x+2];
            uchar t0 = saturate_cast<uchar>((m16.s[0] * v0 + m16.s[1] * v1 + m32[0] * v2 + m32[1]) >> BITS);
            uchar t1 = saturate_cast<uchar>((m16.s[2] * v0 + m16.s[3] * v1 + m32[2] * v2 + m32[3]) >> BITS);
            uchar t2 = saturate_cast<uchar>((m16.s[4] * v0 + m16.s[5] * v1 + m32[4] * v2 + m32[5]) >> BITS);
            dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2;
        }
        vx_cleanup();
        return;
    }
#endif

    transform_(src, dst, m, len, scn, dcn);
}
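
// The 8u fast path above works in fixed point: weights are pre-scaled by
// SCALE = 1<<BITS and packed into int16/int32, v_rshr_pack performs the
// rounding right shift, and the scalar tail folds its rounding constant
// (+0.5) into the offset terms m32[1]/m32[3]/m32[5]. The MAX_M bounds ensure
// the scaled weights cannot overflow 16-bit products for 8-bit inputs;
// anything larger falls back to the generic float implementation.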

static void
transform_16u( const ushort* src, ushort* dst, const float* m, int len, int scn, int dcn )
{
#if CV_SIMD && !defined(__aarch64__)
    if( scn == 3 && dcn == 3 )
    {
        int x = 0;
#if CV_SIMD_WIDTH > 16
        v_float32 m0  = vx_setall_f32(m[ 0]);
        v_float32 m1  = vx_setall_f32(m[ 1]);
        v_float32 m2  = vx_setall_f32(m[ 2]);
        v_float32 m3  = vx_setall_f32(m[ 3] - 32768.f);
        v_float32 m4  = vx_setall_f32(m[ 4]);
        v_float32 m5  = vx_setall_f32(m[ 5]);
        v_float32 m6  = vx_setall_f32(m[ 6]);
        v_float32 m7  = vx_setall_f32(m[ 7] - 32768.f);
        v_float32 m8  = vx_setall_f32(m[ 8]);
        v_float32 m9  = vx_setall_f32(m[ 9]);
        v_float32 m10 = vx_setall_f32(m[10]);
        v_float32 m11 = vx_setall_f32(m[11] - 32768.f);
        v_int16 delta = vx_setall_s16(-32768);
        for (; x <= (len - v_uint16::nlanes)*3; x += v_uint16::nlanes*3)
        {
            v_uint16 b, g, r;
            v_load_deinterleave(src + x, b, g, r);
            v_uint32 bl, bh, gl, gh, rl, rh;
            v_expand(b, bl, bh);
            v_expand(g, gl, gh);
            v_expand(r, rl, rh);

            v_int16 db, dg, dr;
            db = v_add_wrap(v_pack(v_round(v_muladd(v_cvt_f32(v_reinterpret_as_s32(bl)), m0, v_muladd(v_cvt_f32(v_reinterpret_as_s32(gl)), m1, v_muladd(v_cvt_f32(v_reinterpret_as_s32(rl)), m2, m3)))),
                                   v_round(v_muladd(v_cvt_f32(v_reinterpret_as_s32(bh)), m0, v_muladd(v_cvt_f32(v_reinterpret_as_s32(gh)), m1, v_muladd(v_cvt_f32(v_reinterpret_as_s32(rh)), m2, m3))))), delta);
            dg = v_add_wrap(v_pack(v_round(v_muladd(v_cvt_f32(v_reinterpret_as_s32(bl)), m4, v_muladd(v_cvt_f32(v_reinterpret_as_s32(gl)), m5, v_muladd(v_cvt_f32(v_reinterpret_as_s32(rl)), m6, m7)))),
                                   v_round(v_muladd(v_cvt_f32(v_reinterpret_as_s32(bh)), m4, v_muladd(v_cvt_f32(v_reinterpret_as_s32(gh)), m5, v_muladd(v_cvt_f32(v_reinterpret_as_s32(rh)), m6, m7))))), delta);
            dr = v_add_wrap(v_pack(v_round(v_muladd(v_cvt_f32(v_reinterpret_as_s32(bl)), m8, v_muladd(v_cvt_f32(v_reinterpret_as_s32(gl)), m9, v_muladd(v_cvt_f32(v_reinterpret_as_s32(rl)), m10, m11)))),
                                   v_round(v_muladd(v_cvt_f32(v_reinterpret_as_s32(bh)), m8, v_muladd(v_cvt_f32(v_reinterpret_as_s32(gh)), m9, v_muladd(v_cvt_f32(v_reinterpret_as_s32(rh)), m10, m11))))), delta);
            v_store_interleave(dst + x, v_reinterpret_as_u16(db), v_reinterpret_as_u16(dg), v_reinterpret_as_u16(dr));
        }
#endif // CV_SIMD_WIDTH > 16
        v_float32x4 _m0l(m[0], m[4], m[ 8], 0.f);
        v_float32x4 _m1l(m[1], m[5], m[ 9], 0.f);
        v_float32x4 _m2l(m[2], m[6], m[10], 0.f);
        v_float32x4 _m3l(m[3] - 32768.f, m[7] - 32768.f, m[11] - 32768.f, 0.f);
        v_float32x4 _m0h = v_rotate_left<1>(_m0l);
        v_float32x4 _m1h = v_rotate_left<1>(_m1l);
        v_float32x4 _m2h = v_rotate_left<1>(_m2l);
        v_float32x4 _m3h = v_rotate_left<1>(_m3l);
        v_int16x8 _delta(0, -32768, -32768, -32768, -32768, -32768, -32768, 0);
        for( ; x <= len*3 - v_uint16x8::nlanes; x += 3*v_uint16x8::nlanes/4 )
            v_store(dst + x, v_rotate_right<1>(v_reinterpret_as_u16(v_add_wrap(v_pack(
                v_round(v_matmuladd(v_cvt_f32(v_reinterpret_as_s32(v_load_expand(src + x    ))), _m0h, _m1h, _m2h, _m3h)),
                v_round(v_matmuladd(v_cvt_f32(v_reinterpret_as_s32(v_load_expand(src + x + 3))), _m0l, _m1l, _m2l, _m3l))), _delta))));
        for( ; x < len * 3; x += 3 )
        {
            float v0 = src[x], v1 = src[x + 1], v2 = src[x + 2];
            ushort t0 = saturate_cast<ushort>(m[0] * v0 + m[1] * v1 + m[ 2] * v2 + m[ 3]);
            ushort t1 = saturate_cast<ushort>(m[4] * v0 + m[5] * v1 + m[ 6] * v2 + m[ 7]);
            ushort t2 = saturate_cast<ushort>(m[8] * v0 + m[9] * v1 + m[10] * v2 + m[11]);
            dst[x] = t0; dst[x + 1] = t1; dst[x + 2] = t2;
        }
        vx_cleanup();
        return;
    }
#endif

    transform_(src, dst, m, len, scn, dcn);
}

static void
transform_32f( const float* src, float* dst, const float* m, int len, int scn, int dcn )
{
#if CV_SIMD && !defined(__aarch64__)
    int x = 0;
    if( scn == 3 && dcn == 3 )
    {
        int idx[v_float32::nlanes/2];
        for( int i = 0; i < v_float32::nlanes/4; i++ )
        {
            idx[i] = 3*i;
            idx[i + v_float32::nlanes/4] = 0;
        }
        float _m[] = { m[0], m[4], m[ 8], 0.f,
                       m[1], m[5], m[ 9], 0.f,
                       m[2], m[6], m[10], 0.f,
                       m[3], m[7], m[11], 0.f };
        v_float32 m0 = vx_lut_quads(_m     , idx + v_float32::nlanes/4);
        v_float32 m1 = vx_lut_quads(_m +  4, idx + v_float32::nlanes/4);
        v_float32 m2 = vx_lut_quads(_m +  8, idx + v_float32::nlanes/4);
        v_float32 m3 = vx_lut_quads(_m + 12, idx + v_float32::nlanes/4);
        for( ; x <= len*3 - v_float32::nlanes; x += 3*v_float32::nlanes/4 )
            v_store(dst + x, v_pack_triplets(v_matmuladd(vx_lut_quads(src + x, idx), m0, m1, m2, m3)));
        for( ; x < len*3; x += 3 )
        {
            float v0 = src[x], v1 = src[x+1], v2 = src[x+2];
            float t0 = saturate_cast<float>(m[0]*v0 + m[1]*v1 + m[ 2]*v2 + m[ 3]);
            float t1 = saturate_cast<float>(m[4]*v0 + m[5]*v1 + m[ 6]*v2 + m[ 7]);
            float t2 = saturate_cast<float>(m[8]*v0 + m[9]*v1 + m[10]*v2 + m[11]);
            dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2;
        }
        vx_cleanup();
        return;
    }

    if( scn == 4 && dcn == 4 )
    {
#if CV_SIMD_WIDTH > 16
        int idx[v_float32::nlanes/4];
        for( int i = 0; i < v_float32::nlanes/4; i++ )
            idx[i] = 0;
        float _m[] = { m[4], m[9], m[14], m[19] };
        v_float32 m0 = vx_lut_quads(m   , idx);
        v_float32 m1 = vx_lut_quads(m+ 5, idx);
        v_float32 m2 = vx_lut_quads(m+10, idx);
        v_float32 m3 = vx_lut_quads(m+15, idx);
        v_float32 m4 = vx_lut_quads(_m, idx);
        for( ; x <= len*4 - v_float32::nlanes; x += v_float32::nlanes )
        {
            v_float32 v_src = vx_load(src + x);
            v_store(dst + x, v_reduce_sum4(v_src * m0, v_src * m1, v_src * m2, v_src * m3) + m4);
        }
#endif
        v_float32x4 _m0 = v_load(m     );
        v_float32x4 _m1 = v_load(m +  5);
        v_float32x4 _m2 = v_load(m + 10);
        v_float32x4 _m3 = v_load(m + 15);
        v_float32x4 _m4(m[4], m[9], m[14], m[19]);
        for( ; x < len*4; x += v_float32x4::nlanes )
        {
            v_float32x4 v_src = v_load(src + x);
            v_store(dst + x, v_reduce_sum4(v_src * _m0, v_src * _m1, v_src * _m2, v_src * _m3) + _m4);
        }
        vx_cleanup();
        return;
    }
#endif

    transform_(src, dst, m, len, scn, dcn);
}

static void
transform_8s(const schar* src, schar* dst, const float* m, int len, int scn, int dcn)
{
    transform_(src, dst, m, len, scn, dcn);
}

static void
transform_16s(const short* src, short* dst, const float* m, int len, int scn, int dcn)
{
    transform_(src, dst, m, len, scn, dcn);
}

static void
transform_32s(const int* src, int* dst, const double* m, int len, int scn, int dcn)
{
    transform_(src, dst, m, len, scn, dcn);
}

static void
transform_64f(const double* src, double* dst, const double* m, int len, int scn, int dcn)
{
    transform_(src, dst, m, len, scn, dcn);
}

template<typename T, typename WT> static void
diagtransform_( const T* src, T* dst, const WT* m, int len, int cn, int )
{
    int x;

    if( cn == 2 )
    {
        for( x = 0; x < len*2; x += 2 )
        {
            T t0 = saturate_cast<T>(m[0]*src[x] + m[2]);
            T t1 = saturate_cast<T>(m[4]*src[x+1] + m[5]);
            dst[x] = t0; dst[x+1] = t1;
        }
    }
    else if( cn == 3 )
    {
        for( x = 0; x < len*3; x += 3 )
        {
            T t0 = saturate_cast<T>(m[0]*src[x] + m[3]);
            T t1 = saturate_cast<T>(m[5]*src[x+1] + m[7]);
            T t2 = saturate_cast<T>(m[10]*src[x+2] + m[11]);
            dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2;
        }
    }
    else if( cn == 4 )
    {
        for( x = 0; x < len*4; x += 4 )
        {
            T t0 = saturate_cast<T>(m[0]*src[x] + m[4]);
            T t1 = saturate_cast<T>(m[6]*src[x+1] + m[9]);
            dst[x] = t0; dst[x+1] = t1;
            t0 = saturate_cast<T>(m[12]*src[x+2] + m[14]);
            t1 = saturate_cast<T>(m[18]*src[x+3] + m[19]);
            dst[x+2] = t0; dst[x+3] = t1;
        }
    }
    else
    {
        for( x = 0; x < len; x++, src += cn, dst += cn )
        {
            const WT* _m = m;
            for( int j = 0; j < cn; j++, _m += cn + 1 )
                dst[j] = saturate_cast<T>(src[j]*_m[j] + _m[cn]);
        }
    }
}

static void
diagtransform_8u(const uchar* src, uchar* dst, const float* m, int len, int scn, int dcn)
{
    diagtransform_(src, dst, m, len, scn, dcn);
}

static void
diagtransform_8s(const schar* src, schar* dst, const float* m, int len, int scn, int dcn)
{
    diagtransform_(src, dst, m, len, scn, dcn);
}

static void
diagtransform_16u(const ushort* src, ushort* dst, const float* m, int len, int scn, int dcn)
{
    diagtransform_(src, dst, m, len, scn, dcn);
}

static void
diagtransform_16s(const short* src, short* dst, const float* m, int len, int scn, int dcn)
{
    diagtransform_(src, dst, m, len, scn, dcn);
}

static void
diagtransform_32s(const int* src, int* dst, const double* m, int len, int scn, int dcn)
{
    diagtransform_(src, dst, m, len, scn, dcn);
}

static void
diagtransform_32f(const float* src, float* dst, const float* m, int len, int scn, int dcn)
{
    diagtransform_(src, dst, m, len, scn, dcn);
}

static void
diagtransform_64f(const double* src, double* dst, const double* m, int len, int scn, int dcn)
{
    diagtransform_(src, dst, m, len, scn, dcn);
}
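
// diagtransform_ is the special case of transform_ where the matrix is
// diagonal apart from its last (shift) column, so every channel is just
// scaled and shifted independently; the dispatch layer is expected to route
// cv::transform() here after detecting such a matrix.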

TransformFunc getTransformFunc(int depth)
{
    static TransformFunc transformTab[] =
    {
        (TransformFunc)transform_8u, (TransformFunc)transform_8s, (TransformFunc)transform_16u,
        (TransformFunc)transform_16s, (TransformFunc)transform_32s, (TransformFunc)transform_32f,
        (TransformFunc)transform_64f, 0
    };

    return transformTab[depth];
}

TransformFunc getDiagTransformFunc(int depth)
{
    static TransformFunc diagTransformTab[] =
    {
        (TransformFunc)diagtransform_8u, (TransformFunc)diagtransform_8s, (TransformFunc)diagtransform_16u,
        (TransformFunc)diagtransform_16s, (TransformFunc)diagtransform_32s, (TransformFunc)diagtransform_32f,
        (TransformFunc)diagtransform_64f, 0
    };

    return diagTransformTab[depth];
}

/****************************************************************************************\
*                                  Perspective Transform                                 *
\****************************************************************************************/

template<typename T> static void
perspectiveTransform_( const T* src, T* dst, const double* m, int len, int scn, int dcn )
{
    const double eps = FLT_EPSILON;
    int i;

    if( scn == 2 && dcn == 2 )
    {
        for( i = 0; i < len*2; i += 2 )
        {
            T x = src[i], y = src[i + 1];
            double w = x*m[6] + y*m[7] + m[8];

            if( fabs(w) > eps )
            {
                w = 1./w;
                dst[i] = (T)((x*m[0] + y*m[1] + m[2])*w);
                dst[i+1] = (T)((x*m[3] + y*m[4] + m[5])*w);
            }
            else
                dst[i] = dst[i+1] = (T)0;
        }
    }
    else if( scn == 3 && dcn == 3 )
    {
        for( i = 0; i < len*3; i += 3 )
        {
            T x = src[i], y = src[i + 1], z = src[i + 2];
            double w = x*m[12] + y*m[13] + z*m[14] + m[15];

            if( fabs(w) > eps )
            {
                w = 1./w;
                dst[i] = (T)((x*m[0] + y*m[1] + z*m[2] + m[3]) * w);
                dst[i+1] = (T)((x*m[4] + y*m[5] + z*m[6] + m[7]) * w);
                dst[i+2] = (T)((x*m[8] + y*m[9] + z*m[10] + m[11]) * w);
            }
            else
                dst[i] = dst[i+1] = dst[i+2] = (T)0;
        }
    }
    else if( scn == 3 && dcn == 2 )
    {
        for( i = 0; i < len; i++, src += 3, dst += 2 )
        {
            T x = src[0], y = src[1], z = src[2];
            double w = x*m[8] + y*m[9] + z*m[10] + m[11];

            if( fabs(w) > eps )
            {
                w = 1./w;
                dst[0] = (T)((x*m[0] + y*m[1] + z*m[2] + m[3])*w);
                dst[1] = (T)((x*m[4] + y*m[5] + z*m[6] + m[7])*w);
            }
            else
                dst[0] = dst[1] = (T)0;
        }
    }
    else
    {
        for( i = 0; i < len; i++, src += scn, dst += dcn )
        {
            const double* _m = m + dcn*(scn + 1);
            double w = _m[scn];
            int j, k;
            for( k = 0; k < scn; k++ )
                w += _m[k]*src[k];
            if( fabs(w) > eps )
            {
                w = 1./w;
                _m = m;
                for( j = 0; j < dcn; j++, _m += scn + 1 )
                {
                    double s = _m[scn];
                    for( k = 0; k < scn; k++ )
                        s += _m[k]*src[k];
                    dst[j] = (T)(s*w);
                }
            }
            else
                for( j = 0; j < dcn; j++ )
                    dst[j] = 0;
        }
    }
}

static void
perspectiveTransform_32f(const float* src, float* dst, const double* m, int len, int scn, int dcn)
{
    perspectiveTransform_(src, dst, m, len, scn, dcn);
}

static void
perspectiveTransform_64f(const double* src, double* dst, const double* m, int len, int scn, int dcn)
{
    perspectiveTransform_(src, dst, m, len, scn, dcn);
}

TransformFunc getPerspectiveTransform(int depth)
{
    if (depth == CV_32F)
        return (TransformFunc)perspectiveTransform_32f;
    if (depth == CV_64F)
        return (TransformFunc)perspectiveTransform_64f;
    CV_Assert(0 && "Not supported");
}
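
// Math recap: each point is mapped projectively and divided by its last
// homogeneous component; e.g. for scn == 2, dcn == 2 with a 3x3 matrix m:
//     w   = m[6]*x + m[7]*y + m[8]
//     dst = ( (m[0]*x + m[1]*y + m[2])/w, (m[3]*x + m[4]*y + m[5])/w )
// and dst is set to 0 when |w| <= FLT_EPSILON. This is the kernel behind the
// public cv::perspectiveTransform().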

/****************************************************************************************\
*                                       ScaleAdd                                         *
\****************************************************************************************/

static void scaleAdd_32f(const float* src1, const float* src2, float* dst,
                         int len, float* _alpha)
{
    float alpha = *_alpha;
    int i = 0;
#if CV_SIMD
    v_float32 v_alpha = vx_setall_f32(alpha);
    const int cWidth = v_float32::nlanes;
    for (; i <= len - cWidth; i += cWidth)
        v_store(dst + i, v_muladd(vx_load(src1 + i), v_alpha, vx_load(src2 + i)));
    vx_cleanup();
#endif
    for (; i < len; i++)
        dst[i] = src1[i] * alpha + src2[i];
}


static void scaleAdd_64f(const double* src1, const double* src2, double* dst,
                         int len, double* _alpha)
{
    double alpha = *_alpha;
    int i = 0;
#if CV_SIMD_64F
    v_float64 a2 = vx_setall_f64(alpha);
    const int cWidth = v_float64::nlanes;
    for (; i <= len - cWidth; i += cWidth)
        v_store(dst + i, v_muladd(vx_load(src1 + i), a2, vx_load(src2 + i)));
    vx_cleanup();
#endif
    for (; i < len; i++)
        dst[i] = src1[i] * alpha + src2[i];
}

ScaleAddFunc getScaleAddFunc(int depth)
{
    if (depth == CV_32F)
        return (ScaleAddFunc)scaleAdd_32f;
    if (depth == CV_64F)
        return (ScaleAddFunc)scaleAdd_64f;
    CV_Assert(0 && "Not supported");
}
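
// Usage sketch (illustrative only): these kernels back cv::scaleAdd(), the
// BLAS-style "axpy" operation:
//
//     cv::Mat X(1, 8, CV_32F), Y(1, 8, CV_32F), dst;
//     cv::scaleAdd(X, 0.5, Y, dst);  // dst = 0.5*X + Y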

/****************************************************************************************\
*                                      Mahalanobis                                       *
\****************************************************************************************/

template<typename T> static inline
double MahalanobisImpl(const Mat& v1, const Mat& v2, const Mat& icovar, double *diff_buffer /*[len]*/, int len /*=v1.total()*/)
{
    CV_INSTRUMENT_REGION();

    Size sz = v1.size();
    double result = 0;

    sz.width *= v1.channels();
    if (v1.isContinuous() && v2.isContinuous())
    {
        sz.width *= sz.height;
        sz.height = 1;
    }

    {
        const T* src1 = v1.ptr<T>();
        const T* src2 = v2.ptr<T>();
        size_t step1 = v1.step/sizeof(src1[0]);
        size_t step2 = v2.step/sizeof(src2[0]);
        double* diff = diff_buffer;
        const T* mat = icovar.ptr<T>();
        size_t matstep = icovar.step/sizeof(mat[0]);

        for (; sz.height--; src1 += step1, src2 += step2, diff += sz.width)
        {
            for (int i = 0; i < sz.width; i++)
                diff[i] = src1[i] - src2[i];
        }

        diff = diff_buffer;
        for (int i = 0; i < len; i++, mat += matstep)
        {
            double row_sum = 0;
            int j = 0;
#if CV_ENABLE_UNROLLED
            for(; j <= len - 4; j += 4 )
                row_sum += diff[j]*mat[j] + diff[j+1]*mat[j+1] +
                           diff[j+2]*mat[j+2] + diff[j+3]*mat[j+3];
#endif
            for (; j < len; j++)
                row_sum += diff[j]*mat[j];
            result += row_sum * diff[i];
        }
    }
    return result;
}

MahalanobisImplFunc getMahalanobisImplFunc(int depth)
{
    if (depth == CV_32F)
        return (MahalanobisImplFunc)MahalanobisImpl<float>;
    if (depth == CV_64F)
        return (MahalanobisImplFunc)MahalanobisImpl<double>;
    CV_Assert(0 && "Not supported");
}
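
// MahalanobisImpl returns the quadratic form d^T * icovar * d with d = v1 - v2;
// the dispatch-level cv::Mahalanobis() is expected to take the square root of
// this value to obtain the actual distance.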

#if !defined(CV_MULTRANSPOSED_BASELINE_ONLY) || defined(CV_CPU_BASELINE_MODE)
/****************************************************************************************\
*                                     MulTransposed                                      *
\****************************************************************************************/

template<typename sT, typename dT> static void
MulTransposedR(const Mat& srcmat, const Mat& dstmat, const Mat& deltamat, double scale)
{
    int i, j, k;
    const sT* src = srcmat.ptr<sT>();
    dT* dst = (dT*)dstmat.ptr<dT>();
    const dT* delta = deltamat.ptr<dT>();
    size_t srcstep = srcmat.step/sizeof(src[0]);
    size_t dststep = dstmat.step/sizeof(dst[0]);
    size_t deltastep = deltamat.rows > 1 ? deltamat.step/sizeof(delta[0]) : 0;
    int delta_cols = deltamat.cols;
    Size size = srcmat.size();
    dT* tdst = dst;
    dT* col_buf = 0;
    dT* delta_buf = 0;
    int buf_size = size.height*sizeof(dT);
    AutoBuffer<uchar> buf;

    if( delta && delta_cols < size.width )
    {
        assert( delta_cols == 1 );
        buf_size *= 5;
    }
    buf.allocate(buf_size);
    col_buf = (dT*)buf.data();

    if( delta && delta_cols < size.width )
    {
        delta_buf = col_buf + size.height;
        for( i = 0; i < size.height; i++ )
            delta_buf[i*4] = delta_buf[i*4+1] =
                delta_buf[i*4+2] = delta_buf[i*4+3] = delta[i*deltastep];
        deltastep = deltastep ? 4 : 0;
    }

    if( !delta )
        for( i = 0; i < size.width; i++, tdst += dststep )
        {
            for( k = 0; k < size.height; k++ )
                col_buf[k] = src[k*srcstep+i];

            for( j = i; j <= size.width - 4; j += 4 )
            {
                double s0 = 0, s1 = 0, s2 = 0, s3 = 0;
                const sT *tsrc = src + j;

                for( k = 0; k < size.height; k++, tsrc += srcstep )
                {
                    double a = col_buf[k];
                    s0 += a * tsrc[0];
                    s1 += a * tsrc[1];
                    s2 += a * tsrc[2];
                    s3 += a * tsrc[3];
                }

                tdst[j] = (dT)(s0*scale);
                tdst[j+1] = (dT)(s1*scale);
                tdst[j+2] = (dT)(s2*scale);
                tdst[j+3] = (dT)(s3*scale);
            }

            for( ; j < size.width; j++ )
            {
                double s0 = 0;
                const sT *tsrc = src + j;

                for( k = 0; k < size.height; k++, tsrc += srcstep )
                    s0 += (double)col_buf[k] * tsrc[0];

                tdst[j] = (dT)(s0*scale);
            }
        }
    else
        for( i = 0; i < size.width; i++, tdst += dststep )
        {
            if( !delta_buf )
                for( k = 0; k < size.height; k++ )
                    col_buf[k] = src[k*srcstep+i] - delta[k*deltastep+i];
            else
                for( k = 0; k < size.height; k++ )
                    col_buf[k] = src[k*srcstep+i] - delta_buf[k*deltastep];

            for( j = i; j <= size.width - 4; j += 4 )
            {
                double s0 = 0, s1 = 0, s2 = 0, s3 = 0;
                const sT *tsrc = src + j;
                const dT *d = delta_buf ? delta_buf : delta + j;

                for( k = 0; k < size.height; k++, tsrc+=srcstep, d+=deltastep )
                {
                    double a = col_buf[k];
                    s0 += a * (tsrc[0] - d[0]);
                    s1 += a * (tsrc[1] - d[1]);
                    s2 += a * (tsrc[2] - d[2]);
                    s3 += a * (tsrc[3] - d[3]);
                }

                tdst[j] = (dT)(s0*scale);
                tdst[j+1] = (dT)(s1*scale);
                tdst[j+2] = (dT)(s2*scale);
                tdst[j+3] = (dT)(s3*scale);
            }

            for( ; j < size.width; j++ )
            {
                double s0 = 0;
                const sT *tsrc = src + j;
                const dT *d = delta_buf ? delta_buf : delta + j;

                for( k = 0; k < size.height; k++, tsrc+=srcstep, d+=deltastep )
                    s0 += (double)col_buf[k] * (tsrc[0] - d[0]);

                tdst[j] = (dT)(s0*scale);
            }
        }
}
template<typename sT, typename dT> static void
MulTransposedL(const Mat& srcmat, const Mat& dstmat, const Mat& deltamat, double scale)
{
    int i, j, k;
    const sT* src = srcmat.ptr<sT>();
    dT* dst = (dT*)dstmat.ptr<dT>();
    const dT* delta = deltamat.ptr<dT>();
    size_t srcstep = srcmat.step/sizeof(src[0]);
    size_t dststep = dstmat.step/sizeof(dst[0]);
    size_t deltastep = deltamat.rows > 1 ? deltamat.step/sizeof(delta[0]) : 0;
    int delta_cols = deltamat.cols;
    Size size = srcmat.size();
    dT* tdst = dst;

    if( !delta )
        for( i = 0; i < size.height; i++, tdst += dststep )
            for( j = i; j < size.height; j++ )
            {
                double s = 0;
                const sT *tsrc1 = src + i*srcstep;
                const sT *tsrc2 = src + j*srcstep;

                for( k = 0; k <= size.width - 4; k += 4 )
                    s += (double)tsrc1[k]*tsrc2[k] + (double)tsrc1[k+1]*tsrc2[k+1] +
                         (double)tsrc1[k+2]*tsrc2[k+2] + (double)tsrc1[k+3]*tsrc2[k+3];
                for( ; k < size.width; k++ )
                    s += (double)tsrc1[k] * tsrc2[k];
                tdst[j] = (dT)(s*scale);
            }
    else
    {
        dT delta_buf[4];
        int delta_shift = delta_cols == size.width ? 4 : 0;
        AutoBuffer<uchar> buf(size.width*sizeof(dT));
        dT* row_buf = (dT*)buf.data();

        for( i = 0; i < size.height; i++, tdst += dststep )
        {
            const sT *tsrc1 = src + i*srcstep;
            const dT *tdelta1 = delta + i*deltastep;

            // precompute the delta-corrected i-th row once per output row
            if( delta_cols < size.width )
                for( k = 0; k < size.width; k++ )
                    row_buf[k] = tsrc1[k] - tdelta1[0];
            else
                for( k = 0; k < size.width; k++ )
                    row_buf[k] = tsrc1[k] - tdelta1[k];

            for( j = i; j < size.height; j++ )
            {
                double s = 0;
                const sT *tsrc2 = src + j*srcstep;
                const dT *tdelta2 = delta + j*deltastep;
                if( delta_cols < size.width )
                {
                    delta_buf[0] = delta_buf[1] =
                        delta_buf[2] = delta_buf[3] = tdelta2[0];
                    tdelta2 = delta_buf;
                }
                for( k = 0; k <= size.width-4; k += 4, tdelta2 += delta_shift )
                    s += (double)row_buf[k]*(tsrc2[k] - tdelta2[0]) +
                         (double)row_buf[k+1]*(tsrc2[k+1] - tdelta2[1]) +
                         (double)row_buf[k+2]*(tsrc2[k+2] - tdelta2[2]) +
                         (double)row_buf[k+3]*(tsrc2[k+3] - tdelta2[3]);
                for( ; k < size.width; k++, tdelta2++ )
                    s += (double)row_buf[k]*(tsrc2[k] - tdelta2[0]);
                tdst[j] = (dT)(s*scale);
            }
        }
    }
}
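
// Dispatch helper: picks the kernel for a given source depth / destination depth /
// multiplication order. 'ata' selects dst = scale*src'*src (MulTransposedR) versus
// dst = scale*src*src' (MulTransposedL); the supported (stype, dtype) pairs below
// mirror what cv::mulTransposed() accepts.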
MulTransposedFunc getMulTransposedFunc(int stype, int dtype, bool ata)
{
    MulTransposedFunc func = NULL;
    if (stype == CV_8U && dtype == CV_32F)
    {
        func = ata ? MulTransposedR<uchar,float>
                   : MulTransposedL<uchar,float>;
    }
    else if (stype == CV_8U && dtype == CV_64F)
    {
        func = ata ? MulTransposedR<uchar,double>
                   : MulTransposedL<uchar,double>;
    }
    else if (stype == CV_16U && dtype == CV_32F)
    {
        func = ata ? MulTransposedR<ushort,float>
                   : MulTransposedL<ushort,float>;
    }
    else if (stype == CV_16U && dtype == CV_64F)
    {
        func = ata ? MulTransposedR<ushort,double>
                   : MulTransposedL<ushort,double>;
    }
    else if (stype == CV_16S && dtype == CV_32F)
    {
        func = ata ? MulTransposedR<short,float>
                   : MulTransposedL<short,float>;
    }
    else if (stype == CV_16S && dtype == CV_64F)
    {
        func = ata ? MulTransposedR<short,double>
                   : MulTransposedL<short,double>;
    }
    else if (stype == CV_32F && dtype == CV_32F)
    {
        func = ata ? MulTransposedR<float,float>
                   : MulTransposedL<float,float>;
    }
    else if (stype == CV_32F && dtype == CV_64F)
    {
        func = ata ? MulTransposedR<float,double>
                   : MulTransposedL<float,double>;
    }
    else if (stype == CV_64F && dtype == CV_64F)
    {
        func = ata ? MulTransposedR<double,double>
                   : MulTransposedL<double,double>;
    }
    CV_Assert(func && "Not supported");
    return func;
}

#endif // !defined(CV_MULTRANSPOSED_BASELINE_ONLY) || defined(CV_CPU_BASELINE_MODE)
/****************************************************************************************\
*                                        Dot Product                                    *
\****************************************************************************************/
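
// Scalar reference implementation. The type-specific variants below take SIMD
// paths (OpenCV universal intrinsics) and fall back to this loop for the tail
// elements; all of them are typically reached via cv::Mat::dot().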
template<typename T> static inline
double dotProd_(const T* src1, const T* src2, int len)
{
    int i = 0;
    double result = 0;

#if CV_ENABLE_UNROLLED
    for( ; i <= len - 4; i += 4 )
        result += (double)src1[i]*src2[i] + (double)src1[i+1]*src2[i+1] +
            (double)src1[i+2]*src2[i+2] + (double)src1[i+3]*src2[i+3];
#endif
    for( ; i < len; i++ )
        result += (double)src1[i]*src2[i];

    return result;
}
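
// The integer variants below share one blocking pattern: partial sums are kept in
// narrow SIMD lanes for at most blockSize0 elements, then flushed into the
// double-precision total before the lanes can overflow. A hedged sketch of the
// pattern (simd_block_sum is a hypothetical helper, not from this file):
//
//   double total = 0;
//   int i = 0;
//   while (i < len)
//   {
//       int block = std::min(len - i, blockSize0);          // overflow-safe chunk
//       total += simd_block_sum(src1 + i, src2 + i, block); // lane sums -> double
//       i += block;
//   }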
double dotProd_8u(const uchar* src1, const uchar* src2, int len)
{
    double r = 0.0;
    int i = 0;
#if CV_SIMD
    int len0 = len & -v_uint16::nlanes, blockSize0 = (1 << 15), blockSize;
    while (i < len0)
    {
        blockSize = std::min(len0 - i, blockSize0);
        v_uint32 v_sum = vx_setzero_u32();
        const int cWidth = v_uint16::nlanes;
        int j = 0;
        for (; j <= blockSize - cWidth * 2; j += cWidth * 2)
        {
            v_uint8 v_src1 = vx_load(src1 + j);
            v_uint8 v_src2 = vx_load(src2 + j);
            v_sum = v_dotprod_expand_fast(v_src1, v_src2, v_sum);
        }
        for (; j <= blockSize - cWidth; j += cWidth)
        {
            v_int16 v_src10 = v_reinterpret_as_s16(vx_load_expand(src1 + j));
            v_int16 v_src20 = v_reinterpret_as_s16(vx_load_expand(src2 + j));
            v_sum += v_reinterpret_as_u32(v_dotprod_fast(v_src10, v_src20));
        }
        r += (double)v_reduce_sum(v_sum);
        src1 += blockSize;
        src2 += blockSize;
        i += blockSize;
    }
    vx_cleanup();
#endif
    return r + dotProd_(src1, src2, len - i);
}
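
// Why blockSize0 == (1 << 15) above appears safe: a uint8*uint8 product is at most
// 255*255 = 65025 < 2^16, so an entire block sums to less than 2^15 * 2^16 = 2^31,
// which fits a uint32 lane with room to spare. The signed variant below uses
// (1 << 14) by the same reasoning (|product| <= 128*128 = 2^14, block sum < 2^28).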
double dotProd_8s(const schar* src1, const schar* src2, int len)
{
    double r = 0.0;
    int i = 0;
#if CV_SIMD
    int len0 = len & -v_int16::nlanes, blockSize0 = (1 << 14), blockSize;
    while (i < len0)
    {
        blockSize = std::min(len0 - i, blockSize0);
        v_int32 v_sum = vx_setzero_s32();
        const int cWidth = v_int16::nlanes;
        int j = 0;
        for (; j <= blockSize - cWidth * 2; j += cWidth * 2)
        {
            v_int8 v_src1 = vx_load(src1 + j);
            v_int8 v_src2 = vx_load(src2 + j);
            v_sum = v_dotprod_expand_fast(v_src1, v_src2, v_sum);
        }
        for (; j <= blockSize - cWidth; j += cWidth)
        {
            v_int16 v_src1 = vx_load_expand(src1 + j);
            v_int16 v_src2 = vx_load_expand(src2 + j);
            v_sum = v_dotprod_fast(v_src1, v_src2, v_sum);
        }
        r += (double)v_reduce_sum(v_sum);
        src1 += blockSize;
        src2 += blockSize;
        i += blockSize;
    }
    vx_cleanup();
#endif
    return r + dotProd_(src1, src2, len - i);
}
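
// The 16-bit variants accumulate into 64-bit lanes, so a far larger block
// (1 << 24 elements) can be summed before a flush: even worst-case 16-bit
// products (< 2^32) total well under 2^64 over such a block (2^24 * 2^32 = 2^56).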
double dotProd_16u(const ushort* src1, const ushort* src2, int len)
{
    double r = 0.0;
    int i = 0;
#if CV_SIMD
    int len0 = len & -v_uint16::nlanes, blockSize0 = (1 << 24), blockSize;
    while (i < len0)
    {
        blockSize = std::min(len0 - i, blockSize0);
        v_uint64 v_sum = vx_setzero_u64();
        const int cWidth = v_uint16::nlanes;
        int j = 0;
        for (; j <= blockSize - cWidth; j += cWidth)
        {
            v_uint16 v_src1 = vx_load(src1 + j);
            v_uint16 v_src2 = vx_load(src2 + j);
            v_sum = v_dotprod_expand_fast(v_src1, v_src2, v_sum);
        }
        r += (double)v_reduce_sum(v_sum);
        src1 += blockSize;
        src2 += blockSize;
        i += blockSize;
    }
    vx_cleanup();
#endif
    return r + dotProd_(src1, src2, len - i);
}
double dotProd_16s(const short* src1, const short* src2, int len)
{
    double r = 0.0;
    int i = 0;
#if CV_SIMD
    int len0 = len & -v_int16::nlanes, blockSize0 = (1 << 24), blockSize;
    while (i < len0)
    {
        blockSize = std::min(len0 - i, blockSize0);
        v_int64 v_sum = vx_setzero_s64();
        const int cWidth = v_int16::nlanes;
        int j = 0;
        for (; j <= blockSize - cWidth; j += cWidth)
        {
            v_int16 v_src1 = vx_load(src1 + j);
            v_int16 v_src2 = vx_load(src2 + j);
            v_sum = v_dotprod_expand_fast(v_src1, v_src2, v_sum);
        }
        r += (double)v_reduce_sum(v_sum);
        src1 += blockSize;
        src2 += blockSize;
        i += blockSize;
    }
    vx_cleanup();
#endif
    return r + dotProd_(src1, src2, len - i);
}
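
// For int32 inputs a single product can already reach about 2^62, leaving no room
// for integer accumulation at all; the products are expanded straight into
// float64 lanes instead, which is why this path requires 64-bit float SIMD support.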
double dotProd_32s(const int* src1, const int* src2, int len)
{
#if CV_SIMD_64F
    double r = 0.0;
    int i = 0;
    const int step = v_int32::nlanes;
    v_float64 v_sum0 = vx_setzero_f64();
#if CV_SIMD_WIDTH == 16
    // two independent accumulators on 128-bit SIMD, presumably to hide latency
    const int wstep = step * 2;
    v_float64 v_sum1 = vx_setzero_f64();
    for (; i < len - wstep; i += wstep, src1 += wstep, src2 += wstep)
    {
        v_int32 v_src10 = vx_load(src1);
        v_int32 v_src20 = vx_load(src2);
        v_int32 v_src11 = vx_load(src1 + step);
        v_int32 v_src21 = vx_load(src2 + step);
        v_sum0 = v_dotprod_expand_fast(v_src10, v_src20, v_sum0);
        v_sum1 = v_dotprod_expand_fast(v_src11, v_src21, v_sum1);
    }
    v_sum0 += v_sum1;
#endif
    for (; i < len - step; i += step, src1 += step, src2 += step)
    {
        v_int32 v_src1 = vx_load(src1);
        v_int32 v_src2 = vx_load(src2);
        v_sum0 = v_dotprod_expand_fast(v_src1, v_src2, v_sum0);
    }
    r = v_reduce_sum(v_sum0);
    vx_cleanup();
    return r + dotProd_(src1, src2, len - i);
#else
    return dotProd_(src1, src2, len);
#endif
}
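
// float32 inputs: partial sums stay in float32 lanes, and blocks are capped at
// 2^13 elements, presumably to keep single-precision accumulation error bounded
// before each flush into the double-precision total.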
double dotProd_32f(const float* src1, const float* src2, int len)
{
    double r = 0.0;
    int i = 0;
#if CV_SIMD
    int len0 = len & -v_float32::nlanes, blockSize0 = (1 << 13), blockSize;
    while (i < len0)
    {
        blockSize = std::min(len0 - i, blockSize0);
        v_float32 v_sum = vx_setzero_f32();
        int j = 0;
        int cWidth = v_float32::nlanes;
#if CV_ENABLE_UNROLLED
        v_float32 v_sum1 = vx_setzero_f32();
        v_float32 v_sum2 = vx_setzero_f32();
        v_float32 v_sum3 = vx_setzero_f32();
        for (; j <= blockSize - (cWidth * 4); j += (cWidth * 4))
        {
            v_sum = v_muladd(vx_load(src1 + j),
                vx_load(src2 + j), v_sum);
            v_sum1 = v_muladd(vx_load(src1 + j + cWidth),
                vx_load(src2 + j + cWidth), v_sum1);
            v_sum2 = v_muladd(vx_load(src1 + j + (cWidth * 2)),
                vx_load(src2 + j + (cWidth * 2)), v_sum2);
            v_sum3 = v_muladd(vx_load(src1 + j + (cWidth * 3)),
                vx_load(src2 + j + (cWidth * 3)), v_sum3);
        }
        v_sum += v_sum1 + v_sum2 + v_sum3;
#endif
        for (; j <= blockSize - cWidth; j += cWidth)
            v_sum = v_muladd(vx_load(src1 + j), vx_load(src2 + j), v_sum);
        r += v_reduce_sum(v_sum);
        src1 += blockSize;
        src2 += blockSize;
        i += blockSize;
    }
    vx_cleanup();
#endif
    return r + dotProd_(src1, src2, len - i);
}
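
// double inputs need no special block handling: the generic scalar loop already
// accumulates in full double precision, so it is used directly.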
double dotProd_64f(const double* src1, const double* src2, int len)
{
    return dotProd_(src1, src2, len);
}

#endif // CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
CV_CPU_OPTIMIZATION_NAMESPACE_END