/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencl_kernels_core.hpp"
#include "opencv2/core/opencl/runtime/opencl_clamdblas.hpp"

namespace cv
{
/****************************************************************************************\
*                                         GEMM                                           *
\****************************************************************************************/
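// The CPU GEMM implementation below computes D = alpha*op(A)*op(B) + beta*op(C),
// where op() optionally transposes its argument (GEMM_1_T/GEMM_2_T/GEMM_3_T).
// Large products are decomposed into cache-sized blocks; the helpers that follow
// copy and transpose those blocks. GEMM_CopyBlock copies a block row by row,
// moving whole ints at a time (pix_size is always a multiple of sizeof(int)
// for the supported CV_32F/CV_64F/CV_32FC2/CV_64FC2 element types).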
static void
GEMM_CopyBlock( const uchar* src, size_t src_step,
                uchar* dst, size_t dst_step,
                Size size, size_t pix_size )
{
    int j;
    size.width *= (int)(pix_size / sizeof(int));

    for( ; size.height--; src += src_step, dst += dst_step )
    {
        j = 0;
#if CV_ENABLE_UNROLLED
        for( ; j <= size.width - 4; j += 4 )
        {
            int t0 = ((const int*)src)[j];
            int t1 = ((const int*)src)[j+1];
            ((int*)dst)[j] = t0;
            ((int*)dst)[j+1] = t1;
            t0 = ((const int*)src)[j+2];
            t1 = ((const int*)src)[j+3];
            ((int*)dst)[j+2] = t0;
            ((int*)dst)[j+3] = t1;
        }
#endif
        for( ; j < size.width; j++ )
            ((int*)dst)[j] = ((const int*)src)[j];
    }
}
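// GEMM_TransposeBlock writes the transpose of a block, again moving whole
// pixels of 4, 8 or 16 bytes; it is used to pre-transpose blocks of A so the
// inner multiplication loops always read memory sequentially.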
static void
GEMM_TransposeBlock( const uchar* src, size_t src_step,
                     uchar* dst, size_t dst_step,
                     Size size, size_t pix_size )
{
    int i, j;
    for( i = 0; i < size.width; i++, dst += dst_step, src += pix_size )
    {
        const uchar* _src = src;
        switch( pix_size )
        {
        case sizeof(int):
            for( j = 0; j < size.height; j++, _src += src_step )
                ((int*)dst)[j] = ((int*)_src)[0];
            break;
        case sizeof(int)*2:
            for( j = 0; j < size.height*2; j += 2, _src += src_step )
            {
                int t0 = ((int*)_src)[0];
                int t1 = ((int*)_src)[1];
                ((int*)dst)[j] = t0;
                ((int*)dst)[j+1] = t1;
            }
            break;
        case sizeof(int)*4:
            for( j = 0; j < size.height*4; j += 4, _src += src_step )
            {
                int t0 = ((int*)_src)[0];
                int t1 = ((int*)_src)[1];
                ((int*)dst)[j] = t0;
                ((int*)dst)[j+1] = t1;
                t0 = ((int*)_src)[2];
                t1 = ((int*)_src)[3];
                ((int*)dst)[j+2] = t0;
                ((int*)dst)[j+3] = t1;
            }
            break;
        default:
            assert(0);
            return;
        }
    }
}
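// GEMMSingleMul computes a whole product in a single pass; it is used when the
// matrices are small enough that blocking would not pay off. T is the element
// type, WT the (wider) accumulator type. Four code paths follow: the outer
// product (n == 1), A*B^t, destinations narrow enough (<= 1600 bytes per row)
// to accumulate in registers, and the general case, which accumulates one
// output row at a time in d_buf.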
template<typename T, typename WT> static void
GEMMSingleMul( const T* a_data, size_t a_step,
               const T* b_data, size_t b_step,
               const T* c_data, size_t c_step,
               T* d_data, size_t d_step,
               Size a_size, Size d_size,
               double alpha, double beta, int flags )
{
    int i, j, k, n = a_size.width, m = d_size.width, drows = d_size.height;
    const T *_a_data = a_data, *_b_data = b_data, *_c_data = c_data;
    cv::AutoBuffer<T> _a_buf;
    T* a_buf = 0;
    size_t a_step0, a_step1, c_step0, c_step1, t_step;

    a_step /= sizeof(a_data[0]);
    b_step /= sizeof(b_data[0]);
    c_step /= sizeof(c_data[0]);
    d_step /= sizeof(d_data[0]);
    a_step0 = a_step;
    a_step1 = 1;

    if( !c_data )
        c_step0 = c_step1 = 0;
    else if( !(flags & GEMM_3_T) )
        c_step0 = c_step, c_step1 = 1;
    else
        c_step0 = 1, c_step1 = c_step;

    if( flags & GEMM_1_T )
    {
        CV_SWAP( a_step0, a_step1, t_step );
        n = a_size.height;
        if( a_step > 1 && n > 1 )
        {
            _a_buf.allocate(n);
            a_buf = _a_buf;
        }
    }

    if( n == 1 ) /* external product */
    {
        cv::AutoBuffer<T> _b_buf;
        T* b_buf = 0;

        if( a_step > 1 && a_size.height > 1 )
        {
            _a_buf.allocate(drows);
            a_buf = _a_buf;
            for( k = 0; k < drows; k++ )
                a_buf[k] = a_data[a_step*k];
            a_data = a_buf;
        }

        if( b_step > 1 )
        {
            _b_buf.allocate(d_size.width);
            b_buf = _b_buf;
            for( j = 0; j < d_size.width; j++ )
                b_buf[j] = b_data[j*b_step];
            b_data = b_buf;
        }

        for( i = 0; i < drows; i++, _c_data += c_step0, d_data += d_step )
        {
            WT al = WT(a_data[i])*alpha;
            c_data = _c_data;
            for( j = 0; j <= d_size.width - 2; j += 2, c_data += 2*c_step1 )
            {
                WT s0 = al*WT(b_data[j]);
                WT s1 = al*WT(b_data[j+1]);
                if( !c_data )
                {
                    d_data[j] = T(s0);
                    d_data[j+1] = T(s1);
                }
                else
                {
                    d_data[j] = T(s0 + WT(c_data[0])*beta);
                    d_data[j+1] = T(s1 + WT(c_data[c_step1])*beta);
                }
            }

            for( ; j < d_size.width; j++, c_data += c_step1 )
            {
                WT s0 = al*WT(b_data[j]);
                if( !c_data )
                    d_data[j] = T(s0);
                else
                    d_data[j] = T(s0 + WT(c_data[0])*beta);
            }
        }
    }
    else if( flags & GEMM_2_T ) /* A * Bt */
    {
        for( i = 0; i < drows; i++, _a_data += a_step0, _c_data += c_step0, d_data += d_step )
        {
            a_data = _a_data;
            b_data = _b_data;
            c_data = _c_data;

            if( a_buf )
            {
                for( k = 0; k < n; k++ )
                    a_buf[k] = a_data[a_step1*k];
                a_data = a_buf;
            }

            for( j = 0; j < d_size.width; j++, b_data += b_step,
                                               c_data += c_step1 )
            {
                WT s0(0), s1(0), s2(0), s3(0);
                k = 0;
#if CV_ENABLE_UNROLLED
                for( ; k <= n - 4; k += 4 )
                {
                    s0 += WT(a_data[k])*WT(b_data[k]);
                    s1 += WT(a_data[k+1])*WT(b_data[k+1]);
                    s2 += WT(a_data[k+2])*WT(b_data[k+2]);
                    s3 += WT(a_data[k+3])*WT(b_data[k+3]);
                }
#endif
                for( ; k < n; k++ )
                    s0 += WT(a_data[k])*WT(b_data[k]);
                s0 = (s0+s1+s2+s3)*alpha;

                if( !c_data )
                    d_data[j] = T(s0);
                else
                    d_data[j] = T(s0 + WT(c_data[0])*beta);
            }
        }
    }
    else if( d_size.width*sizeof(d_data[0]) <= 1600 ) /* narrow destination */
    {
        for( i = 0; i < drows; i++, _a_data += a_step0,
                                    _c_data += c_step0,
                                    d_data += d_step )
        {
            a_data = _a_data, c_data = _c_data;

            if( a_buf )
            {
                for( k = 0; k < n; k++ )
                    a_buf[k] = a_data[a_step1*k];
                a_data = a_buf;
            }

            for( j = 0; j <= m - 4; j += 4, c_data += 4*c_step1 )
            {
                const T* b = _b_data + j;
                WT s0(0), s1(0), s2(0), s3(0);

                for( k = 0; k < n; k++, b += b_step )
                {
                    WT a(a_data[k]);
                    s0 += a * WT(b[0]); s1 += a * WT(b[1]);
                    s2 += a * WT(b[2]); s3 += a * WT(b[3]);
                }

                if( !c_data )
                {
                    d_data[j] = T(s0*alpha);
                    d_data[j+1] = T(s1*alpha);
                    d_data[j+2] = T(s2*alpha);
                    d_data[j+3] = T(s3*alpha);
                }
                else
                {
                    s0 = s0*alpha; s1 = s1*alpha;
                    s2 = s2*alpha; s3 = s3*alpha;
                    d_data[j] = T(s0 + WT(c_data[0])*beta);
                    d_data[j+1] = T(s1 + WT(c_data[c_step1])*beta);
                    d_data[j+2] = T(s2 + WT(c_data[c_step1*2])*beta);
                    d_data[j+3] = T(s3 + WT(c_data[c_step1*3])*beta);
                }
            }

            for( ; j < m; j++, c_data += c_step1 )
            {
                const T* b = _b_data + j;
                WT s0(0);

                for( k = 0; k < n; k++, b += b_step )
                    s0 += WT(a_data[k]) * WT(b[0]);

                s0 *= alpha;
                if( !c_data )
                    d_data[j] = T(s0);
                else
                    d_data[j] = T(s0 + WT(c_data[0])*beta);
            }
        }
    }
    else
    {
        cv::AutoBuffer<WT> _d_buf(m);
        WT* d_buf = _d_buf;

        for( i = 0; i < drows; i++, _a_data += a_step0, _c_data += c_step0, d_data += d_step )
        {
            a_data = _a_data;
            b_data = _b_data;
            c_data = _c_data;

            if( a_buf )
            {
                for( k = 0; k < n; k++ )
                    a_buf[k] = _a_data[a_step1*k];
                a_data = a_buf;
            }

            for( j = 0; j < m; j++ )
                d_buf[j] = WT(0);

            for( k = 0; k < n; k++, b_data += b_step )
            {
                WT al(a_data[k]);
                j = 0;
#if CV_ENABLE_UNROLLED
                for( ; j <= m - 4; j += 4 )
                {
                    WT t0 = d_buf[j] + WT(b_data[j])*al;
                    WT t1 = d_buf[j+1] + WT(b_data[j+1])*al;
                    d_buf[j] = t0;
                    d_buf[j+1] = t1;
                    t0 = d_buf[j+2] + WT(b_data[j+2])*al;
                    t1 = d_buf[j+3] + WT(b_data[j+3])*al;
                    d_buf[j+2] = t0;
                    d_buf[j+3] = t1;
                }
#endif
                for( ; j < m; j++ )
                    d_buf[j] += WT(b_data[j])*al;
            }

            if( !c_data )
                for( j = 0; j < m; j++ )
                    d_data[j] = T(d_buf[j]*alpha);
            else
                for( j = 0; j < m; j++, c_data += c_step1 )
                {
                    WT t = d_buf[j]*alpha;
                    d_data[j] = T(t + WT(c_data[0])*beta);
                }
        }
    }
}
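// GEMMBlockMul multiplies one pair of blocks, writing the partial products into
// a wide-type buffer d_data; bit 16 of flags selects accumulation into the
// existing buffer contents instead of overwriting (used for all but the first
// k-block of a tile).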
template<typename T, typename WT> static void
GEMMBlockMul( const T* a_data, size_t a_step,
              const T* b_data, size_t b_step,
              WT* d_data, size_t d_step,
              Size a_size, Size d_size, int flags )
{
    int i, j, k, n = a_size.width, m = d_size.width;
    const T *_a_data = a_data, *_b_data = b_data;
    cv::AutoBuffer<T> _a_buf;
    T* a_buf = 0;
    size_t a_step0, a_step1, t_step;
    int do_acc = flags & 16;

    a_step /= sizeof(a_data[0]);
    b_step /= sizeof(b_data[0]);
    d_step /= sizeof(d_data[0]);

    a_step0 = a_step;
    a_step1 = 1;

    if( flags & GEMM_1_T )
    {
        CV_SWAP( a_step0, a_step1, t_step );
        n = a_size.height;
        _a_buf.allocate(n);
        a_buf = _a_buf;
    }

    if( flags & GEMM_2_T )
    {
        /* second operand is transposed */
        for( i = 0; i < d_size.height; i++, _a_data += a_step0, d_data += d_step )
        {
            a_data = _a_data; b_data = _b_data;

            if( a_buf )
            {
                for( k = 0; k < n; k++ )
                    a_buf[k] = a_data[a_step1*k];
                a_data = a_buf;
            }

            for( j = 0; j < d_size.width; j++, b_data += b_step )
            {
                WT s0 = do_acc ? d_data[j]:WT(0), s1(0);
                for( k = 0; k <= n - 2; k += 2 )
                {
                    s0 += WT(a_data[k])*WT(b_data[k]);
                    s1 += WT(a_data[k+1])*WT(b_data[k+1]);
                }

                for( ; k < n; k++ )
                    s0 += WT(a_data[k])*WT(b_data[k]);

                d_data[j] = s0 + s1;
            }
        }
    }
    else
    {
        for( i = 0; i < d_size.height; i++, _a_data += a_step0, d_data += d_step )
        {
            a_data = _a_data, b_data = _b_data;

            if( a_buf )
            {
                for( k = 0; k < n; k++ )
                    a_buf[k] = a_data[a_step1*k];
                a_data = a_buf;
            }

            for( j = 0; j <= m - 4; j += 4 )
            {
                WT s0, s1, s2, s3;
                const T* b = b_data + j;

                if( do_acc )
                {
                    s0 = d_data[j]; s1 = d_data[j+1];
                    s2 = d_data[j+2]; s3 = d_data[j+3];
                }
                else
                    s0 = s1 = s2 = s3 = WT(0);

                for( k = 0; k < n; k++, b += b_step )
                {
                    WT a(a_data[k]);
                    s0 += a * WT(b[0]); s1 += a * WT(b[1]);
                    s2 += a * WT(b[2]); s3 += a * WT(b[3]);
                }

                d_data[j] = s0; d_data[j+1] = s1;
                d_data[j+2] = s2; d_data[j+3] = s3;
            }

            for( ; j < m; j++ )
            {
                const T* b = b_data + j;
                WT s0 = do_acc ? d_data[j] : WT(0);

                for( k = 0; k < n; k++, b += b_step )
                    s0 += WT(a_data[k]) * WT(b[0]);

                d_data[j] = s0;
            }
        }
    }
}
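// GEMMStore converts the accumulated wide-type tile back to the output element
// type, applying the final D = alpha*acc + beta*C scaling.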
template<typename T, typename WT> static void
GEMMStore( const T* c_data, size_t c_step,
           const WT* d_buf, size_t d_buf_step,
           T* d_data, size_t d_step, Size d_size,
           double alpha, double beta, int flags )
{
    const T* _c_data = c_data;
    int j;
    size_t c_step0, c_step1;

    c_step /= sizeof(c_data[0]);
    d_buf_step /= sizeof(d_buf[0]);
    d_step /= sizeof(d_data[0]);

    if( !c_data )
        c_step0 = c_step1 = 0;
    else if( !(flags & GEMM_3_T) )
        c_step0 = c_step, c_step1 = 1;
    else
        c_step0 = 1, c_step1 = c_step;

    for( ; d_size.height--; _c_data += c_step0, d_buf += d_buf_step, d_data += d_step )
    {
        if( _c_data )
        {
            c_data = _c_data;
            j = 0;
#if CV_ENABLE_UNROLLED
            for( ; j <= d_size.width - 4; j += 4, c_data += 4*c_step1 )
            {
                WT t0 = alpha*d_buf[j];
                WT t1 = alpha*d_buf[j+1];
                t0 += beta*WT(c_data[0]);
                t1 += beta*WT(c_data[c_step1]);
                d_data[j] = T(t0);
                d_data[j+1] = T(t1);
                t0 = alpha*d_buf[j+2];
                t1 = alpha*d_buf[j+3];
                t0 += beta*WT(c_data[c_step1*2]);
                t1 += beta*WT(c_data[c_step1*3]);
                d_data[j+2] = T(t0);
                d_data[j+3] = T(t1);
            }
#endif
            for( ; j < d_size.width; j++, c_data += c_step1 )
            {
                WT t0 = alpha*d_buf[j];
                d_data[j] = T(t0 + WT(c_data[0])*beta);
            }
        }
        else
        {
            j = 0;
#if CV_ENABLE_UNROLLED
            for( ; j <= d_size.width - 4; j += 4 )
            {
                WT t0 = alpha*d_buf[j];
                WT t1 = alpha*d_buf[j+1];
                d_data[j] = T(t0);
                d_data[j+1] = T(t1);
                t0 = alpha*d_buf[j+2];
                t1 = alpha*d_buf[j+3];
                d_data[j+2] = T(t0);
                d_data[j+3] = T(t1);
            }
#endif
            for( ; j < d_size.width; j++ )
                d_data[j] = T(alpha*d_buf[j]);
        }
    }
}
typedef void (*GEMMSingleMulFunc)( const void* src1, size_t step1,
                   const void* src2, size_t step2, const void* src3, size_t step3,
                   void* dst, size_t dststep, Size srcsize, Size dstsize,
                   double alpha, double beta, int flags );

typedef void (*GEMMBlockMulFunc)( const void* src1, size_t step1,
                   const void* src2, size_t step2, void* dst, size_t dststep,
                   Size srcsize, Size dstsize, int flags );

typedef void (*GEMMStoreFunc)( const void* src1, size_t step1,
                   const void* src2, size_t step2, void* dst, size_t dststep,
                   Size dstsize, double alpha, double beta, int flags );
static void GEMMSingleMul_32f( const float* a_data, size_t a_step,
                               const float* b_data, size_t b_step,
                               const float* c_data, size_t c_step,
                               float* d_data, size_t d_step,
                               Size a_size, Size d_size,
                               double alpha, double beta, int flags )
{
    GEMMSingleMul<float,double>(a_data, a_step, b_data, b_step, c_data,
                                c_step, d_data, d_step, a_size, d_size,
                                alpha, beta, flags);
}

static void GEMMSingleMul_64f( const double* a_data, size_t a_step,
                               const double* b_data, size_t b_step,
                               const double* c_data, size_t c_step,
                               double* d_data, size_t d_step,
                               Size a_size, Size d_size,
                               double alpha, double beta, int flags )
{
    GEMMSingleMul<double,double>(a_data, a_step, b_data, b_step, c_data,
                                 c_step, d_data, d_step, a_size, d_size,
                                 alpha, beta, flags);
}

static void GEMMSingleMul_32fc( const Complexf* a_data, size_t a_step,
                                const Complexf* b_data, size_t b_step,
                                const Complexf* c_data, size_t c_step,
                                Complexf* d_data, size_t d_step,
                                Size a_size, Size d_size,
                                double alpha, double beta, int flags )
{
    GEMMSingleMul<Complexf,Complexd>(a_data, a_step, b_data, b_step, c_data,
                                     c_step, d_data, d_step, a_size, d_size,
                                     alpha, beta, flags);
}

static void GEMMSingleMul_64fc( const Complexd* a_data, size_t a_step,
                                const Complexd* b_data, size_t b_step,
                                const Complexd* c_data, size_t c_step,
                                Complexd* d_data, size_t d_step,
                                Size a_size, Size d_size,
                                double alpha, double beta, int flags )
{
    GEMMSingleMul<Complexd,Complexd>(a_data, a_step, b_data, b_step, c_data,
                                     c_step, d_data, d_step, a_size, d_size,
                                     alpha, beta, flags);
}

static void GEMMBlockMul_32f( const float* a_data, size_t a_step,
                              const float* b_data, size_t b_step,
                              double* d_data, size_t d_step,
                              Size a_size, Size d_size, int flags )
{
    GEMMBlockMul(a_data, a_step, b_data, b_step, d_data, d_step, a_size, d_size, flags);
}

static void GEMMBlockMul_64f( const double* a_data, size_t a_step,
                              const double* b_data, size_t b_step,
                              double* d_data, size_t d_step,
                              Size a_size, Size d_size, int flags )
{
    GEMMBlockMul(a_data, a_step, b_data, b_step, d_data, d_step, a_size, d_size, flags);
}

static void GEMMBlockMul_32fc( const Complexf* a_data, size_t a_step,
                               const Complexf* b_data, size_t b_step,
                               Complexd* d_data, size_t d_step,
                               Size a_size, Size d_size, int flags )
{
    GEMMBlockMul(a_data, a_step, b_data, b_step, d_data, d_step, a_size, d_size, flags);
}

static void GEMMBlockMul_64fc( const Complexd* a_data, size_t a_step,
                               const Complexd* b_data, size_t b_step,
                               Complexd* d_data, size_t d_step,
                               Size a_size, Size d_size, int flags )
{
    GEMMBlockMul(a_data, a_step, b_data, b_step, d_data, d_step, a_size, d_size, flags);
}

static void GEMMStore_32f( const float* c_data, size_t c_step,
                           const double* d_buf, size_t d_buf_step,
                           float* d_data, size_t d_step, Size d_size,
                           double alpha, double beta, int flags )
{
    GEMMStore(c_data, c_step, d_buf, d_buf_step, d_data, d_step, d_size, alpha, beta, flags);
}

static void GEMMStore_64f( const double* c_data, size_t c_step,
                           const double* d_buf, size_t d_buf_step,
                           double* d_data, size_t d_step, Size d_size,
                           double alpha, double beta, int flags )
{
    GEMMStore(c_data, c_step, d_buf, d_buf_step, d_data, d_step, d_size, alpha, beta, flags);
}

static void GEMMStore_32fc( const Complexf* c_data, size_t c_step,
                            const Complexd* d_buf, size_t d_buf_step,
                            Complexf* d_data, size_t d_step, Size d_size,
                            double alpha, double beta, int flags )
{
    GEMMStore(c_data, c_step, d_buf, d_buf_step, d_data, d_step, d_size, alpha, beta, flags);
}

static void GEMMStore_64fc( const Complexd* c_data, size_t c_step,
                            const Complexd* d_buf, size_t d_buf_step,
                            Complexd* d_data, size_t d_step, Size d_size,
                            double alpha, double beta, int flags )
{
    GEMMStore(c_data, c_step, d_buf, d_buf_step, d_data, d_step, d_size, alpha, beta, flags);
}
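// OpenCL acceleration. The first path hands the whole product to AMD's
// clAmdBlas (clBLAS) library when it is available; the second uses OpenCV's
// own tiled OpenCL GEMM kernel.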
#ifdef HAVE_CLAMDBLAS

static bool ocl_gemm_amdblas( InputArray matA, InputArray matB, double alpha,
                              InputArray matC, double beta, OutputArray matD, int flags )
{
    int type = matA.type(), esz = CV_ELEM_SIZE(type);
    bool haveC = matC.kind() != cv::_InputArray::NONE;
    Size sizeA = matA.size(), sizeB = matB.size(), sizeC = haveC ? matC.size() : Size(0, 0);
    bool atrans = (flags & GEMM_1_T) != 0, btrans = (flags & GEMM_2_T) != 0, ctrans = (flags & GEMM_3_T) != 0;

    if (atrans)
        sizeA = Size(sizeA.height, sizeA.width);
    if (btrans)
        sizeB = Size(sizeB.height, sizeB.width);
    if (haveC && ctrans)
        sizeC = Size(sizeC.height, sizeC.width);

    Size sizeD(sizeB.width, sizeA.height);

    CV_Assert( matB.type() == type && (!haveC || matC.type() == type) );
    CV_Assert( sizeA.width == sizeB.height && (!haveC || sizeC == sizeD) );

    matD.create(sizeD, type);
    if ( matA.offset() % esz != 0 || matA.step() % esz != 0 ||
         matB.offset() % esz != 0 || matB.step() % esz != 0 ||
         (haveC && (matC.offset() % esz != 0 || matC.step() % esz != 0)) )
        return false;

    UMat A = matA.getUMat(), B = matB.getUMat(), D = matD.getUMat();
    if (haveC)
        ctrans ? transpose(matC, D) : matC.copyTo(D);
    else
        D.setTo(Scalar::all(0));

    int M = sizeD.height, N = sizeD.width, K = sizeA.width;
    int lda = (int)A.step / esz, ldb = (int)B.step / esz, ldc = (int)D.step / esz;
    int offa = (int)A.offset / esz, offb = (int)B.offset / esz, offc = (int)D.offset / esz;

    cl_command_queue clq = (cl_command_queue)ocl::Queue::getDefault().ptr();
    clAmdBlasTranspose transA = atrans ? clAmdBlasTrans : clAmdBlasNoTrans;
    clAmdBlasTranspose transB = btrans ? clAmdBlasTrans : clAmdBlasNoTrans;
    clAmdBlasOrder order = clAmdBlasRowMajor;
    clAmdBlasStatus status = clAmdBlasSuccess;

    if (type == CV_32FC1)
        status = clAmdBlasSgemmEx(order, transA, transB, M, N, K,
                                  (cl_float)alpha, (const cl_mem)A.handle(ACCESS_READ), offa, lda,
                                  (const cl_mem)B.handle(ACCESS_READ), offb, ldb,
                                  (cl_float)beta, (cl_mem)D.handle(ACCESS_RW), offc, ldc,
                                  1, &clq, 0, NULL, NULL);
    else if (type == CV_64FC1)
        status = clAmdBlasDgemmEx(order, transA, transB, M, N, K,
                                  alpha, (const cl_mem)A.handle(ACCESS_READ), offa, lda,
                                  (const cl_mem)B.handle(ACCESS_READ), offb, ldb,
                                  beta, (cl_mem)D.handle(ACCESS_RW), offc, ldc,
                                  1, &clq, 0, NULL, NULL);
    else if (type == CV_32FC2)
    {
        cl_float2 alpha_2 = { { (cl_float)alpha, 0 } };
        cl_float2 beta_2  = { { (cl_float)beta, 0 } };
        status = clAmdBlasCgemmEx(order, transA, transB, M, N, K,
                                  alpha_2, (const cl_mem)A.handle(ACCESS_READ), offa, lda,
                                  (const cl_mem)B.handle(ACCESS_READ), offb, ldb,
                                  beta_2, (cl_mem)D.handle(ACCESS_RW), offc, ldc,
                                  1, &clq, 0, NULL, NULL);
    }
    else if (type == CV_64FC2)
    {
        cl_double2 alpha_2 = { { alpha, 0 } };
        cl_double2 beta_2  = { { beta, 0 } };
        status = clAmdBlasZgemmEx(order, transA, transB, M, N, K,
                                  alpha_2, (const cl_mem)A.handle(ACCESS_READ), offa, lda,
                                  (const cl_mem)B.handle(ACCESS_READ), offb, ldb,
                                  beta_2, (cl_mem)D.handle(ACCESS_RW), offc, ldc,
                                  1, &clq, 0, NULL, NULL);
    }
    else
        CV_Error(Error::StsUnsupportedFormat, "");

    return status == clAmdBlasSuccess;
}

#endif
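// Generic OpenCL path: a tiled GEMM kernel compiled for the requested element
// type, launched with LOCAL_SIZE x LOCAL_SIZE (16x16) work-groups.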
#ifdef HAVE_OPENCL

static bool ocl_gemm( InputArray matA, InputArray matB, double alpha,
                      InputArray matC, double beta, OutputArray matD, int flags )
{
    int depth = matA.depth(), cn = matA.channels();
    int type = CV_MAKETYPE(depth, cn);
    const int block_size = 16;

    CV_Assert( type == matB.type() && (type == CV_32FC1 || type == CV_64FC1 || type == CV_32FC2 || type == CV_64FC2) );

    const ocl::Device & dev = ocl::Device::getDefault();
    bool doubleSupport = dev.doubleFPConfig() > 0;

    if( !doubleSupport && depth == CV_64F )
        return false;

    bool haveC = matC.kind() != cv::_InputArray::NONE;
    Size sizeA = matA.size(), sizeB = matB.size(), sizeC = haveC ? matC.size() : Size(0, 0);
    bool atrans = (flags & GEMM_1_T) != 0, btrans = (flags & GEMM_2_T) != 0, ctrans = (flags & GEMM_3_T) != 0;

    if (atrans)
        sizeA = Size(sizeA.height, sizeA.width);
    if (btrans)
        sizeB = Size(sizeB.height, sizeB.width);
    if (haveC && ctrans)
        sizeC = Size(sizeC.height, sizeC.width);

    Size sizeD(sizeB.width, sizeA.height);

    CV_Assert( matB.type() == type && (!haveC || matC.type() == type) );
    CV_Assert( sizeA.width == sizeB.height && (!haveC || sizeC == sizeD) );

    String opts = format("-D T=%s -D T1=%s -D cn=%d -D LOCAL_SIZE=%d %s %s",
                          ocl::typeToStr(type), ocl::typeToStr(depth), cn, block_size,
                          haveC ? "-D HAVE_C" : "",
                          doubleSupport ? " -D DOUBLE_SUPPORT" : "");

    ocl::Kernel k("gemm", cv::ocl::core::gemm_oclsrc, opts);
    if (k.empty())
        return false;

    matD.create(sizeD, type);

    UMat A = matA.getUMat(), B = matB.getUMat(), D = matD.getUMat();

    if (atrans)
        A = A.t();

    if (btrans)
        B = B.t();

    if (haveC)
        ctrans ? transpose(matC, D) : matC.copyTo(D);
    else
        D.setTo(Scalar::all(0));

    if (depth == CV_64F)
        k.args(ocl::KernelArg::ReadOnlyNoSize(A),
               ocl::KernelArg::ReadOnlyNoSize(B),
               ocl::KernelArg::ReadWrite(D),
               sizeA.width, alpha, beta);
    else
        k.args(ocl::KernelArg::ReadOnlyNoSize(A),
               ocl::KernelArg::ReadOnlyNoSize(B),
               ocl::KernelArg::ReadWrite(D),
               sizeA.width, (float)alpha, (float)beta);

    size_t globalsize[2] = { (size_t)sizeD.width, (size_t)sizeD.height };
    size_t localsize[2] = { (size_t)block_size, (size_t)block_size };
    return k.run(2, globalsize, localsize, false);
}

#endif

}
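// Public entry point: D = alpha*op(A)*op(B) + beta*op(C). A minimal usage
// sketch (illustrative only, not part of this file):
//
//     cv::Mat A(3, 4, CV_32F), B(3, 4, CV_32F), D;
//     cv::randu(A, 0, 1); cv::randu(B, 0, 1);
//     cv::gemm(A, B, 1.0, cv::noArray(), 0.0, D, cv::GEMM_2_T);  // D = A*B^t, 3x3
//
// Products with a shared dimension of 2..4 and no transposition are expanded
// inline below; everything else goes through the single-pass or blocked GEMM
// machinery above.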
void cv::gemm( InputArray matA, InputArray matB, double alpha,
           InputArray matC, double beta, OutputArray _matD, int flags )
{
#ifdef HAVE_CLAMDBLAS
    CV_OCL_RUN(ocl::haveAmdBlas() && matA.dims() <= 2 && matB.dims() <= 2 && matC.dims() <= 2 && _matD.isUMat() &&
        matA.cols() > 20 && matA.rows() > 20 && matB.cols() > 20, // since it works incorrectly for small sizes
        ocl_gemm_amdblas(matA, matB, alpha, matC, beta, _matD, flags))
#endif

#ifdef HAVE_OPENCL
    CV_OCL_RUN(_matD.isUMat() && matA.dims() <= 2 && matB.dims() <= 2 && matC.dims() <= 2,
               ocl_gemm(matA, matB, alpha, matC, beta, _matD, flags))
#endif

    const int block_lin_size = 128;
    const int block_size = block_lin_size * block_lin_size;

    static double zero[] = {0,0,0,0};
    static float zerof[] = {0,0,0,0};

    Mat A = matA.getMat(), B = matB.getMat(), C = beta != 0 ? matC.getMat() : Mat();
    Size a_size = A.size(), d_size;
    int i, len = 0, type = A.type();

    CV_Assert( type == B.type() && (type == CV_32FC1 || type == CV_64FC1 || type == CV_32FC2 || type == CV_64FC2) );

    switch( flags & (GEMM_1_T|GEMM_2_T) )
    {
    case 0:
        d_size = Size( B.cols, a_size.height );
        len = B.rows;
        CV_Assert( a_size.width == len );
        break;
    case 1:
        d_size = Size( B.cols, a_size.width );
        len = B.rows;
        CV_Assert( a_size.height == len );
        break;
    case 2:
        d_size = Size( B.rows, a_size.height );
        len = B.cols;
        CV_Assert( a_size.width == len );
        break;
    case 3:
        d_size = Size( B.rows, a_size.width );
        len = B.cols;
        CV_Assert( a_size.height == len );
        break;
    }

    if( !C.empty() )
    {
        CV_Assert( C.type() == type &&
            (((flags&GEMM_3_T) == 0 && C.rows == d_size.height && C.cols == d_size.width) ||
             ((flags&GEMM_3_T) != 0 && C.rows == d_size.width && C.cols == d_size.height)));
    }

    _matD.create( d_size.height, d_size.width, type );
    Mat D = _matD.getMat();
    if( (flags & GEMM_3_T) != 0 && C.data == D.data )
    {
        transpose( C, C );
        flags &= ~GEMM_3_T;
    }

    if( flags == 0 && 2 <= len && len <= 4 && (len == d_size.width || len == d_size.height) )
    {
        if( type == CV_32F )
        {
            float* d = D.ptr<float>();
            const float *a = A.ptr<float>(),
                        *b = B.ptr<float>(),
                        *c = (const float*)C.data;
            size_t d_step = D.step/sizeof(d[0]),
                   a_step = A.step/sizeof(a[0]),
                   b_step = B.step/sizeof(b[0]),
                   c_step = C.data ? C.step/sizeof(c[0]) : 0;

            if( !c )
                c = zerof;

            switch( len )
            {
            case 2:
                if( len == d_size.width && b != d )
                {
                    for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
                    {
                        float t0 = a[0]*b[0] + a[1]*b[b_step];
                        float t1 = a[0]*b[1] + a[1]*b[b_step+1];
                        d[0] = (float)(t0*alpha + c[0]*beta);
                        d[1] = (float)(t1*alpha + c[1]*beta);
                    }
                }
                else if( a != d )
                {
                    int c_step0 = 1;
                    if( c == zerof )
                    {
                        c_step0 = 0;
                        c_step = 1;
                    }

                    for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
                    {
                        float t0 = a[0]*b[0] + a[1]*b[b_step];
                        float t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step];
                        d[0] = (float)(t0*alpha + c[0]*beta);
                        d[d_step] = (float)(t1*alpha + c[c_step]*beta);
                    }
                }
                else
                    break;
                return;
            case 3:
                if( len == d_size.width && b != d )
                {
                    for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
                    {
                        float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2];
                        float t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1];
                        float t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2];
                        d[0] = (float)(t0*alpha + c[0]*beta);
                        d[1] = (float)(t1*alpha + c[1]*beta);
                        d[2] = (float)(t2*alpha + c[2]*beta);
                    }
                }
                else if( a != d )
                {
                    int c_step0 = 1;
                    if( c == zerof )
                    {
                        c_step0 = 0;
                        c_step = 1;
                    }

                    for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
                    {
                        float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2];
                        float t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] + a[a_step+2]*b[b_step*2];
                        float t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] + a[a_step*2+2]*b[b_step*2];

                        d[0] = (float)(t0*alpha + c[0]*beta);
                        d[d_step] = (float)(t1*alpha + c[c_step]*beta);
                        d[d_step*2] = (float)(t2*alpha + c[c_step*2]*beta);
                    }
                }
                else
                    break;
                return;
            case 4:
                if( len == d_size.width && b != d )
                {
                    for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
                    {
                        float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3];
                        float t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1] + a[3]*b[b_step*3+1];
                        float t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2] + a[3]*b[b_step*3+2];
                        float t3 = a[0]*b[3] + a[1]*b[b_step+3] + a[2]*b[b_step*2+3] + a[3]*b[b_step*3+3];
                        d[0] = (float)(t0*alpha + c[0]*beta);
                        d[1] = (float)(t1*alpha + c[1]*beta);
                        d[2] = (float)(t2*alpha + c[2]*beta);
                        d[3] = (float)(t3*alpha + c[3]*beta);
                    }
                }
                else if( len <= 16 && a != d )
                {
                    int c_step0 = 1;
                    if( c == zerof )
                    {
                        c_step0 = 0;
                        c_step = 1;
                    }

                    for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
                    {
                        float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3];
                        float t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] +
                                   a[a_step+2]*b[b_step*2] + a[a_step+3]*b[b_step*3];
                        float t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] +
                                   a[a_step*2+2]*b[b_step*2] + a[a_step*2+3]*b[b_step*3];
                        float t3 = a[a_step*3]*b[0] + a[a_step*3+1]*b[b_step] +
                                   a[a_step*3+2]*b[b_step*2] + a[a_step*3+3]*b[b_step*3];
                        d[0] = (float)(t0*alpha + c[0]*beta);
                        d[d_step] = (float)(t1*alpha + c[c_step]*beta);
                        d[d_step*2] = (float)(t2*alpha + c[c_step*2]*beta);
                        d[d_step*3] = (float)(t3*alpha + c[c_step*3]*beta);
                    }
                }
                else
                    break;
                return;
            }
        }
        if( type == CV_64F )
        {
            double* d = D.ptr<double>();
            const double *a = A.ptr<double>(),
                         *b = B.ptr<double>(),
                         *c = (const double*)C.data;
            size_t d_step = D.step/sizeof(d[0]),
                   a_step = A.step/sizeof(a[0]),
                   b_step = B.step/sizeof(b[0]),
                   c_step = C.data ? C.step/sizeof(c[0]) : 0;

            if( !c )
                c = zero;

            switch( len )
            {
            case 2:
                if( len == d_size.width && b != d )
                {
                    for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
                    {
                        double t0 = a[0]*b[0] + a[1]*b[b_step];
                        double t1 = a[0]*b[1] + a[1]*b[b_step+1];
                        d[0] = t0*alpha + c[0]*beta;
                        d[1] = t1*alpha + c[1]*beta;
                    }
                }
                else if( a != d )
                {
                    int c_step0 = 1;
                    if( c == zero )
                    {
                        c_step0 = 0;
                        c_step = 1;
                    }

                    for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
                    {
                        double t0 = a[0]*b[0] + a[1]*b[b_step];
                        double t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step];
                        d[0] = t0*alpha + c[0]*beta;
                        d[d_step] = t1*alpha + c[c_step]*beta;
                    }
                }
                else
                    break;
                return;
            case 3:
                if( len == d_size.width && b != d )
                {
                    for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
                    {
                        double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2];
                        double t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1];
                        double t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2];
                        d[0] = t0*alpha + c[0]*beta;
                        d[1] = t1*alpha + c[1]*beta;
                        d[2] = t2*alpha + c[2]*beta;
                    }
                }
                else if( a != d )
                {
                    int c_step0 = 1;
                    if( c == zero )
                    {
                        c_step0 = 0;
                        c_step = 1;
                    }

                    for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
                    {
                        double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2];
                        double t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] + a[a_step+2]*b[b_step*2];
                        double t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] + a[a_step*2+2]*b[b_step*2];

                        d[0] = t0*alpha + c[0]*beta;
                        d[d_step] = t1*alpha + c[c_step]*beta;
                        d[d_step*2] = t2*alpha + c[c_step*2]*beta;
                    }
                }
                else
                    break;
                return;
            case 4:
                if( len == d_size.width && b != d )
                {
                    for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
                    {
                        double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3];
                        double t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1] + a[3]*b[b_step*3+1];
                        double t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2] + a[3]*b[b_step*3+2];
                        double t3 = a[0]*b[3] + a[1]*b[b_step+3] + a[2]*b[b_step*2+3] + a[3]*b[b_step*3+3];
                        d[0] = t0*alpha + c[0]*beta;
                        d[1] = t1*alpha + c[1]*beta;
                        d[2] = t2*alpha + c[2]*beta;
                        d[3] = t3*alpha + c[3]*beta;
                    }
                }
                else if( d_size.width <= 16 && a != d )
                {
                    int c_step0 = 1;
                    if( c == zero )
                    {
                        c_step0 = 0;
                        c_step = 1;
                    }

                    for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
                    {
                        double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3];
                        double t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] +
                                    a[a_step+2]*b[b_step*2] + a[a_step+3]*b[b_step*3];
                        double t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] +
                                    a[a_step*2+2]*b[b_step*2] + a[a_step*2+3]*b[b_step*3];
                        double t3 = a[a_step*3]*b[0] + a[a_step*3+1]*b[b_step] +
                                    a[a_step*3+2]*b[b_step*2] + a[a_step*3+3]*b[b_step*3];
                        d[0] = t0*alpha + c[0]*beta;
                        d[d_step] = t1*alpha + c[c_step]*beta;
                        d[d_step*2] = t2*alpha + c[c_step*2]*beta;
                        d[d_step*3] = t3*alpha + c[c_step*3]*beta;
                    }
                }
                else
                    break;
                return;
            }
        }
    }
    {
    size_t b_step = B.step;
    GEMMSingleMulFunc singleMulFunc;
    GEMMBlockMulFunc blockMulFunc;
    GEMMStoreFunc storeFunc;
    Mat *matD = &D, tmat;
    size_t tmat_size = 0;
    const uchar* Cdata = C.data;
    size_t Cstep = C.data ? (size_t)C.step : 0;
    AutoBuffer<uchar> buf;

    if( type == CV_32FC1 )
    {
        singleMulFunc = (GEMMSingleMulFunc)GEMMSingleMul_32f;
        blockMulFunc = (GEMMBlockMulFunc)GEMMBlockMul_32f;
        storeFunc = (GEMMStoreFunc)GEMMStore_32f;
    }
    else if( type == CV_64FC1 )
    {
        singleMulFunc = (GEMMSingleMulFunc)GEMMSingleMul_64f;
        blockMulFunc = (GEMMBlockMulFunc)GEMMBlockMul_64f;
        storeFunc = (GEMMStoreFunc)GEMMStore_64f;
    }
    else if( type == CV_32FC2 )
    {
        singleMulFunc = (GEMMSingleMulFunc)GEMMSingleMul_32fc;
        blockMulFunc = (GEMMBlockMulFunc)GEMMBlockMul_32fc;
        storeFunc = (GEMMStoreFunc)GEMMStore_32fc;
    }
    else
    {
        CV_Assert( type == CV_64FC2 );
        singleMulFunc = (GEMMSingleMulFunc)GEMMSingleMul_64fc;
        blockMulFunc = (GEMMBlockMulFunc)GEMMBlockMul_64fc;
        storeFunc = (GEMMStoreFunc)GEMMStore_64fc;
    }

    if( D.data == A.data || D.data == B.data )
    {
        tmat_size = d_size.width*d_size.height*CV_ELEM_SIZE(type);
        // Allocate tmat later, once the size of buf is known
        matD = &tmat;
    }
    if( (d_size.width == 1 || len == 1) && !(flags & GEMM_2_T) && B.isContinuous() )
    {
        b_step = d_size.width == 1 ? 0 : CV_ELEM_SIZE(type);
        flags |= GEMM_2_T;
    }

    /*if( (d_size.width | d_size.height | len) >= 16 && icvBLAS_GEMM_32f_p != 0 )
    {
        blas_func = type == CV_32FC1 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_32f_p :
                    type == CV_64FC1 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_64f_p :
                    type == CV_32FC2 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_32fc_p :
                    type == CV_64FC2 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_64fc_p : 0;
    }

    if( blas_func )
    {
        const char* transa = flags & GEMM_1_T ? "t" : "n";
        const char* transb = flags & GEMM_2_T ? "t" : "n";
        int lda, ldb, ldd;

        if( C->data.ptr )
        {
            if( C->data.ptr != D->data.ptr )
            {
                if( !(flags & GEMM_3_T) )
                    cvCopy( C, D );
                else
                    cvTranspose( C, D );
            }
        }

        if( CV_MAT_DEPTH(type) == CV_32F )
        {
            Complex32f _alpha, _beta;

            lda = A->step/sizeof(float);
            ldb = b_step/sizeof(float);
            ldd = D->step/sizeof(float);
            _alpha.re = (float)alpha;
            _alpha.im = 0;
            _beta.re = C->data.ptr ? (float)beta : 0;
            _beta.im = 0;
            if( CV_MAT_CN(type) == 2 )
                lda /= 2, ldb /= 2, ldd /= 2;

            blas_func( transb, transa, &d_size.width, &d_size.height, &len,
                   &_alpha, B->data.ptr, &ldb, A->data.ptr, &lda,
                   &_beta, D->data.ptr, &ldd );
        }
        else
        {
            CvComplex64f _alpha, _beta;

            lda = A->step/sizeof(double);
            ldb = b_step/sizeof(double);
            ldd = D->step/sizeof(double);
            _alpha.re = alpha;
            _alpha.im = 0;
            _beta.re = C->data.ptr ? beta : 0;
            _beta.im = 0;
            if( CV_MAT_CN(type) == 2 )
                lda /= 2, ldb /= 2, ldd /= 2;

            blas_func( transb, transa, &d_size.width, &d_size.height, &len,
                   &_alpha, B->data.ptr, &ldb, A->data.ptr, &lda,
                   &_beta, D->data.ptr, &ldd );
        }
    }
    else*/ if( ((d_size.height <= block_lin_size/2 || d_size.width <= block_lin_size/2) &&
        len <= 10000) || len <= 10 ||
        (d_size.width <= block_lin_size &&
        d_size.height <= block_lin_size && len <= block_lin_size) )
    {
        if( tmat_size > 0 ) {
            buf.allocate(tmat_size);
            tmat = Mat(d_size.height, d_size.width, type, (uchar*)buf );
        }
        singleMulFunc( A.ptr(), A.step, B.ptr(), b_step, Cdata, Cstep,
                       matD->ptr(), matD->step, a_size, d_size, alpha, beta, flags );
    }
    else
    {
        int is_a_t = flags & GEMM_1_T;
        int is_b_t = flags & GEMM_2_T;
        int elem_size = CV_ELEM_SIZE(type);
        int dk0_1, dk0_2;
        int a_buf_size = 0, b_buf_size, d_buf_size;
        uchar* a_buf = 0;
        uchar* b_buf = 0;
        uchar* d_buf = 0;
        int j, k, di = 0, dj = 0, dk = 0;
        int dm0, dn0, dk0;
        size_t a_step0, a_step1, b_step0, b_step1, c_step0, c_step1;
        int work_elem_size = elem_size << (CV_MAT_DEPTH(type) == CV_32F ? 1 : 0);

        if( !is_a_t )
            a_step0 = A.step, a_step1 = elem_size;
        else
            a_step0 = elem_size, a_step1 = A.step;

        if( !is_b_t )
            b_step0 = b_step, b_step1 = elem_size;
        else
            b_step0 = elem_size, b_step1 = b_step;

        if( !C.data )
        {
            c_step0 = c_step1 = 0;
            flags &= ~GEMM_3_T;
        }
        else if( !(flags & GEMM_3_T) )
            c_step0 = C.step, c_step1 = elem_size;
        else
            c_step0 = elem_size, c_step1 = C.step;

        dm0 = std::min( block_lin_size, d_size.height );
        dn0 = std::min( block_lin_size, d_size.width );
        dk0_1 = block_size / dm0;
        dk0_2 = block_size / dn0;
        dk0 = std::min( dk0_1, dk0_2 );
        dk0 = std::min( dk0, len );
        if( dk0*dm0 > block_size )
            dm0 = block_size / dk0;
        if( dk0*dn0 > block_size )
            dn0 = block_size / dk0;

        dk0_1 = (dn0+dn0/8+2) & -2;
        b_buf_size = (dk0+dk0/8+1)*dk0_1*elem_size;
        d_buf_size = (dk0+dk0/8+1)*dk0_1*work_elem_size;

        if( is_a_t )
        {
            a_buf_size = (dm0+dm0/8+1)*((dk0+dk0/8+2)&-2)*elem_size;
            flags &= ~GEMM_1_T;
        }

        buf.allocate(d_buf_size + b_buf_size + a_buf_size + tmat_size);
        d_buf = (uchar*)buf;
        b_buf = d_buf + d_buf_size;

        if( is_a_t )
            a_buf = b_buf + b_buf_size;
        if( tmat_size > 0 )
            tmat = Mat(d_size.height, d_size.width, type, b_buf + b_buf_size + a_buf_size );

        for( i = 0; i < d_size.height; i += di )
        {
            di = dm0;
            if( i + di >= d_size.height || 8*(i + di) + di > 8*d_size.height )
                di = d_size.height - i;

            for( j = 0; j < d_size.width; j += dj )
            {
                uchar* _d = matD->ptr() + i*matD->step + j*elem_size;
                const uchar* _c = Cdata + i*c_step0 + j*c_step1;
                size_t _d_step = matD->step;
                dj = dn0;

                if( j + dj >= d_size.width || 8*(j + dj) + dj > 8*d_size.width )
                    dj = d_size.width - j;

                flags &= 15;
                if( dk0 < len )
                {
                    _d = d_buf;
                    _d_step = dj*work_elem_size;
                }

                for( k = 0; k < len; k += dk )
                {
                    const uchar* _a = A.ptr() + i*a_step0 + k*a_step1;
                    size_t _a_step = A.step;
                    const uchar* _b = B.ptr() + k*b_step0 + j*b_step1;
                    size_t _b_step = b_step;
                    Size a_bl_size;

                    dk = dk0;
                    if( k + dk >= len || 8*(k + dk) + dk > 8*len )
                        dk = len - k;

                    if( !is_a_t )
                        a_bl_size.width = dk, a_bl_size.height = di;
                    else
                        a_bl_size.width = di, a_bl_size.height = dk;

                    if( a_buf && is_a_t )
                    {
                        _a_step = dk*elem_size;
                        GEMM_TransposeBlock( _a, A.step, a_buf, _a_step, a_bl_size, elem_size );
                        std::swap( a_bl_size.width, a_bl_size.height );
                        _a = a_buf;
                    }

                    if( dj < d_size.width )
                    {
                        Size b_size;
                        if( !is_b_t )
                            b_size.width = dj, b_size.height = dk;
                        else
                            b_size.width = dk, b_size.height = dj;

                        _b_step = b_size.width*elem_size;
                        GEMM_CopyBlock( _b, b_step, b_buf, _b_step, b_size, elem_size );
                        _b = b_buf;
                    }

                    if( dk0 < len )
                        blockMulFunc( _a, _a_step, _b, _b_step, _d, _d_step,
                                      a_bl_size, Size(dj,di), flags );
                    else
                        singleMulFunc( _a, _a_step, _b, _b_step, _c, Cstep,
                                       _d, _d_step, a_bl_size, Size(dj,di), alpha, beta, flags );
                    flags |= 16;
                }

                if( dk0 < len )
                    storeFunc( _c, Cstep, _d, _d_step,
                               matD->ptr(i) + j*elem_size,
                               matD->step, Size(dj,di), alpha, beta, flags );
            }
        }
    }

    if( matD != &D )
        matD->copyTo(D);
    }
}
/****************************************************************************************\
*                                       Transform                                        *
\****************************************************************************************/

namespace cv
{
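// transform_ applies a dcn x (scn+1) affine matrix m to every scn-channel
// element of src: dst = m[:, :scn]*src + m[:, scn]. The common channel
// combinations are unrolled; the final branch handles arbitrary scn/dcn.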
template<typename T, typename WT> static void
transform_( const T* src, T* dst, const WT* m, int len, int scn, int dcn )
{
    int x;

    if( scn == 2 && dcn == 2 )
    {
        for( x = 0; x < len*2; x += 2 )
        {
            WT v0 = src[x], v1 = src[x+1];
            T t0 = saturate_cast<T>(m[0]*v0 + m[1]*v1 + m[2]);
            T t1 = saturate_cast<T>(m[3]*v0 + m[4]*v1 + m[5]);
            dst[x] = t0; dst[x+1] = t1;
        }
    }
    else if( scn == 3 && dcn == 3 )
    {
        for( x = 0; x < len*3; x += 3 )
        {
            WT v0 = src[x], v1 = src[x+1], v2 = src[x+2];
            T t0 = saturate_cast<T>(m[0]*v0 + m[1]*v1 + m[2]*v2 + m[3]);
            T t1 = saturate_cast<T>(m[4]*v0 + m[5]*v1 + m[6]*v2 + m[7]);
            T t2 = saturate_cast<T>(m[8]*v0 + m[9]*v1 + m[10]*v2 + m[11]);
            dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2;
        }
    }
    else if( scn == 3 && dcn == 1 )
    {
        for( x = 0; x < len; x++, src += 3 )
            dst[x] = saturate_cast<T>(m[0]*src[0] + m[1]*src[1] + m[2]*src[2] + m[3]);
    }
    else if( scn == 4 && dcn == 4 )
    {
        for( x = 0; x < len*4; x += 4 )
        {
            WT v0 = src[x], v1 = src[x+1], v2 = src[x+2], v3 = src[x+3];
            T t0 = saturate_cast<T>(m[0]*v0 + m[1]*v1 + m[2]*v2 + m[3]*v3 + m[4]);
            T t1 = saturate_cast<T>(m[5]*v0 + m[6]*v1 + m[7]*v2 + m[8]*v3 + m[9]);
            dst[x] = t0; dst[x+1] = t1;
            t0 = saturate_cast<T>(m[10]*v0 + m[11]*v1 + m[12]*v2 + m[13]*v3 + m[14]);
            t1 = saturate_cast<T>(m[15]*v0 + m[16]*v1 + m[17]*v2 + m[18]*v3 + m[19]);
            dst[x+2] = t0; dst[x+3] = t1;
        }
    }
    else
    {
        for( x = 0; x < len; x++, src += scn, dst += dcn )
        {
            const WT* _m = m;
            int j, k;
            for( j = 0; j < dcn; j++, _m += scn + 1 )
            {
                WT s = _m[scn];
                for( k = 0; k < scn; k++ )
                    s += _m[k]*src[k];
                dst[j] = saturate_cast<T>(s);
            }
        }
    }
}
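// SSE2 specializations. The helpers below load a 3x4 (or 4x5) transform matrix
// column-wise into XMM registers, so that one output pixel can be computed with
// one shuffle/multiply/add sequence per input channel.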
#if CV_SSE2

static inline void
load3x3Matrix( const float* m, __m128& m0, __m128& m1, __m128& m2, __m128& m3 )
{
    m0 = _mm_setr_ps(m[0], m[4], m[8], 0);
    m1 = _mm_setr_ps(m[1], m[5], m[9], 0);
    m2 = _mm_setr_ps(m[2], m[6], m[10], 0);
    m3 = _mm_setr_ps(m[3], m[7], m[11], 0);
}

static inline void
load4x4Matrix( const float* m, __m128& m0, __m128& m1, __m128& m2, __m128& m3, __m128& m4 )
{
    m0 = _mm_setr_ps(m[0], m[5], m[10], m[15]);
    m1 = _mm_setr_ps(m[1], m[6], m[11], m[16]);
    m2 = _mm_setr_ps(m[2], m[7], m[12], m[17]);
    m3 = _mm_setr_ps(m[3], m[8], m[13], m[18]);
    m4 = _mm_setr_ps(m[4], m[9], m[14], m[19]);
}

#endif
static void
transform_8u( const uchar* src, uchar* dst, const float* m, int len, int scn, int dcn )
{
#if CV_SSE2
    const int BITS = 10, SCALE = 1 << BITS;
    const float MAX_M = (float)(1 << (15 - BITS));

    if( USE_SSE2 && scn == 3 && dcn == 3 &&
        std::abs(m[0]) < MAX_M && std::abs(m[1]) < MAX_M && std::abs(m[2]) < MAX_M && std::abs(m[3]) < MAX_M*256 &&
        std::abs(m[4]) < MAX_M && std::abs(m[5]) < MAX_M && std::abs(m[6]) < MAX_M && std::abs(m[7]) < MAX_M*256 &&
        std::abs(m[8]) < MAX_M && std::abs(m[9]) < MAX_M && std::abs(m[10]) < MAX_M && std::abs(m[11]) < MAX_M*256 )
    {
        // faster fixed-point transformation
        short m00 = saturate_cast<short>(m[0]*SCALE), m01 = saturate_cast<short>(m[1]*SCALE),
              m02 = saturate_cast<short>(m[2]*SCALE), m10 = saturate_cast<short>(m[4]*SCALE),
              m11 = saturate_cast<short>(m[5]*SCALE), m12 = saturate_cast<short>(m[6]*SCALE),
              m20 = saturate_cast<short>(m[8]*SCALE), m21 = saturate_cast<short>(m[9]*SCALE),
              m22 = saturate_cast<short>(m[10]*SCALE);
        int m03 = saturate_cast<int>((m[3]+0.5f)*SCALE), m13 = saturate_cast<int>((m[7]+0.5f)*SCALE),
            m23 = saturate_cast<int>((m[11]+0.5f)*SCALE);

        __m128i m0 = _mm_setr_epi16(0, m00, m01, m02, m00, m01, m02, 0);
        __m128i m1 = _mm_setr_epi16(0, m10, m11, m12, m10, m11, m12, 0);
        __m128i m2 = _mm_setr_epi16(0, m20, m21, m22, m20, m21, m22, 0);
        __m128i m3 = _mm_setr_epi32(m03, m13, m23, 0);
        int x = 0;

        for( ; x <= (len - 8)*3; x += 8*3 )
        {
            __m128i z = _mm_setzero_si128(), t0, t1, t2, r0, r1;
            __m128i v0 = _mm_loadl_epi64((const __m128i*)(src + x));
            __m128i v1 = _mm_loadl_epi64((const __m128i*)(src + x + 8));
            __m128i v2 = _mm_loadl_epi64((const __m128i*)(src + x + 16)), v3;
            v0 = _mm_unpacklo_epi8(v0, z); // b0 g0 r0 b1 g1 r1 b2 g2
            v1 = _mm_unpacklo_epi8(v1, z); // r2 b3 g3 r3 b4 g4 r4 b5
            v2 = _mm_unpacklo_epi8(v2, z); // g5 r5 b6 g6 r6 b7 g7 r7

            v3 = _mm_srli_si128(v2, 2); // ? b6 g6 r6 b7 g7 r7 0
            v2 = _mm_or_si128(_mm_slli_si128(v2, 10), _mm_srli_si128(v1, 6)); // ? b4 g4 r4 b5 g5 r5 ?
            v1 = _mm_or_si128(_mm_slli_si128(v1, 6), _mm_srli_si128(v0, 10)); // ? b2 g2 r2 b3 g3 r3 ?
            v0 = _mm_slli_si128(v0, 2); // 0 b0 g0 r0 b1 g1 r1 ?

            // process pixels 0 & 1
            t0 = _mm_madd_epi16(v0, m0); // a0 b0 a1 b1
            t1 = _mm_madd_epi16(v0, m1); // c0 d0 c1 d1
            t2 = _mm_madd_epi16(v0, m2); // e0 f0 e1 f1
            v0 = _mm_unpacklo_epi32(t0, t1); // a0 c0 b0 d0
            t0 = _mm_unpackhi_epi32(t0, t1); // a1 b1 c1 d1
            t1 = _mm_unpacklo_epi32(t2, z); // e0 0 f0 0
            t2 = _mm_unpackhi_epi32(t2, z); // e1 0 f1 0
            r0 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(v0, t1), _mm_unpackhi_epi64(v0,t1)), m3); // B0 G0 R0 0
            r1 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(t0, t2), _mm_unpackhi_epi64(t0,t2)), m3); // B1 G1 R1 0
            r0 = _mm_srai_epi32(r0, BITS);
            r1 = _mm_srai_epi32(r1, BITS);
            v0 = _mm_packus_epi16(_mm_packs_epi32(_mm_slli_si128(r0, 4), r1), z); // 0 B0 G0 R0 B1 G1 R1 0

            // process pixels 2 & 3
            t0 = _mm_madd_epi16(v1, m0); // a0 b0 a1 b1
            t1 = _mm_madd_epi16(v1, m1); // c0 d0 c1 d1
            t2 = _mm_madd_epi16(v1, m2); // e0 f0 e1 f1
            v1 = _mm_unpacklo_epi32(t0, t1); // a0 c0 b0 d0
            t0 = _mm_unpackhi_epi32(t0, t1); // a1 b1 c1 d1
            t1 = _mm_unpacklo_epi32(t2, z); // e0 0 f0 0
            t2 = _mm_unpackhi_epi32(t2, z); // e1 0 f1 0
            r0 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(v1, t1), _mm_unpackhi_epi64(v1,t1)), m3); // B2 G2 R2 0
            r1 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(t0, t2), _mm_unpackhi_epi64(t0,t2)), m3); // B3 G3 R3 0
            r0 = _mm_srai_epi32(r0, BITS);
            r1 = _mm_srai_epi32(r1, BITS);
            v1 = _mm_packus_epi16(_mm_packs_epi32(_mm_slli_si128(r0, 4), r1), z); // 0 B2 G2 R2 B3 G3 R3 0

            // process pixels 4 & 5
            t0 = _mm_madd_epi16(v2, m0); // a0 b0 a1 b1
            t1 = _mm_madd_epi16(v2, m1); // c0 d0 c1 d1
            t2 = _mm_madd_epi16(v2, m2); // e0 f0 e1 f1
            v2 = _mm_unpacklo_epi32(t0, t1); // a0 c0 b0 d0
            t0 = _mm_unpackhi_epi32(t0, t1); // a1 b1 c1 d1
            t1 = _mm_unpacklo_epi32(t2, z); // e0 0 f0 0
            t2 = _mm_unpackhi_epi32(t2, z); // e1 0 f1 0
            r0 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(v2, t1), _mm_unpackhi_epi64(v2,t1)), m3); // B4 G4 R4 0
            r1 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(t0, t2), _mm_unpackhi_epi64(t0,t2)), m3); // B5 G5 R5 0
            r0 = _mm_srai_epi32(r0, BITS);
            r1 = _mm_srai_epi32(r1, BITS);
            v2 = _mm_packus_epi16(_mm_packs_epi32(_mm_slli_si128(r0, 4), r1), z); // 0 B4 G4 R4 B5 G5 R5 0

            // process pixels 6 & 7
            t0 = _mm_madd_epi16(v3, m0); // a0 b0 a1 b1
            t1 = _mm_madd_epi16(v3, m1); // c0 d0 c1 d1
            t2 = _mm_madd_epi16(v3, m2); // e0 f0 e1 f1
            v3 = _mm_unpacklo_epi32(t0, t1); // a0 c0 b0 d0
            t0 = _mm_unpackhi_epi32(t0, t1); // a1 b1 c1 d1
            t1 = _mm_unpacklo_epi32(t2, z); // e0 0 f0 0
            t2 = _mm_unpackhi_epi32(t2, z); // e1 0 f1 0
            r0 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(v3, t1), _mm_unpackhi_epi64(v3,t1)), m3); // B6 G6 R6 0
            r1 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(t0, t2), _mm_unpackhi_epi64(t0,t2)), m3); // B7 G7 R7 0
            r0 = _mm_srai_epi32(r0, BITS);
            r1 = _mm_srai_epi32(r1, BITS);
            v3 = _mm_packus_epi16(_mm_packs_epi32(_mm_slli_si128(r0, 4), r1), z); // 0 B6 G6 R6 B7 G7 R7 0

            v0 = _mm_or_si128(_mm_srli_si128(v0, 1), _mm_slli_si128(v1, 5));
            v1 = _mm_or_si128(_mm_srli_si128(v1, 3), _mm_slli_si128(v2, 3));
            v2 = _mm_or_si128(_mm_srli_si128(v2, 5), _mm_slli_si128(v3, 1));
            _mm_storel_epi64((__m128i*)(dst + x), v0);
            _mm_storel_epi64((__m128i*)(dst + x + 8), v1);
            _mm_storel_epi64((__m128i*)(dst + x + 16), v2);
        }

        for( ; x < len*3; x += 3 )
        {
            int v0 = src[x], v1 = src[x+1], v2 = src[x+2];
            uchar t0 = saturate_cast<uchar>((m00*v0 + m01*v1 + m02*v2 + m03)>>BITS);
            uchar t1 = saturate_cast<uchar>((m10*v0 + m11*v1 + m12*v2 + m13)>>BITS);
            uchar t2 = saturate_cast<uchar>((m20*v0 + m21*v1 + m22*v2 + m23)>>BITS);
            dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2;
        }
        return;
    }
#endif

    transform_(src, dst, m, len, scn, dcn);
}
static void
transform_16u( const ushort* src, ushort* dst, const float* m, int len, int scn, int dcn )
{
#if CV_SSE2
    if( USE_SSE2 && scn == 3 && dcn == 3 )
    {
        __m128 m0, m1, m2, m3;
        __m128i delta = _mm_setr_epi16(0,-32768,-32768,-32768,-32768,-32768,-32768,0);
        load3x3Matrix(m, m0, m1, m2, m3);
        m3 = _mm_sub_ps(m3, _mm_setr_ps(32768.f, 32768.f, 32768.f, 0.f));

        int x = 0;
        for( ; x <= (len - 4)*3; x += 4*3 )
        {
            __m128i z = _mm_setzero_si128();
            __m128i v0 = _mm_loadu_si128((const __m128i*)(src + x)), v1;
            __m128i v2 = _mm_loadl_epi64((const __m128i*)(src + x + 8)), v3;
            v1 = _mm_unpacklo_epi16(_mm_srli_si128(v0, 6), z); // b1 g1 r1
            v3 = _mm_unpacklo_epi16(_mm_srli_si128(v2, 2), z); // b3 g3 r3
            v2 = _mm_or_si128(_mm_srli_si128(v0, 12), _mm_slli_si128(v2, 4));
            v0 = _mm_unpacklo_epi16(v0, z); // b0 g0 r0
            v2 = _mm_unpacklo_epi16(v2, z); // b2 g2 r2
            __m128 x0 = _mm_cvtepi32_ps(v0), x1 = _mm_cvtepi32_ps(v1);
            __m128 x2 = _mm_cvtepi32_ps(v2), x3 = _mm_cvtepi32_ps(v3);
            __m128 y0 = _mm_add_ps(_mm_add_ps(_mm_add_ps(
                        _mm_mul_ps(m0, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(0,0,0,0))),
                        _mm_mul_ps(m1, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(1,1,1,1)))),
                        _mm_mul_ps(m2, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(2,2,2,2)))), m3);
            __m128 y1 = _mm_add_ps(_mm_add_ps(_mm_add_ps(
                        _mm_mul_ps(m0, _mm_shuffle_ps(x1,x1,_MM_SHUFFLE(0,0,0,0))),
                        _mm_mul_ps(m1, _mm_shuffle_ps(x1,x1,_MM_SHUFFLE(1,1,1,1)))),
                        _mm_mul_ps(m2, _mm_shuffle_ps(x1,x1,_MM_SHUFFLE(2,2,2,2)))), m3);
            __m128 y2 = _mm_add_ps(_mm_add_ps(_mm_add_ps(
                        _mm_mul_ps(m0, _mm_shuffle_ps(x2,x2,_MM_SHUFFLE(0,0,0,0))),
                        _mm_mul_ps(m1, _mm_shuffle_ps(x2,x2,_MM_SHUFFLE(1,1,1,1)))),
                        _mm_mul_ps(m2, _mm_shuffle_ps(x2,x2,_MM_SHUFFLE(2,2,2,2)))), m3);
            __m128 y3 = _mm_add_ps(_mm_add_ps(_mm_add_ps(
                        _mm_mul_ps(m0, _mm_shuffle_ps(x3,x3,_MM_SHUFFLE(0,0,0,0))),
                        _mm_mul_ps(m1, _mm_shuffle_ps(x3,x3,_MM_SHUFFLE(1,1,1,1)))),
                        _mm_mul_ps(m2, _mm_shuffle_ps(x3,x3,_MM_SHUFFLE(2,2,2,2)))), m3);
            v0 = _mm_cvtps_epi32(y0); v1 = _mm_cvtps_epi32(y1);
            v2 = _mm_cvtps_epi32(y2); v3 = _mm_cvtps_epi32(y3);

            v0 = _mm_add_epi16(_mm_packs_epi32(_mm_slli_si128(v0,4), v1), delta); // 0 b0 g0 r0 b1 g1 r1 0
            v2 = _mm_add_epi16(_mm_packs_epi32(_mm_slli_si128(v2,4), v3), delta); // 0 b2 g2 r2 b3 g3 r3 0
            v1 = _mm_or_si128(_mm_srli_si128(v0,2), _mm_slli_si128(v2,10)); // b0 g0 r0 b1 g1 r1 b2 g2
            v2 = _mm_srli_si128(v2, 6); // r2 b3 g3 r3 0 0 0 0
            _mm_storeu_si128((__m128i*)(dst + x), v1);
            _mm_storel_epi64((__m128i*)(dst + x + 8), v2);
        }

        for( ; x < len*3; x += 3 )
        {
            float v0 = src[x], v1 = src[x+1], v2 = src[x+2];
            ushort t0 = saturate_cast<ushort>(m[0]*v0 + m[1]*v1 + m[2]*v2 + m[3]);
            ushort t1 = saturate_cast<ushort>(m[4]*v0 + m[5]*v1 + m[6]*v2 + m[7]);
            ushort t2 = saturate_cast<ushort>(m[8]*v0 + m[9]*v1 + m[10]*v2 + m[11]);
            dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2;
        }
        return;
    }
#endif

    transform_(src, dst, m, len, scn, dcn);
}
static void
transform_32f( const float* src, float* dst, const float* m, int len, int scn, int dcn )
{
#if CV_SSE2
    if( USE_SSE2 )
    {
        int x = 0;
        if( scn == 3 && dcn == 3 )
        {
            __m128 m0, m1, m2, m3;
            load3x3Matrix(m, m0, m1, m2, m3);

            for( ; x < (len - 1)*3; x += 3 )
            {
                __m128 x0 = _mm_loadu_ps(src + x);
                __m128 y0 = _mm_add_ps(_mm_add_ps(_mm_add_ps(
                            _mm_mul_ps(m0, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(0,0,0,0))),
                            _mm_mul_ps(m1, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(1,1,1,1)))),
                            _mm_mul_ps(m2, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(2,2,2,2)))), m3);
                _mm_storel_pi((__m64*)(dst + x), y0);
                _mm_store_ss(dst + x + 2, _mm_movehl_ps(y0,y0));
            }

            for( ; x < len*3; x += 3 )
            {
                float v0 = src[x], v1 = src[x+1], v2 = src[x+2];
                float t0 = saturate_cast<float>(m[0]*v0 + m[1]*v1 + m[2]*v2 + m[3]);
                float t1 = saturate_cast<float>(m[4]*v0 + m[5]*v1 + m[6]*v2 + m[7]);
                float t2 = saturate_cast<float>(m[8]*v0 + m[9]*v1 + m[10]*v2 + m[11]);
                dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2;
            }
            return;
        }

        if( scn == 4 && dcn == 4 )
        {
            __m128 m0, m1, m2, m3, m4;
            load4x4Matrix(m, m0, m1, m2, m3, m4);

            for( ; x < len*4; x += 4 )
            {
                __m128 x0 = _mm_loadu_ps(src + x);
                __m128 y0 = _mm_add_ps(_mm_add_ps(_mm_add_ps(_mm_add_ps(
                            _mm_mul_ps(m0, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(0,0,0,0))),
                            _mm_mul_ps(m1, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(1,1,1,1)))),
                            _mm_mul_ps(m2, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(2,2,2,2)))),
                            _mm_mul_ps(m3, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(3,3,3,3)))), m4);
                _mm_storeu_ps(dst + x, y0);
            }
            return;
        }
    }
#endif

    transform_(src, dst, m, len, scn, dcn);
}
static void
transform_8s(const schar* src, schar* dst, const float* m, int len, int scn, int dcn)
{
    transform_(src, dst, m, len, scn, dcn);
}

static void
transform_16s(const short* src, short* dst, const float* m, int len, int scn, int dcn)
{
    transform_(src, dst, m, len, scn, dcn);
}

static void
transform_32s(const int* src, int* dst, const double* m, int len, int scn, int dcn)
{
    transform_(src, dst, m, len, scn, dcn);
}

static void
transform_64f(const double* src, double* dst, const double* m, int len, int scn, int dcn)
{
    transform_(src, dst, m, len, scn, dcn);
}
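// diagtransform_ is the fast path used when the square matrix is (numerically)
// diagonal: each channel is scaled and shifted independently,
// dst[j] = m[j][j]*src[j] + m[j][cn].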
template<typename T, typename WT> static void
diagtransform_( const T* src, T* dst, const WT* m, int len, int cn, int )
{
    int x;

    if( cn == 2 )
    {
        for( x = 0; x < len*2; x += 2 )
        {
            T t0 = saturate_cast<T>(m[0]*src[x] + m[2]);
            T t1 = saturate_cast<T>(m[4]*src[x+1] + m[5]);
            dst[x] = t0; dst[x+1] = t1;
        }
    }
    else if( cn == 3 )
    {
        for( x = 0; x < len*3; x += 3 )
        {
            T t0 = saturate_cast<T>(m[0]*src[x] + m[3]);
            T t1 = saturate_cast<T>(m[5]*src[x+1] + m[7]);
            T t2 = saturate_cast<T>(m[10]*src[x+2] + m[11]);
            dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2;
        }
    }
    else if( cn == 4 )
    {
        for( x = 0; x < len*4; x += 4 )
        {
            T t0 = saturate_cast<T>(m[0]*src[x] + m[4]);
            T t1 = saturate_cast<T>(m[6]*src[x+1] + m[9]);
            dst[x] = t0; dst[x+1] = t1;
            t0 = saturate_cast<T>(m[12]*src[x+2] + m[14]);
            t1 = saturate_cast<T>(m[18]*src[x+3] + m[19]);
            dst[x+2] = t0; dst[x+3] = t1;
        }
    }
    else
    {
        for( x = 0; x < len; x++, src += cn, dst += cn )
        {
            const WT* _m = m;
            for( int j = 0; j < cn; j++, _m += cn + 1 )
                dst[j] = saturate_cast<T>(src[j]*_m[j] + _m[cn]);
        }
    }
}
static void
diagtransform_8u(const uchar* src, uchar* dst, const float* m, int len, int scn, int dcn)
{
    diagtransform_(src, dst, m, len, scn, dcn);
}

static void
diagtransform_8s(const schar* src, schar* dst, const float* m, int len, int scn, int dcn)
{
    diagtransform_(src, dst, m, len, scn, dcn);
}

static void
diagtransform_16u(const ushort* src, ushort* dst, const float* m, int len, int scn, int dcn)
{
    diagtransform_(src, dst, m, len, scn, dcn);
}

static void
diagtransform_16s(const short* src, short* dst, const float* m, int len, int scn, int dcn)
{
    diagtransform_(src, dst, m, len, scn, dcn);
}

static void
diagtransform_32s(const int* src, int* dst, const double* m, int len, int scn, int dcn)
{
    diagtransform_(src, dst, m, len, scn, dcn);
}

static void
diagtransform_32f(const float* src, float* dst, const float* m, int len, int scn, int dcn)
{
    diagtransform_(src, dst, m, len, scn, dcn);
}

static void
diagtransform_64f(const double* src, double* dst, const double* m, int len, int scn, int dcn)
{
    diagtransform_(src, dst, m, len, scn, dcn);
}
typedef void (*TransformFunc)( const uchar* src, uchar* dst, const uchar* m, int, int, int );

static TransformFunc getTransformFunc(int depth)
{
    static TransformFunc transformTab[] =
    {
        (TransformFunc)transform_8u, (TransformFunc)transform_8s, (TransformFunc)transform_16u,
        (TransformFunc)transform_16s, (TransformFunc)transform_32s, (TransformFunc)transform_32f,
        (TransformFunc)transform_64f, 0
    };

    return transformTab[depth];
}

static TransformFunc getDiagTransformFunc(int depth)
{
    static TransformFunc diagTransformTab[] =
    {
        (TransformFunc)diagtransform_8u, (TransformFunc)diagtransform_8s, (TransformFunc)diagtransform_16u,
        (TransformFunc)diagtransform_16s, (TransformFunc)diagtransform_32s, (TransformFunc)diagtransform_32f,
        (TransformFunc)diagtransform_64f, 0
    };

    return diagTransformTab[depth];
}

}
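// cv::transform applies the matrix to every element of the source array. The
// matrix may be dcn x scn (linear map) or dcn x (scn+1) (affine map). A minimal
// usage sketch (illustrative only), converting an RGB image to gray with a 1x3
// matrix of channel weights:
//
//     cv::Mat rgb(480, 640, CV_8UC3), gray;
//     cv::Mat M = (cv::Mat_<float>(1, 3) << 0.299f, 0.587f, 0.114f);
//     cv::transform(rgb, gray, M);   // gray has type CV_8UC1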
void cv::transform( InputArray _src, OutputArray _dst, InputArray _mtx )
{
    Mat src = _src.getMat(), m = _mtx.getMat();
    int depth = src.depth(), scn = src.channels(), dcn = m.rows;
    CV_Assert( scn == m.cols || scn + 1 == m.cols );
    bool isDiag = false;

    _dst.create( src.size(), CV_MAKETYPE(depth, dcn) );
    Mat dst = _dst.getMat();

    int mtype = depth == CV_32S || depth == CV_64F ? CV_64F : CV_32F;
    AutoBuffer<double> _mbuf;
    double* mbuf = _mbuf;

    if( !m.isContinuous() || m.type() != mtype || m.cols != scn + 1 )
    {
        _mbuf.allocate(dcn*(scn+1));
        mbuf = (double*)_mbuf;
        Mat tmp(dcn, scn+1, mtype, mbuf);
        memset(tmp.ptr(), 0, tmp.total()*tmp.elemSize());
        if( m.cols == scn+1 )
            m.convertTo(tmp, mtype);
        else
        {
            Mat tmppart = tmp.colRange(0, m.cols);
            m.convertTo(tmppart, mtype);
        }
        m = tmp;
    }
    else
        mbuf = m.ptr<double>();

    if( scn == dcn )
    {
        int i, j;
        double eps = mtype == CV_32F ? FLT_EPSILON : DBL_EPSILON;

        if( scn == 1 )
        {
            double alpha, beta;
            if( mtype == CV_32F )
                alpha = m.at<float>(0), beta = m.at<float>(1);
            else
                alpha = m.at<double>(0), beta = m.at<double>(1);
            src.convertTo(dst, dst.type(), alpha, beta);
            return;
        }

        for( i = 0, isDiag = true; isDiag && i < scn; i++ )
        {
            for( j = 0; isDiag && j < scn; j++ )
            {
                double v = mtype == CV_32F ? m.at<float>(i, j) : m.at<double>(i, j);
                if( i != j && fabs(v) > eps )
                    isDiag = false;
            }
        }
    }

    TransformFunc func = isDiag ? getDiagTransformFunc(depth) : getTransformFunc(depth);
    CV_Assert( func != 0 );

    const Mat* arrays[] = {&src, &dst, 0};
    uchar* ptrs[2];
    NAryMatIterator it(arrays, ptrs);
    size_t i, total = it.size;

    for( i = 0; i < it.nplanes; i++, ++it )
        func( ptrs[0], ptrs[1], (uchar*)mbuf, (int)total, scn, dcn );
}
/****************************************************************************************\
*                                  Perspective Transform                                 *
\****************************************************************************************/

namespace cv
{
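// perspectiveTransform_ maps each scn-vector x through a (dcn+1) x (scn+1)
// homogeneous matrix m: y_h = m * [x; 1], then dst = y_h[0..dcn-1] / y_h[dcn].
// Points whose homogeneous coordinate w is (nearly) zero map to zero.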
template<typename T> static void
perspectiveTransform_( const T* src, T* dst, const double* m, int len, int scn, int dcn )
{
    const double eps = FLT_EPSILON;
    int i;

    if( scn == 2 && dcn == 2 )
    {
        for( i = 0; i < len*2; i += 2 )
        {
            T x = src[i], y = src[i + 1];
            double w = x*m[6] + y*m[7] + m[8];

            if( fabs(w) > eps )
            {
                w = 1./w;
                dst[i] = (T)((x*m[0] + y*m[1] + m[2])*w);
                dst[i+1] = (T)((x*m[3] + y*m[4] + m[5])*w);
            }
            else
                dst[i] = dst[i+1] = (T)0;
        }
    }
    else if( scn == 3 && dcn == 3 )
    {
        for( i = 0; i < len*3; i += 3 )
        {
            T x = src[i], y = src[i + 1], z = src[i + 2];
            double w = x*m[12] + y*m[13] + z*m[14] + m[15];

            if( fabs(w) > eps )
            {
                w = 1./w;
                dst[i] = (T)((x*m[0] + y*m[1] + z*m[2] + m[3]) * w);
                dst[i+1] = (T)((x*m[4] + y*m[5] + z*m[6] + m[7]) * w);
                dst[i+2] = (T)((x*m[8] + y*m[9] + z*m[10] + m[11]) * w);
            }
            else
                dst[i] = dst[i+1] = dst[i+2] = (T)0;
        }
    }
    else if( scn == 3 && dcn == 2 )
    {
        for( i = 0; i < len; i++, src += 3, dst += 2 )
        {
            T x = src[0], y = src[1], z = src[2];
            double w = x*m[8] + y*m[9] + z*m[10] + m[11];

            if( fabs(w) > eps )
            {
                w = 1./w;
                dst[0] = (T)((x*m[0] + y*m[1] + z*m[2] + m[3])*w);
                dst[1] = (T)((x*m[4] + y*m[5] + z*m[6] + m[7])*w);
            }
            else
                dst[0] = dst[1] = (T)0;
        }
    }
    else
    {
        for( i = 0; i < len; i++, src += scn, dst += dcn )
        {
            const double* _m = m + dcn*(scn + 1);
            double w = _m[scn];
            int j, k;
            for( k = 0; k < scn; k++ )
                w += _m[k]*src[k];
            if( fabs(w) > eps )
            {
                w = 1./w;
                _m = m;
                for( j = 0; j < dcn; j++, _m += scn + 1 )
                {
                    double s = _m[scn];
                    for( k = 0; k < scn; k++ )
                        s += _m[k]*src[k];
                    dst[j] = (T)(s*w);
                }
            }
            else
            {
                for( j = 0; j < dcn; j++ )
                    dst[j] = 0;
            }
        }
    }
}
static void
perspectiveTransform_32f(const float* src, float* dst, const double* m, int len, int scn, int dcn)
{
    perspectiveTransform_(src, dst, m, len, scn, dcn);
}

static void
perspectiveTransform_64f(const double* src, double* dst, const double* m, int len, int scn, int dcn)
{
    perspectiveTransform_(src, dst, m, len, scn, dcn);
}

}
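// cv::perspectiveTransform transforms a set of scn-dimensional points (not an
// image; use warpPerspective for that) through an (scn+1) x (scn+1) matrix.
// A minimal usage sketch (illustrative only):
//
//     std::vector<cv::Point2f> pts, out;
//     pts.push_back(cv::Point2f(1, 2));
//     cv::Mat H = cv::Mat::eye(3, 3, CV_64F);    // 3x3 homography
//     cv::perspectiveTransform(pts, out, H);     // identity H leaves pts unchanged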
void cv::perspectiveTransform( InputArray _src, OutputArray _dst, InputArray _mtx )
{
    Mat src = _src.getMat(), m = _mtx.getMat();
    int depth = src.depth(), scn = src.channels(), dcn = m.rows-1;
    CV_Assert( scn + 1 == m.cols );
    CV_Assert( depth == CV_32F || depth == CV_64F );

    _dst.create( src.size(), CV_MAKETYPE(depth, dcn) );
    Mat dst = _dst.getMat();

    const int mtype = CV_64F;
    AutoBuffer<double> _mbuf;
    double* mbuf = _mbuf;

    if( !m.isContinuous() || m.type() != mtype )
    {
        _mbuf.allocate((dcn+1)*(scn+1));
        Mat tmp(dcn+1, scn+1, mtype, (double*)_mbuf);
        m.convertTo(tmp, mtype);
        m = tmp;
    }
    else
        mbuf = m.ptr<double>();

    TransformFunc func = depth == CV_32F ?
        (TransformFunc)perspectiveTransform_32f :
        (TransformFunc)perspectiveTransform_64f;
    CV_Assert( func != 0 );

    const Mat* arrays[] = {&src, &dst, 0};
    uchar* ptrs[2];
    NAryMatIterator it(arrays, ptrs);
    size_t i, total = it.size;

    for( i = 0; i < it.nplanes; i++, ++it )
        func( ptrs[0], ptrs[1], (uchar*)mbuf, (int)total, scn, dcn );
}
/****************************************************************************************\
*                                       ScaleAdd                                         *
\****************************************************************************************/

namespace cv
{
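// scaleAdd computes dst = src1*alpha + src2 element-wise (a BLAS AXPY). The
// 32f/64f kernels below use SSE2 (or NEON) when available, preferring aligned
// loads, and otherwise fall back to a manually unrolled scalar loop.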
static void scaleAdd_32f(const float* src1, const float* src2, float* dst,
                         int len, float* _alpha)
{
    float alpha = *_alpha;
    int i = 0;
#if CV_SSE2
    if( USE_SSE2 )
    {
        __m128 a4 = _mm_set1_ps(alpha);
        if( (((size_t)src1|(size_t)src2|(size_t)dst) & 15) == 0 )
            for( ; i <= len - 8; i += 8 )
            {
                __m128 x0, x1, y0, y1, t0, t1;
                x0 = _mm_load_ps(src1 + i); x1 = _mm_load_ps(src1 + i + 4);
                y0 = _mm_load_ps(src2 + i); y1 = _mm_load_ps(src2 + i + 4);
                t0 = _mm_add_ps(_mm_mul_ps(x0, a4), y0);
                t1 = _mm_add_ps(_mm_mul_ps(x1, a4), y1);
                _mm_store_ps(dst + i, t0);
                _mm_store_ps(dst + i + 4, t1);
            }
        else
            for( ; i <= len - 8; i += 8 )
            {
                __m128 x0, x1, y0, y1, t0, t1;
                x0 = _mm_loadu_ps(src1 + i); x1 = _mm_loadu_ps(src1 + i + 4);
                y0 = _mm_loadu_ps(src2 + i); y1 = _mm_loadu_ps(src2 + i + 4);
                t0 = _mm_add_ps(_mm_mul_ps(x0, a4), y0);
                t1 = _mm_add_ps(_mm_mul_ps(x1, a4), y1);
                _mm_storeu_ps(dst + i, t0);
                _mm_storeu_ps(dst + i + 4, t1);
            }
    }
    else
#elif CV_NEON
    if (true)
    {
        for ( ; i <= len - 4; i += 4)
        {
            float32x4_t v_src1 = vld1q_f32(src1 + i), v_src2 = vld1q_f32(src2 + i);
            vst1q_f32(dst + i, vaddq_f32(vmulq_n_f32(v_src1, alpha), v_src2));
        }
    }
    else
#endif
    //vz why do we need unroll here?
    for( ; i <= len - 4; i += 4 )
    {
        float t0, t1;
        t0 = src1[i]*alpha + src2[i];
        t1 = src1[i+1]*alpha + src2[i+1];
        dst[i] = t0; dst[i+1] = t1;
        t0 = src1[i+2]*alpha + src2[i+2];
        t1 = src1[i+3]*alpha + src2[i+3];
        dst[i+2] = t0; dst[i+3] = t1;
    }
    for( ; i < len; i++ )
        dst[i] = src1[i]*alpha + src2[i];
}
static void scaleAdd_64f(const double* src1, const double* src2, double* dst,
                         int len, double* _alpha)
{
    double alpha = *_alpha;
    int i = 0;
#if CV_SSE2
    if( USE_SSE2 && (((size_t)src1|(size_t)src2|(size_t)dst) & 15) == 0 )
    {
        __m128d a2 = _mm_set1_pd(alpha);
        for( ; i <= len - 4; i += 4 )
        {
            __m128d x0, x1, y0, y1, t0, t1;
            x0 = _mm_load_pd(src1 + i); x1 = _mm_load_pd(src1 + i + 2);
            y0 = _mm_load_pd(src2 + i); y1 = _mm_load_pd(src2 + i + 2);
            t0 = _mm_add_pd(_mm_mul_pd(x0, a2), y0);
            t1 = _mm_add_pd(_mm_mul_pd(x1, a2), y1);
            _mm_store_pd(dst + i, t0);
            _mm_store_pd(dst + i + 2, t1);
        }
    }
    else
#endif
    // scalar fallback, manually unrolled by four
    for( ; i <= len - 4; i += 4 )
    {
        double t0, t1;
        t0 = src1[i]*alpha + src2[i];
        t1 = src1[i+1]*alpha + src2[i+1];
        dst[i] = t0; dst[i+1] = t1;
        t0 = src1[i+2]*alpha + src2[i+2];
        t1 = src1[i+3]*alpha + src2[i+3];
        dst[i+2] = t0; dst[i+3] = t1;
    }
    for( ; i < len; i++ )
        dst[i] = src1[i]*alpha + src2[i];
}
typedef void (*ScaleAddFunc)(const uchar* src1, const uchar* src2, uchar* dst, int len, const void* alpha);
#ifdef HAVE_OPENCL

static bool ocl_scaleAdd( InputArray _src1, double alpha, InputArray _src2, OutputArray _dst, int type )
{
    const ocl::Device & d = ocl::Device::getDefault();

    bool doubleSupport = d.doubleFPConfig() > 0;
    Size size = _src1.size();
    int depth = CV_MAT_DEPTH(type);
    if ( (!doubleSupport && depth == CV_64F) || size != _src2.size() )
        return false;

    _dst.create(size, type);
    int cn = CV_MAT_CN(type), wdepth = std::max(depth, CV_32F);
    int kercn = ocl::predictOptimalVectorWidthMax(_src1, _src2, _dst),
        rowsPerWI = d.isIntel() ? 4 : 1;

    char cvt[2][50];
    ocl::Kernel k("KF", ocl::core::arithm_oclsrc,
                  format("-D OP_SCALE_ADD -D BINARY_OP -D dstT=%s -D workT=%s -D convertToWT1=%s"
                         " -D srcT1=dstT -D srcT2=dstT -D convertToDT=%s -D workT1=%s"
                         " -D wdepth=%d%s -D rowsPerWI=%d",
                         ocl::typeToStr(CV_MAKE_TYPE(depth, kercn)),
                         ocl::typeToStr(CV_MAKE_TYPE(wdepth, kercn)),
                         ocl::convertTypeStr(depth, wdepth, kercn, cvt[0]),
                         ocl::convertTypeStr(wdepth, depth, kercn, cvt[1]),
                         ocl::typeToStr(wdepth), wdepth,
                         doubleSupport ? " -D DOUBLE_SUPPORT" : "", rowsPerWI));
    if (k.empty())
        return false;

    UMat src1 = _src1.getUMat(), src2 = _src2.getUMat(), dst = _dst.getUMat();

    ocl::KernelArg src1arg = ocl::KernelArg::ReadOnlyNoSize(src1),
            src2arg = ocl::KernelArg::ReadOnlyNoSize(src2),
            dstarg = ocl::KernelArg::WriteOnly(dst, cn, kercn);

    if (wdepth == CV_32F)
        k.args(src1arg, src2arg, dstarg, (float)alpha);
    else
        k.args(src1arg, src2arg, dstarg, alpha);

    size_t globalsize[2] = { (size_t)dst.cols * cn / kercn, ((size_t)dst.rows + rowsPerWI - 1) / rowsPerWI };
    return k.run(2, globalsize, NULL, false);
}

#endif
void cv::scaleAdd( InputArray _src1, double alpha, InputArray _src2, OutputArray _dst )
{
    int type = _src1.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
    CV_Assert( type == _src2.type() );

    CV_OCL_RUN(_src1.dims() <= 2 && _src2.dims() <= 2 && _dst.isUMat(),
               ocl_scaleAdd(_src1, alpha, _src2, _dst, type))

    if( depth < CV_32F )
    {
        addWeighted(_src1, alpha, _src2, 1, 0, _dst, depth);
        return;
    }

    Mat src1 = _src1.getMat(), src2 = _src2.getMat();
    CV_Assert(src1.size == src2.size);

    _dst.create(src1.dims, src1.size, type);
    Mat dst = _dst.getMat();

    float falpha = (float)alpha;
    void* palpha = depth == CV_32F ? (void*)&falpha : (void*)&alpha;

    ScaleAddFunc func = depth == CV_32F ? (ScaleAddFunc)scaleAdd_32f : (ScaleAddFunc)scaleAdd_64f;

    if (src1.isContinuous() && src2.isContinuous() && dst.isContinuous())
    {
        size_t len = src1.total()*cn;
        func(src1.ptr(), src2.ptr(), dst.ptr(), (int)len, palpha);
        return;
    }

    const Mat* arrays[] = {&src1, &src2, &dst, 0};
    uchar* ptrs[3];
    NAryMatIterator it(arrays, ptrs);
    size_t i, len = it.size*cn;

    for( i = 0; i < it.nplanes; i++, ++it )
        func( ptrs[0], ptrs[1], ptrs[2], (int)len, palpha );
}
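/*
   Illustrative usage sketch (not part of the library build):

       cv::Mat X = cv::Mat::ones(3, 3, CV_32F);
       cv::Mat Y = cv::Mat::ones(3, 3, CV_32F);
       cv::Mat Z;
       cv::scaleAdd(X, 2.0, Y, Z);   // Z = 2*X + Y, so every element equals 3
*/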
/****************************************************************************************\
*                                    Covariance Matrix                                   *
\****************************************************************************************/
void cv::calcCovarMatrix( const Mat* data, int nsamples, Mat& covar, Mat& _mean, int flags, int ctype )
{
    CV_Assert( data && nsamples > 0 );
    Size size = data[0].size();
    int sz = size.width * size.height, esz = (int)data[0].elemSize();
    int type = data[0].type();
    Mat mean;
    ctype = std::max(std::max(CV_MAT_DEPTH(ctype >= 0 ? ctype : type), _mean.depth()), CV_32F);

    if( (flags & CV_COVAR_USE_AVG) != 0 )
    {
        CV_Assert( _mean.size() == size );
        if( _mean.isContinuous() && _mean.type() == ctype )
            mean = _mean.reshape(1, 1);
        else
        {
            _mean.convertTo(mean, ctype);
            mean = mean.reshape(1, 1);
        }
    }

    Mat _data(nsamples, sz, type);

    for( int i = 0; i < nsamples; i++ )
    {
        CV_Assert( data[i].size() == size && data[i].type() == type );
        if( data[i].isContinuous() )
            memcpy( _data.ptr(i), data[i].ptr(), sz*esz );
        else
        {
            Mat dataRow(size.height, size.width, type, _data.ptr(i));
            data[i].copyTo(dataRow);
        }
    }

    calcCovarMatrix( _data, covar, mean, (flags & ~(CV_COVAR_ROWS|CV_COVAR_COLS)) | CV_COVAR_ROWS, ctype );
    if( (flags & CV_COVAR_USE_AVG) == 0 )
        _mean = mean.reshape(1, size.height);
}
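/*
   Illustrative usage sketch for the array-of-samples overload above (not part
   of the library build; the three 2x2 "images" are hypothetical):

       cv::Mat samples[3];
       for( int k = 0; k < 3; k++ )
           samples[k] = cv::Mat::eye(2, 2, CV_32F)*k;
       cv::Mat covar, mean;
       cv::calcCovarMatrix(samples, 3, covar, mean,
                           CV_COVAR_NORMAL | CV_COVAR_SCALE, CV_64F);
       // each 2x2 image is flattened to a 4-element sample, so covar is 4x4
*/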
void cv::calcCovarMatrix( InputArray _src, OutputArray _covar, InputOutputArray _mean, int flags, int ctype )
{
    if(_src.kind() == _InputArray::STD_VECTOR_MAT)
    {
        std::vector<cv::Mat> src;
        _src.getMatVector(src);

        CV_Assert( src.size() > 0 );

        Size size = src[0].size();
        int type = src[0].type();

        ctype = std::max(std::max(CV_MAT_DEPTH(ctype >= 0 ? ctype : type), _mean.depth()), CV_32F);

        Mat _data(static_cast<int>(src.size()), size.area(), type);

        int i = 0;
        for(std::vector<cv::Mat>::iterator each = src.begin(); each != src.end(); each++, i++ )
        {
            CV_Assert( (*each).size() == size && (*each).type() == type );
            Mat dataRow(size.height, size.width, type, _data.ptr(i));
            (*each).copyTo(dataRow);
        }

        Mat mean;
        if( (flags & CV_COVAR_USE_AVG) != 0 )
        {
            CV_Assert( _mean.size() == size );

            // check the type of the supplied mean, not of the empty temporary
            if( _mean.type() != ctype )
            {
                mean = _mean.getMat();
                _mean.create(mean.size(), ctype);
                Mat tmp = _mean.getMat();
                mean.convertTo(tmp, ctype);
            }

            mean = _mean.getMat().reshape(1, 1);
        }

        calcCovarMatrix( _data, _covar, mean, (flags & ~(CV_COVAR_ROWS|CV_COVAR_COLS)) | CV_COVAR_ROWS, ctype );

        if( (flags & CV_COVAR_USE_AVG) == 0 )
        {
            mean = mean.reshape(1, size.height);
            mean.copyTo(_mean);
        }
        return;
    }

    Mat data = _src.getMat(), mean;
    CV_Assert( ((flags & CV_COVAR_ROWS) != 0) ^ ((flags & CV_COVAR_COLS) != 0) );
    bool takeRows = (flags & CV_COVAR_ROWS) != 0;
    int type = data.type();
    int nsamples = takeRows ? data.rows : data.cols;
    CV_Assert( nsamples > 0 );
    Size size = takeRows ? Size(data.cols, 1) : Size(1, data.rows);

    if( (flags & CV_COVAR_USE_AVG) != 0 )
    {
        mean = _mean.getMat();
        ctype = std::max(std::max(CV_MAT_DEPTH(ctype >= 0 ? ctype : type), mean.depth()), CV_32F);
        CV_Assert( mean.size() == size );
        if( mean.type() != ctype )
        {
            _mean.create(mean.size(), ctype);
            Mat tmp = _mean.getMat();
            mean.convertTo(tmp, ctype);
            mean = tmp;
        }
    }
    else
    {
        ctype = std::max(CV_MAT_DEPTH(ctype >= 0 ? ctype : type), CV_32F);
        reduce( _src, _mean, takeRows ? 0 : 1, CV_REDUCE_AVG, ctype );
        mean = _mean.getMat();
    }

    mulTransposed( data, _covar, ((flags & CV_COVAR_NORMAL) == 0) ^ takeRows,
        mean, (flags & CV_COVAR_SCALE) != 0 ? 1./nsamples : 1, ctype );
}
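/*
   Illustrative usage sketch for the single-matrix form, with one sample per
   row (not part of the library build):

       cv::Mat data = (cv::Mat_<float>(3, 2) << 1, 2,
                                                3, 4,
                                                5, 6);
       cv::Mat covar, mean;
       cv::calcCovarMatrix(data, covar, mean,
                           CV_COVAR_NORMAL | CV_COVAR_ROWS | CV_COVAR_SCALE);
       // mean is the 1x2 row (3, 4); covar is 2x2, scaled by 1/nsamples
*/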
/****************************************************************************************\
*                                      Mahalanobis                                       *
\****************************************************************************************/
double cv::Mahalanobis( InputArray _v1, InputArray _v2, InputArray _icovar )
{
    Mat v1 = _v1.getMat(), v2 = _v2.getMat(), icovar = _icovar.getMat();
    int type = v1.type(), depth = v1.depth();
    Size sz = v1.size();
    int i, j, len = sz.width*sz.height*v1.channels();
    AutoBuffer<double> buf(len);
    double result = 0;

    CV_Assert( type == v2.type() && type == icovar.type() &&
        sz == v2.size() && len == icovar.rows && len == icovar.cols );

    sz.width *= v1.channels();
    if( v1.isContinuous() && v2.isContinuous() )
    {
        sz.width *= sz.height;
        sz.height = 1;
    }

    if( depth == CV_32F )
    {
        const float* src1 = v1.ptr<float>();
        const float* src2 = v2.ptr<float>();
        size_t step1 = v1.step/sizeof(src1[0]);
        size_t step2 = v2.step/sizeof(src2[0]);
        double* diff = buf;
        const float* mat = icovar.ptr<float>();
        size_t matstep = icovar.step/sizeof(mat[0]);

        for( ; sz.height--; src1 += step1, src2 += step2, diff += sz.width )
        {
            for( i = 0; i < sz.width; i++ )
                diff[i] = src1[i] - src2[i];
        }

        diff = buf;
        for( i = 0; i < len; i++, mat += matstep )
        {
            double row_sum = 0;
            j = 0;
            #if CV_ENABLE_UNROLLED
            for(; j <= len - 4; j += 4 )
                row_sum += diff[j]*mat[j] + diff[j+1]*mat[j+1] +
                           diff[j+2]*mat[j+2] + diff[j+3]*mat[j+3];
            #endif
            for( ; j < len; j++ )
                row_sum += diff[j]*mat[j];
            result += row_sum * diff[i];
        }
    }
    else if( depth == CV_64F )
    {
        const double* src1 = v1.ptr<double>();
        const double* src2 = v2.ptr<double>();
        size_t step1 = v1.step/sizeof(src1[0]);
        size_t step2 = v2.step/sizeof(src2[0]);
        double* diff = buf;
        const double* mat = icovar.ptr<double>();
        size_t matstep = icovar.step/sizeof(mat[0]);

        for( ; sz.height--; src1 += step1, src2 += step2, diff += sz.width )
        {
            for( i = 0; i < sz.width; i++ )
                diff[i] = src1[i] - src2[i];
        }

        diff = buf;
        for( i = 0; i < len; i++, mat += matstep )
        {
            double row_sum = 0;
            j = 0;
            #if CV_ENABLE_UNROLLED
            for(; j <= len - 4; j += 4 )
                row_sum += diff[j]*mat[j] + diff[j+1]*mat[j+1] +
                           diff[j+2]*mat[j+2] + diff[j+3]*mat[j+3];
            #endif
            for( ; j < len; j++ )
                row_sum += diff[j]*mat[j];
            result += row_sum * diff[i];
        }
    }
    else
        CV_Error( CV_StsUnsupportedFormat, "" );

    return std::sqrt(result);
}
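/*
   Illustrative usage sketch (not part of the library build; `samples`, `v1`
   and `v2` are hypothetical). The function evaluates
   sqrt( (v1 - v2)^T * icovar * (v1 - v2) ), so its third argument must be the
   *inverse* of the covariance matrix, with the same depth as the vectors:

       cv::Mat covar, mean, icovar;
       cv::calcCovarMatrix(samples, covar, mean,
                           CV_COVAR_NORMAL | CV_COVAR_ROWS | CV_COVAR_SCALE);
       cv::invert(covar, icovar, cv::DECOMP_SVD);
       double d = cv::Mahalanobis(v1, v2, icovar);
*/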
/****************************************************************************************\
*                                     MulTransposed                                      *
\****************************************************************************************/
template<typename sT, typename dT> static void
MulTransposedR( const Mat& srcmat, Mat& dstmat, const Mat& deltamat, double scale )
{
    int i, j, k;
    const sT* src = srcmat.ptr<sT>();
    dT* dst = dstmat.ptr<dT>();
    const dT* delta = deltamat.ptr<dT>();
    size_t srcstep = srcmat.step/sizeof(src[0]);
    size_t dststep = dstmat.step/sizeof(dst[0]);
    size_t deltastep = deltamat.rows > 1 ? deltamat.step/sizeof(delta[0]) : 0;
    int delta_cols = deltamat.cols;
    Size size = srcmat.size();
    dT* tdst = dst;
    dT* col_buf = 0;
    dT* delta_buf = 0;
    int buf_size = size.height*sizeof(dT);
    AutoBuffer<uchar> buf;

    if( delta && delta_cols < size.width )
    {
        assert( delta_cols == 1 );
        buf_size *= 5;  // one column buffer plus a 4x-replicated delta column
    }
    buf.allocate(buf_size);
    col_buf = (dT*)(uchar*)buf;

    if( delta && delta_cols < size.width )
    {
        delta_buf = col_buf + size.height;
        for( i = 0; i < size.height; i++ )
            delta_buf[i*4] = delta_buf[i*4+1] =
                delta_buf[i*4+2] = delta_buf[i*4+3] = delta[i*deltastep];
        deltastep = deltastep ? 4 : 0;
    }

    if( !delta )
        for( i = 0; i < size.width; i++, tdst += dststep )
        {
            for( k = 0; k < size.height; k++ )
                col_buf[k] = src[k*srcstep+i];

            for( j = i; j <= size.width - 4; j += 4 )
            {
                double s0 = 0, s1 = 0, s2 = 0, s3 = 0;
                const sT *tsrc = src + j;

                for( k = 0; k < size.height; k++, tsrc += srcstep )
                {
                    double a = col_buf[k];
                    s0 += a * tsrc[0];
                    s1 += a * tsrc[1];
                    s2 += a * tsrc[2];
                    s3 += a * tsrc[3];
                }

                tdst[j] = (dT)(s0*scale);
                tdst[j+1] = (dT)(s1*scale);
                tdst[j+2] = (dT)(s2*scale);
                tdst[j+3] = (dT)(s3*scale);
            }

            for( ; j < size.width; j++ )
            {
                double s0 = 0;
                const sT *tsrc = src + j;

                for( k = 0; k < size.height; k++, tsrc += srcstep )
                    s0 += (double)col_buf[k] * tsrc[0];

                tdst[j] = (dT)(s0*scale);
            }
        }
    else
        for( i = 0; i < size.width; i++, tdst += dststep )
        {
            if( !delta_buf )
                for( k = 0; k < size.height; k++ )
                    col_buf[k] = src[k*srcstep+i] - delta[k*deltastep+i];
            else
                for( k = 0; k < size.height; k++ )
                    col_buf[k] = src[k*srcstep+i] - delta_buf[k*deltastep];

            for( j = i; j <= size.width - 4; j += 4 )
            {
                double s0 = 0, s1 = 0, s2 = 0, s3 = 0;
                const sT *tsrc = src + j;
                const dT *d = delta_buf ? delta_buf : delta + j;

                for( k = 0; k < size.height; k++, tsrc+=srcstep, d+=deltastep )
                {
                    double a = col_buf[k];
                    s0 += a * (tsrc[0] - d[0]);
                    s1 += a * (tsrc[1] - d[1]);
                    s2 += a * (tsrc[2] - d[2]);
                    s3 += a * (tsrc[3] - d[3]);
                }

                tdst[j] = (dT)(s0*scale);
                tdst[j+1] = (dT)(s1*scale);
                tdst[j+2] = (dT)(s2*scale);
                tdst[j+3] = (dT)(s3*scale);
            }

            for( ; j < size.width; j++ )
            {
                double s0 = 0;
                const sT *tsrc = src + j;
                const dT *d = delta_buf ? delta_buf : delta + j;

                for( k = 0; k < size.height; k++, tsrc+=srcstep, d+=deltastep )
                    s0 += (double)col_buf[k] * (tsrc[0] - d[0]);

                tdst[j] = (dT)(s0*scale);
            }
        }
}
template<typename sT, typename dT> static void
MulTransposedL( const Mat& srcmat, Mat& dstmat, const Mat& deltamat, double scale )
{
    int i, j, k;
    const sT* src = srcmat.ptr<sT>();
    dT* dst = dstmat.ptr<dT>();
    const dT* delta = deltamat.ptr<dT>();
    size_t srcstep = srcmat.step/sizeof(src[0]);
    size_t dststep = dstmat.step/sizeof(dst[0]);
    size_t deltastep = deltamat.rows > 1 ? deltamat.step/sizeof(delta[0]) : 0;
    int delta_cols = deltamat.cols;
    Size size = srcmat.size();
    dT* tdst = dst;

    if( !delta )
        for( i = 0; i < size.height; i++, tdst += dststep )
            for( j = i; j < size.height; j++ )
            {
                double s = 0;
                const sT *tsrc1 = src + i*srcstep;
                const sT *tsrc2 = src + j*srcstep;

                for( k = 0; k <= size.width - 4; k += 4 )
                    s += (double)tsrc1[k]*tsrc2[k] + (double)tsrc1[k+1]*tsrc2[k+1] +
                         (double)tsrc1[k+2]*tsrc2[k+2] + (double)tsrc1[k+3]*tsrc2[k+3];
                for( ; k < size.width; k++ )
                    s += (double)tsrc1[k] * tsrc2[k];
                tdst[j] = (dT)(s*scale);
            }
    else
    {
        dT delta_buf[4];
        int delta_shift = delta_cols == size.width ? 4 : 0;
        AutoBuffer<uchar> buf(size.width*sizeof(dT));
        dT* row_buf = (dT*)(uchar*)buf;

        for( i = 0; i < size.height; i++, tdst += dststep )
        {
            const sT *tsrc1 = src + i*srcstep;
            const dT *tdelta1 = delta + i*deltastep;

            if( delta_cols < size.width )
                for( k = 0; k < size.width; k++ )
                    row_buf[k] = tsrc1[k] - tdelta1[0];
            else
                for( k = 0; k < size.width; k++ )
                    row_buf[k] = tsrc1[k] - tdelta1[k];

            for( j = i; j < size.height; j++ )
            {
                double s = 0;
                const sT *tsrc2 = src + j*srcstep;
                const dT *tdelta2 = delta + j*deltastep;
                if( delta_cols < size.width )
                {
                    delta_buf[0] = delta_buf[1] =
                        delta_buf[2] = delta_buf[3] = tdelta2[0];
                    tdelta2 = delta_buf;
                }
                for( k = 0; k <= size.width-4; k += 4, tdelta2 += delta_shift )
                    s += (double)row_buf[k]*(tsrc2[k] - tdelta2[0]) +
                         (double)row_buf[k+1]*(tsrc2[k+1] - tdelta2[1]) +
                         (double)row_buf[k+2]*(tsrc2[k+2] - tdelta2[2]) +
                         (double)row_buf[k+3]*(tsrc2[k+3] - tdelta2[3]);
                for( ; k < size.width; k++, tdelta2++ )
                    s += (double)row_buf[k]*(tsrc2[k] - tdelta2[0]);
                tdst[j] = (dT)(s*scale);
            }
        }
    }
}
typedef void (*MulTransposedFunc)(const Mat& src, Mat& dst, const Mat& delta, double scale);
void cv::mulTransposed( InputArray _src, OutputArray _dst, bool ata,
                        InputArray _delta, double scale, int dtype )
{
    Mat src = _src.getMat(), delta = _delta.getMat();
    const int gemm_level = 100; // boundary above which GEMM is faster.
    int stype = src.type();
    dtype = std::max(std::max(CV_MAT_DEPTH(dtype >= 0 ? dtype : stype), delta.depth()), CV_32F);
    CV_Assert( src.channels() == 1 );

    if( !delta.empty() )
    {
        CV_Assert( delta.channels() == 1 &&
            (delta.rows == src.rows || delta.rows == 1) &&
            (delta.cols == src.cols || delta.cols == 1));
        if( delta.type() != dtype )
            delta.convertTo(delta, dtype);
    }

    int dsize = ata ? src.cols : src.rows;
    _dst.create( dsize, dsize, dtype );
    Mat dst = _dst.getMat();

    if( src.data == dst.data || (stype == dtype &&
        (dst.cols >= gemm_level && dst.rows >= gemm_level &&
         src.cols >= gemm_level && src.rows >= gemm_level)))
    {
        Mat src2;
        const Mat* tsrc = &src;
        if( !delta.empty() )
        {
            if( delta.size() == src.size() )
                subtract( src, delta, src2 );
            else
            {
                repeat(delta, src.rows/delta.rows, src.cols/delta.cols, src2);
                subtract( src, src2, src2 );
            }
            tsrc = &src2;
        }
        gemm( *tsrc, *tsrc, scale, Mat(), 0, dst, ata ? GEMM_1_T : GEMM_2_T );
    }
    else
    {
        MulTransposedFunc func = 0;
        if(stype == CV_8U && dtype == CV_32F)
        {
            if(ata)
                func = MulTransposedR<uchar,float>;
            else
                func = MulTransposedL<uchar,float>;
        }
        else if(stype == CV_8U && dtype == CV_64F)
        {
            if(ata)
                func = MulTransposedR<uchar,double>;
            else
                func = MulTransposedL<uchar,double>;
        }
        else if(stype == CV_16U && dtype == CV_32F)
        {
            if(ata)
                func = MulTransposedR<ushort,float>;
            else
                func = MulTransposedL<ushort,float>;
        }
        else if(stype == CV_16U && dtype == CV_64F)
        {
            if(ata)
                func = MulTransposedR<ushort,double>;
            else
                func = MulTransposedL<ushort,double>;
        }
        else if(stype == CV_16S && dtype == CV_32F)
        {
            if(ata)
                func = MulTransposedR<short,float>;
            else
                func = MulTransposedL<short,float>;
        }
        else if(stype == CV_16S && dtype == CV_64F)
        {
            if(ata)
                func = MulTransposedR<short,double>;
            else
                func = MulTransposedL<short,double>;
        }
        else if(stype == CV_32F && dtype == CV_32F)
        {
            if(ata)
                func = MulTransposedR<float,float>;
            else
                func = MulTransposedL<float,float>;
        }
        else if(stype == CV_32F && dtype == CV_64F)
        {
            if(ata)
                func = MulTransposedR<float,double>;
            else
                func = MulTransposedL<float,double>;
        }
        else if(stype == CV_64F && dtype == CV_64F)
        {
            if(ata)
                func = MulTransposedR<double,double>;
            else
                func = MulTransposedL<double,double>;
        }
        else
            CV_Error( CV_StsUnsupportedFormat, "" );

        func( src, dst, delta, scale );
        completeSymm( dst, false );
    }
}
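/*
   Illustrative usage sketch (not part of the library build):

       cv::Mat A = (cv::Mat_<float>(2, 3) << 1, 2, 3,
                                             4, 5, 6);
       cv::Mat AtA, AAt;
       cv::mulTransposed(A, AtA, true);    // 3x3 result: A^T * A
       cv::mulTransposed(A, AAt, false);   // 2x2 result: A * A^T
       // with a non-empty delta, (A - delta)^T * (A - delta) is computed instead
*/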
/****************************************************************************************\
*                                      Dot Product                                       *
\****************************************************************************************/
template<typename T> double
dotProd_(const T* src1, const T* src2, int len)
{
    int i = 0;
    double result = 0;

    #if CV_ENABLE_UNROLLED
    for( ; i <= len - 4; i += 4 )
        result += (double)src1[i]*src2[i] + (double)src1[i+1]*src2[i+1] +
            (double)src1[i+2]*src2[i+2] + (double)src1[i+3]*src2[i+3];
    #endif
    for( ; i < len; i++ )
        result += (double)src1[i]*src2[i];

    return result;
}
static double dotProd_8u(const uchar* src1, const uchar* src2, int len)
{
    double r = 0.0;
#if ARITHM_USE_IPP && 0
    // the IPP branch is intentionally disabled here ("&& 0")
    if (0 <= ippiDotProd_8u64f_C1R(src1, (int)(len*sizeof(src1[0])),
                                   src2, (int)(len*sizeof(src2[0])),
                                   ippiSize(len, 1), &r))
    {
        CV_IMPL_ADD(CV_IMPL_IPP);
        return r;
    }
    setIppErrorStatus();
#endif
    int i = 0;

#if CV_SSE2
    if( USE_SSE2 )
    {
        // process the data in blocks so that the 32-bit integer accumulators
        // cannot overflow: even 8192 products of at most 255*255 stay below 2^31
        int j, len0 = len & -4, blockSize0 = (1 << 13), blockSize;
        __m128i z = _mm_setzero_si128();
        CV_DECL_ALIGNED(16) int buf[4];

        while( i < len0 )
        {
            blockSize = std::min(len0 - i, blockSize0);
            __m128i s = z;
            j = 0;
            for( ; j <= blockSize - 16; j += 16 )
            {
                __m128i b0 = _mm_loadu_si128((const __m128i*)(src1 + j));
                __m128i b1 = _mm_loadu_si128((const __m128i*)(src2 + j));
                __m128i s0, s1, s2, s3;
                s0 = _mm_unpacklo_epi8(b0, z);
                s2 = _mm_unpackhi_epi8(b0, z);
                s1 = _mm_unpacklo_epi8(b1, z);
                s3 = _mm_unpackhi_epi8(b1, z);
                s0 = _mm_madd_epi16(s0, s1);
                s2 = _mm_madd_epi16(s2, s3);
                s = _mm_add_epi32(s, s0);
                s = _mm_add_epi32(s, s2);
            }

            for( ; j < blockSize; j += 4 )
            {
                __m128i s0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(const int*)(src1 + j)), z);
                __m128i s1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(const int*)(src2 + j)), z);
                s0 = _mm_madd_epi16(s0, s1);
                s = _mm_add_epi32(s, s0);
            }

            _mm_store_si128((__m128i*)buf, s);
            r += buf[0] + buf[1] + buf[2] + buf[3];

            src1 += blockSize;
            src2 += blockSize;
            i += blockSize;
        }
    }
#elif CV_NEON
    // the same blockwise scheme using NEON widening multiply-accumulate
    int len0 = len & -8, blockSize0 = (1 << 15), blockSize;
    uint32x4_t v_zero = vdupq_n_u32(0u);
    CV_DECL_ALIGNED(16) uint buf[4];

    while( i < len0 )
    {
        blockSize = std::min(len0 - i, blockSize0);
        uint32x4_t v_sum = v_zero;

        int j = 0;
        for( ; j <= blockSize - 16; j += 16 )
        {
            uint8x16_t v_src1 = vld1q_u8(src1 + j), v_src2 = vld1q_u8(src2 + j);

            uint16x8_t v_src10 = vmovl_u8(vget_low_u8(v_src1)), v_src20 = vmovl_u8(vget_low_u8(v_src2));
            v_sum = vmlal_u16(v_sum, vget_low_u16(v_src10), vget_low_u16(v_src20));
            v_sum = vmlal_u16(v_sum, vget_high_u16(v_src10), vget_high_u16(v_src20));

            v_src10 = vmovl_u8(vget_high_u8(v_src1));
            v_src20 = vmovl_u8(vget_high_u8(v_src2));
            v_sum = vmlal_u16(v_sum, vget_low_u16(v_src10), vget_low_u16(v_src20));
            v_sum = vmlal_u16(v_sum, vget_high_u16(v_src10), vget_high_u16(v_src20));
        }

        for( ; j <= blockSize - 8; j += 8 )
        {
            uint16x8_t v_src1 = vmovl_u8(vld1_u8(src1 + j)), v_src2 = vmovl_u8(vld1_u8(src2 + j));
            v_sum = vmlal_u16(v_sum, vget_low_u16(v_src1), vget_low_u16(v_src2));
            v_sum = vmlal_u16(v_sum, vget_high_u16(v_src1), vget_high_u16(v_src2));
        }

        vst1q_u32(buf, v_sum);
        r += buf[0] + buf[1] + buf[2] + buf[3];

        src1 += blockSize;
        src2 += blockSize;
        i += blockSize;
    }
#endif
    return r + dotProd_(src1, src2, len - i);
}
static double dotProd_8s(const schar* src1, const schar* src2, int len)
{
    double r = 0.0;
    int i = 0;

#if CV_NEON
    // blockwise accumulation keeps the signed 32-bit lanes from overflowing
    int len0 = len & -8, blockSize0 = (1 << 14), blockSize;
    int32x4_t v_zero = vdupq_n_s32(0);
    CV_DECL_ALIGNED(16) int buf[4];

    while( i < len0 )
    {
        blockSize = std::min(len0 - i, blockSize0);
        int32x4_t v_sum = v_zero;

        int j = 0;
        for( ; j <= blockSize - 16; j += 16 )
        {
            int8x16_t v_src1 = vld1q_s8(src1 + j), v_src2 = vld1q_s8(src2 + j);

            int16x8_t v_src10 = vmovl_s8(vget_low_s8(v_src1)), v_src20 = vmovl_s8(vget_low_s8(v_src2));
            v_sum = vmlal_s16(v_sum, vget_low_s16(v_src10), vget_low_s16(v_src20));
            v_sum = vmlal_s16(v_sum, vget_high_s16(v_src10), vget_high_s16(v_src20));

            v_src10 = vmovl_s8(vget_high_s8(v_src1));
            v_src20 = vmovl_s8(vget_high_s8(v_src2));
            v_sum = vmlal_s16(v_sum, vget_low_s16(v_src10), vget_low_s16(v_src20));
            v_sum = vmlal_s16(v_sum, vget_high_s16(v_src10), vget_high_s16(v_src20));
        }

        for( ; j <= blockSize - 8; j += 8 )
        {
            int16x8_t v_src1 = vmovl_s8(vld1_s8(src1 + j)), v_src2 = vmovl_s8(vld1_s8(src2 + j));
            v_sum = vmlal_s16(v_sum, vget_low_s16(v_src1), vget_low_s16(v_src2));
            v_sum = vmlal_s16(v_sum, vget_high_s16(v_src1), vget_high_s16(v_src2));
        }

        vst1q_s32(buf, v_sum);
        r += buf[0] + buf[1] + buf[2] + buf[3];

        src1 += blockSize;
        src2 += blockSize;
        i += blockSize;
    }
#endif

    return r + dotProd_(src1, src2, len - i);
}
static double dotProd_16u(const ushort* src1, const ushort* src2, int len)
{
#if (ARITHM_USE_IPP == 1)
    double r = 0;
    if (0 <= ippiDotProd_16u64f_C1R(src1, (int)(len*sizeof(src1[0])), src2, (int)(len*sizeof(src2[0])), ippiSize(len, 1), &r))
    {
        CV_IMPL_ADD(CV_IMPL_IPP);
        return r;
    }
    setIppErrorStatus();
#endif
    return dotProd_(src1, src2, len);
}

static double dotProd_16s(const short* src1, const short* src2, int len)
{
#if (ARITHM_USE_IPP == 1)
    double r = 0;
    if (0 <= ippiDotProd_16s64f_C1R(src1, (int)(len*sizeof(src1[0])), src2, (int)(len*sizeof(src2[0])), ippiSize(len, 1), &r))
    {
        CV_IMPL_ADD(CV_IMPL_IPP);
        return r;
    }
    setIppErrorStatus();
#endif
    return dotProd_(src1, src2, len);
}

static double dotProd_32s(const int* src1, const int* src2, int len)
{
#if (ARITHM_USE_IPP == 1)
    double r = 0;
    if (0 <= ippiDotProd_32s64f_C1R(src1, (int)(len*sizeof(src1[0])), src2, (int)(len*sizeof(src2[0])), ippiSize(len, 1), &r))
    {
        CV_IMPL_ADD(CV_IMPL_IPP);
        return r;
    }
    setIppErrorStatus();
#endif
    return dotProd_(src1, src2, len);
}
static double dotProd_32f(const float* src1, const float* src2, int len)
{
    double r = 0.0;
    int i = 0;

#if (ARITHM_USE_IPP == 1)
    if (0 <= ippsDotProd_32f64f(src1, src2, len, &r))
    {
        CV_IMPL_ADD(CV_IMPL_IPP);
        return r;
    }
    setIppErrorStatus();
    r = 0.0;  // make sure a failed IPP call does not leak a partial result
#elif CV_NEON
    int len0 = len & -4, blockSize0 = (1 << 13), blockSize;
    float32x4_t v_zero = vdupq_n_f32(0.0f);
    CV_DECL_ALIGNED(16) float buf[4];

    while( i < len0 )
    {
        blockSize = std::min(len0 - i, blockSize0);
        float32x4_t v_sum = v_zero;

        int j = 0;
        for( ; j <= blockSize - 4; j += 4 )
            v_sum = vmlaq_f32(v_sum, vld1q_f32(src1 + j), vld1q_f32(src2 + j));

        vst1q_f32(buf, v_sum);
        r += buf[0] + buf[1] + buf[2] + buf[3];

        src1 += blockSize;
        src2 += blockSize;
        i += blockSize;
    }
#endif
    return r + dotProd_(src1, src2, len - i);
}
static double dotProd_64f(const double* src1, const double* src2, int len)
{
#if (ARITHM_USE_IPP == 1)
    double r = 0;
    if (0 <= ippsDotProd_64f(src1, src2, len, &r))
    {
        CV_IMPL_ADD(CV_IMPL_IPP);
        return r;
    }
    setIppErrorStatus();
#endif
    return dotProd_(src1, src2, len);
}
typedef double (*DotProdFunc)(const uchar* src1, const uchar* src2, int len);

static DotProdFunc getDotProdFunc(int depth)
{
    static DotProdFunc dotProdTab[] =
    {
        (DotProdFunc)GET_OPTIMIZED(dotProd_8u), (DotProdFunc)GET_OPTIMIZED(dotProd_8s),
        (DotProdFunc)dotProd_16u, (DotProdFunc)dotProd_16s,
        (DotProdFunc)dotProd_32s, (DotProdFunc)GET_OPTIMIZED(dotProd_32f),
        (DotProdFunc)dotProd_64f, 0
    };

    return dotProdTab[depth];
}
double Mat::dot(InputArray _mat) const
{
    Mat mat = _mat.getMat();
    int cn = channels();
    DotProdFunc func = getDotProdFunc(depth());
    CV_Assert( mat.type() == type() && mat.size == size && func != 0 );

    if( isContinuous() && mat.isContinuous() )
    {
        size_t len = total()*cn;
        if( len == (size_t)(int)len )
            return func(data, mat.data, (int)len);
    }

    const Mat* arrays[] = {this, &mat, 0};
    uchar* ptrs[2];
    NAryMatIterator it(arrays, ptrs);
    int len = (int)(it.size*cn);
    double r = 0;

    for( size_t i = 0; i < it.nplanes; i++, ++it )
        r += func( ptrs[0], ptrs[1], len );

    return r;
}
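/*
   Illustrative usage sketch (not part of the library build). Multi-channel
   matrices are treated as long one-dimensional vectors:

       cv::Mat a = (cv::Mat_<float>(1, 3) << 1, 2, 3);
       cv::Mat b = (cv::Mat_<float>(1, 3) << 4, 5, 6);
       double d = a.dot(b);   // 1*4 + 2*5 + 3*6 = 32
*/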
/****************************************************************************************\
*                                      Earlier API                                       *
\****************************************************************************************/
CV_IMPL void cvGEMM( const CvArr* Aarr, const CvArr* Barr, double alpha,
                     const CvArr* Carr, double beta, CvArr* Darr, int flags )
{
    cv::Mat A = cv::cvarrToMat(Aarr), B = cv::cvarrToMat(Barr);
    cv::Mat C, D = cv::cvarrToMat(Darr);

    if( Carr )
        C = cv::cvarrToMat(Carr);

    CV_Assert( (D.rows == ((flags & CV_GEMM_A_T) == 0 ? A.rows : A.cols)) &&
               (D.cols == ((flags & CV_GEMM_B_T) == 0 ? B.cols : B.rows)) &&
               D.type() == A.type() );

    gemm( A, B, alpha, C, beta, D, flags );
}
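/*
   Illustrative usage sketch (not part of the library build); the modern C++
   equivalent of this legacy wrapper is cv::gemm, where `A`, `B`, `C` and `D`
   are hypothetical CV_32F matrices of compatible sizes:

       cv::gemm(A, B, 0.5, C, 1.0, D, cv::GEMM_1_T);   // D = 0.5*A^T*B + 1.0*C
*/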
CV_IMPL void
cvTransform( const CvArr* srcarr, CvArr* dstarr,
             const CvMat* transmat, const CvMat* shiftvec )
{
    cv::Mat m = cv::cvarrToMat(transmat), src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);

    if( shiftvec )
    {
        cv::Mat v = cv::cvarrToMat(shiftvec).reshape(1,m.rows),
            _m(m.rows, m.cols + 1, m.type()), m1 = _m.colRange(0,m.cols), v1 = _m.col(m.cols);
        m.convertTo(m1, m1.type());
        v.convertTo(v1, v1.type());
        m = _m;
    }

    CV_Assert( dst.depth() == src.depth() && dst.channels() == m.rows );
    cv::transform( src, dst, m );
}
CV_IMPL void
cvPerspectiveTransform( const CvArr* srcarr, CvArr* dstarr, const CvMat* mat )
{
    cv::Mat m = cv::cvarrToMat(mat), src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);

    CV_Assert( dst.type() == src.type() && dst.channels() == m.rows-1 );
    cv::perspectiveTransform( src, dst, m );
}
CV_IMPL void cvScaleAdd( const CvArr* srcarr1, CvScalar scale,
                         const CvArr* srcarr2, CvArr* dstarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);

    CV_Assert( src1.size == dst.size && src1.type() == dst.type() );
    cv::scaleAdd( src1, scale.val[0], cv::cvarrToMat(srcarr2), dst );
}
CV_IMPL void
cvCalcCovarMatrix( const CvArr** vecarr, int count,
                   CvArr* covarr, CvArr* avgarr, int flags )
{
    cv::Mat cov0 = cv::cvarrToMat(covarr), cov = cov0, mean0, mean;
    CV_Assert( vecarr != 0 && count >= 1 );

    if( avgarr )
        mean = mean0 = cv::cvarrToMat(avgarr);

    if( (flags & CV_COVAR_COLS) != 0 || (flags & CV_COVAR_ROWS) != 0 )
    {
        CV_Assert( count == 1 );
        cv::Mat data = cv::cvarrToMat(vecarr[0]);
        cv::calcCovarMatrix( data, cov, mean, flags, cov.type() );
    }
    else
    {
        std::vector<cv::Mat> data(count);
        for( int i = 0; i < count; i++ )
            data[i] = cv::cvarrToMat(vecarr[i]);
        cv::calcCovarMatrix( &data[0], count, cov, mean, flags, cov.type() );
    }

    if( mean.data != mean0.data && mean0.data )
        mean.convertTo(mean0, mean0.type());

    if( cov.data != cov0.data )
        cov.convertTo(cov0, cov0.type());
}
CV_IMPL double
cvMahalanobis( const CvArr* srcAarr, const CvArr* srcBarr, const CvArr* matarr )
{
    return cv::Mahalanobis(cv::cvarrToMat(srcAarr),
        cv::cvarrToMat(srcBarr), cv::cvarrToMat(matarr));
}
CV_IMPL void
cvMulTransposed( const CvArr* srcarr, CvArr* dstarr,
                 int order, const CvArr* deltaarr, double scale )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst0 = cv::cvarrToMat(dstarr), dst = dst0, delta;
    if( deltaarr )
        delta = cv::cvarrToMat(deltaarr);
    cv::mulTransposed( src, dst, order != 0, delta, scale, dst.type());
    if( dst.data != dst0.data )
        dst.convertTo(dst0, dst0.type());
}
CV_IMPL double cvDotProduct( const CvArr* srcAarr, const CvArr* srcBarr )
{
    return cv::cvarrToMat(srcAarr).dot(cv::cvarrToMat(srcBarr));
}
CV_IMPL void
cvCalcPCA( const CvArr* data_arr, CvArr* avg_arr, CvArr* eigenvals, CvArr* eigenvects, int flags )
{
    cv::Mat data = cv::cvarrToMat(data_arr), mean0 = cv::cvarrToMat(avg_arr);
    cv::Mat evals0 = cv::cvarrToMat(eigenvals), evects0 = cv::cvarrToMat(eigenvects);
    cv::Mat mean = mean0, evals = evals0, evects = evects0;

    cv::PCA pca;
    pca.mean = mean;
    pca.eigenvalues = evals;
    pca.eigenvectors = evects;

    pca(data, (flags & CV_PCA_USE_AVG) ? mean : cv::Mat(),
        flags, !evals.empty() ? evals.rows + evals.cols - 1 : 0);

    if( pca.mean.size() == mean.size() )
        pca.mean.convertTo( mean, mean.type() );
    else
    {
        cv::Mat temp; pca.mean.convertTo( temp, mean.type() );
        transpose( temp, mean );
    }

    evals = pca.eigenvalues;
    evects = pca.eigenvectors;
    int ecount0 = evals0.cols + evals0.rows - 1;
    int ecount = evals.cols + evals.rows - 1;

    CV_Assert( (evals0.cols == 1 || evals0.rows == 1) &&
                ecount0 <= ecount &&
                evects0.cols == evects.cols &&
                evects0.rows == ecount0 );

    cv::Mat temp = evals0;
    if( evals.rows == 1 )
        evals.colRange(0, ecount0).convertTo(temp, evals0.type());
    else
        evals.rowRange(0, ecount0).convertTo(temp, evals0.type());
    if( temp.data != evals0.data )
        transpose(temp, evals0);
    evects.rowRange(0, ecount0).convertTo( evects0, evects0.type() );

    // otherwise some datatypes or sizes were incorrect, so the output arrays have been reallocated
    CV_Assert( mean0.data == mean.data );
}
CV_IMPL void
cvProjectPCA( const CvArr* data_arr, const CvArr* avg_arr,
              const CvArr* eigenvects, CvArr* result_arr )
{
    cv::Mat data = cv::cvarrToMat(data_arr), mean = cv::cvarrToMat(avg_arr);
    cv::Mat evects = cv::cvarrToMat(eigenvects), dst0 = cv::cvarrToMat(result_arr), dst = dst0;

    cv::PCA pca;
    pca.mean = mean;
    int n;
    if( mean.rows == 1 )
    {
        CV_Assert(dst.cols <= evects.rows && dst.rows == data.rows);
        n = dst.cols;
    }
    else
    {
        CV_Assert(dst.rows <= evects.rows && dst.cols == data.cols);
        n = dst.rows;
    }
    pca.eigenvectors = evects.rowRange(0, n);

    cv::Mat result = pca.project(data);
    if( result.cols != dst.cols )
        result = result.reshape(1, 1);
    result.convertTo(dst, dst.type());

    CV_Assert(dst0.data == dst.data);
}
CV_IMPL void
cvBackProjectPCA( const CvArr* proj_arr, const CvArr* avg_arr,
                  const CvArr* eigenvects, CvArr* result_arr )
{
    cv::Mat data = cv::cvarrToMat(proj_arr), mean = cv::cvarrToMat(avg_arr);
    cv::Mat evects = cv::cvarrToMat(eigenvects), dst0 = cv::cvarrToMat(result_arr), dst = dst0;

    cv::PCA pca;
    pca.mean = mean;
    int n;
    if( mean.rows == 1 )
    {
        CV_Assert(data.cols <= evects.rows && dst.rows == data.rows);
        n = data.cols;
    }
    else
    {
        CV_Assert(data.rows <= evects.rows && dst.cols == data.cols);
        n = data.rows;
    }
    pca.eigenvectors = evects.rowRange(0, n);

    cv::Mat result = pca.backProject(data);
    result.convertTo(dst, dst.type());

    CV_Assert(dst0.data == dst.data);
}
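/*
   Illustrative usage sketch of the C++ PCA class that the three wrappers above
   delegate to (not part of the library build; `data` is a hypothetical matrix
   with one sample per row):

       cv::PCA pca(data, cv::Mat(), CV_PCA_DATA_AS_ROW, 10);   // keep 10 components
       cv::Mat coeffs = pca.project(data.row(0));              // 1x10 projection
       cv::Mat approx = pca.backProject(coeffs);               // reconstruction
*/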