/*M///////////////////////////////////////////////////////////////////////////////////////
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//                          For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencl_kernels_core.hpp"
#include "opencv2/core/opencl/runtime/opencl_clamdblas.hpp"
/****************************************************************************************\
*                                          GEMM                                          *
\****************************************************************************************/
static void
GEMM_CopyBlock( const uchar* src, size_t src_step,
                uchar* dst, size_t dst_step,
                Size size, size_t pix_size )
{
    int j;
    // the block is copied in int-sized chunks; pix_size is always a multiple of sizeof(int)
    size.width *= (int)(pix_size / sizeof(int));

    for( ; size.height--; src += src_step, dst += dst_step )
    {
        j = 0;
#if CV_ENABLE_UNROLLED
        for( ; j <= size.width - 4; j += 4 )
        {
            int t0 = ((const int*)src)[j];
            int t1 = ((const int*)src)[j+1];
            ((int*)dst)[j] = t0;
            ((int*)dst)[j+1] = t1;
            t0 = ((const int*)src)[j+2];
            t1 = ((const int*)src)[j+3];
            ((int*)dst)[j+2] = t0;
            ((int*)dst)[j+3] = t1;
        }
#endif
        for( ; j < size.width; j++ )
            ((int*)dst)[j] = ((const int*)src)[j];
    }
}
static void
GEMM_TransposeBlock( const uchar* src, size_t src_step,
                     uchar* dst, size_t dst_step,
                     Size size, size_t pix_size )
{
    int i, j;
    for( i = 0; i < size.width; i++, dst += dst_step, src += pix_size )
    {
        const uchar* _src = src;
        switch( pix_size )
        {
        case sizeof(int):
            for( j = 0; j < size.height; j++, _src += src_step )
                ((int*)dst)[j] = ((int*)_src)[0];
            break;
        case sizeof(int)*2:
            for( j = 0; j < size.height*2; j += 2, _src += src_step )
            {
                int t0 = ((int*)_src)[0];
                int t1 = ((int*)_src)[1];
                ((int*)dst)[j] = t0;
                ((int*)dst)[j+1] = t1;
            }
            break;
        case sizeof(int)*4:
            for( j = 0; j < size.height*4; j += 4, _src += src_step )
            {
                int t0 = ((int*)_src)[0];
                int t1 = ((int*)_src)[1];
                ((int*)dst)[j] = t0;
                ((int*)dst)[j+1] = t1;
                t0 = ((int*)_src)[2];
                t1 = ((int*)_src)[3];
                ((int*)dst)[j+2] = t0;
                ((int*)dst)[j+3] = t1;
            }
            break;
        }
    }
}
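/*
   GEMM_CopyBlock and GEMM_TransposeBlock stage tiles of B (and of A when it is
   transposed) into small contiguous buffers, so the inner-product loops of the
   block multiplier walk memory sequentially instead of striding through the
   source matrix. Both work in int-sized units, which is sufficient because all
   supported element types are 4, 8 or 16 bytes wide.
*/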
template<typename T, typename WT> static void
GEMMSingleMul( const T* a_data, size_t a_step,
               const T* b_data, size_t b_step,
               const T* c_data, size_t c_step,
               T* d_data, size_t d_step,
               Size a_size, Size d_size,
               double alpha, double beta, int flags )
    int i, j, k, n = a_size.width, m = d_size.width, drows = d_size.height;
    const T *_a_data = a_data, *_b_data = b_data, *_c_data = c_data;
    cv::AutoBuffer<T> _a_buf;
    T* a_buf = 0;
    size_t a_step0, a_step1, c_step0, c_step1, t_step;

    a_step /= sizeof(a_data[0]);
    b_step /= sizeof(b_data[0]);
    c_step /= sizeof(c_data[0]);
    d_step /= sizeof(d_data[0]);
    a_step0 = a_step;
    a_step1 = 1;

    if( !c_data )
        c_step0 = c_step1 = 0;
    else if( !(flags & GEMM_3_T) )
        c_step0 = c_step, c_step1 = 1;
    else
        c_step0 = 1, c_step1 = c_step;

    if( flags & GEMM_1_T )
        CV_SWAP( a_step0, a_step1, t_step );

    if( a_step > 1 && n > 1 )
    {
        _a_buf.allocate(n);
        a_buf = _a_buf;
    }

    if( n == 1 ) /* outer product: A is a single column, B a single row */
        cv::AutoBuffer<T> _b_buf;
        T* b_buf = 0;
        if( a_step > 1 && a_size.height > 1 )
        {
            _a_buf.allocate(drows);
            a_buf = _a_buf;
            for( k = 0; k < drows; k++ )
                a_buf[k] = a_data[a_step*k];
            a_data = a_buf;
        }
        if( b_step > 1 )
        {
            _b_buf.allocate(d_size.width);
            b_buf = _b_buf;
            for( j = 0; j < d_size.width; j++ )
                b_buf[j] = b_data[j*b_step];
            b_data = b_buf;
        }
        for( i = 0; i < drows; i++, _c_data += c_step0, d_data += d_step )
            WT al = WT(a_data[i])*alpha;
            c_data = _c_data;
            for( j = 0; j <= d_size.width - 2; j += 2, c_data += 2*c_step1 )
                WT s0 = al*WT(b_data[j]);
                WT s1 = al*WT(b_data[j+1]);
                if( !c_data )
                {
                    d_data[j] = T(s0);
                    d_data[j+1] = T(s1);
                }
                else
                {
                    d_data[j] = T(s0 + WT(c_data[0])*beta);
                    d_data[j+1] = T(s1 + WT(c_data[c_step1])*beta);
                }
            for( ; j < d_size.width; j++, c_data += c_step1 )
                WT s0 = al*WT(b_data[j]);
                if( !c_data )
                    d_data[j] = T(s0);
                else
                    d_data[j] = T(s0 + WT(c_data[0])*beta);
    else if( flags & GEMM_2_T ) /* A * Bt */
        for( i = 0; i < drows; i++, _a_data += a_step0, _c_data += c_step0, d_data += d_step )
            a_data = _a_data;
            b_data = _b_data;
            c_data = _c_data;
            if( a_buf )
            {
                for( k = 0; k < n; k++ )
                    a_buf[k] = a_data[a_step1*k];
                a_data = a_buf;
            }
            for( j = 0; j < d_size.width; j++, b_data += b_step,
                                               c_data += c_step1 )
                WT s0(0), s1(0), s2(0), s3(0);
                k = 0;
#if CV_ENABLE_UNROLLED
                for( ; k <= n - 4; k += 4 )
                {
                    s0 += WT(a_data[k])*WT(b_data[k]);
                    s1 += WT(a_data[k+1])*WT(b_data[k+1]);
                    s2 += WT(a_data[k+2])*WT(b_data[k+2]);
                    s3 += WT(a_data[k+3])*WT(b_data[k+3]);
                }
#endif
                for( ; k < n; k++ )
                    s0 += WT(a_data[k])*WT(b_data[k]);
                s0 = (s0+s1+s2+s3)*alpha;
                if( !c_data )
                    d_data[j] = T(s0);
                else
                    d_data[j] = T(s0 + WT(c_data[0])*beta);
    else if( d_size.width*sizeof(d_data[0]) <= 1600 ) /* A * B, small output rows: compute each row directly */
        for( i = 0; i < drows; i++, _a_data += a_step0,
                                    _c_data += c_step0, d_data += d_step )
            a_data = _a_data, c_data = _c_data;
            if( a_buf )
            {
                for( k = 0; k < n; k++ )
                    a_buf[k] = a_data[a_step1*k];
                a_data = a_buf;
            }
            for( j = 0; j <= m - 4; j += 4, c_data += 4*c_step1 )
                const T* b = _b_data + j;
                WT s0(0), s1(0), s2(0), s3(0);
                for( k = 0; k < n; k++, b += b_step )
                {
                    WT a = WT(a_data[k]);
                    s0 += a * WT(b[0]); s1 += a * WT(b[1]);
                    s2 += a * WT(b[2]); s3 += a * WT(b[3]);
                }
                if( !c_data )
                {
                    d_data[j] = T(s0*alpha);
                    d_data[j+1] = T(s1*alpha);
                    d_data[j+2] = T(s2*alpha);
                    d_data[j+3] = T(s3*alpha);
                }
                else
                {
                    s0 = s0*alpha; s1 = s1*alpha;
                    s2 = s2*alpha; s3 = s3*alpha;
                    d_data[j] = T(s0 + WT(c_data[0])*beta);
                    d_data[j+1] = T(s1 + WT(c_data[c_step1])*beta);
                    d_data[j+2] = T(s2 + WT(c_data[c_step1*2])*beta);
                    d_data[j+3] = T(s3 + WT(c_data[c_step1*3])*beta);
                }
            for( ; j < m; j++, c_data += c_step1 )
                const T* b = _b_data + j;
                WT s0(0);
                for( k = 0; k < n; k++, b += b_step )
                    s0 += WT(a_data[k]) * WT(b[0]);
                if( !c_data )
                    d_data[j] = T(s0*alpha);
                else
                    d_data[j] = T(s0 + WT(c_data[0])*beta);
    else /* general case: accumulate each output row in a wide-type buffer */
        cv::AutoBuffer<WT> _d_buf(m);
        WT* d_buf = _d_buf;

        for( i = 0; i < drows; i++, _a_data += a_step0, _c_data += c_step0, d_data += d_step )
            a_data = _a_data;
            b_data = _b_data;
            c_data = _c_data;
            if( a_buf )
            {
                for( k = 0; k < n; k++ )
                    a_buf[k] = _a_data[a_step1*k];
                a_data = a_buf;
            }
            for( j = 0; j < m; j++ )
                d_buf[j] = WT(0);
            for( k = 0; k < n; k++, b_data += b_step )
                WT al = WT(a_data[k]);
                j = 0;
#if CV_ENABLE_UNROLLED
                for(; j <= m - 4; j += 4 )
                {
                    WT t0 = d_buf[j] + WT(b_data[j])*al;
                    WT t1 = d_buf[j+1] + WT(b_data[j+1])*al;
                    d_buf[j] = t0; d_buf[j+1] = t1;
                    t0 = d_buf[j+2] + WT(b_data[j+2])*al;
                    t1 = d_buf[j+3] + WT(b_data[j+3])*al;
                    d_buf[j+2] = t0; d_buf[j+3] = t1;
                }
#endif
                for( ; j < m; j++ )
                    d_buf[j] += WT(b_data[j])*al;
            if( !c_data )
                for( j = 0; j < m; j++ )
                    d_data[j] = T(d_buf[j]*alpha);
            else
                for( j = 0; j < m; j++, c_data += c_step1 )
                    WT t = d_buf[j]*alpha;
                    d_data[j] = T(t + WT(c_data[0])*beta);
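/*
   GEMMSingleMul computes D = alpha*op(A)*op(B) + beta*op(C) in one pass, with
   all accumulation done in the wide type WT (double for float inputs, Complexd
   for Complexf). Four layouts are handled separately: n == 1 (outer product of
   a column of A and a row of B), A * Bt (unrolled dot products of rows against
   rows), small output rows (direct 4-column accumulation), and the general
   case, which builds one output row at a time in a WT buffer before scaling by
   alpha and folding in beta*C.
*/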
template<typename T, typename WT> static void
GEMMBlockMul( const T* a_data, size_t a_step,
              const T* b_data, size_t b_step,
              WT* d_data, size_t d_step,
              Size a_size, Size d_size, int flags )
    int i, j, k, n = a_size.width, m = d_size.width;
    const T *_a_data = a_data, *_b_data = b_data;
    cv::AutoBuffer<T> _a_buf;
    T* a_buf = 0;
    size_t a_step0, a_step1, t_step;
    int do_acc = flags & 16;   // internal flag (bit 16): accumulate into the existing contents of d_data

    a_step /= sizeof(a_data[0]);
    b_step /= sizeof(b_data[0]);
    d_step /= sizeof(d_data[0]);
    a_step0 = a_step;
    a_step1 = 1;

    if( flags & GEMM_1_T )
    {
        CV_SWAP( a_step0, a_step1, t_step );
        n = a_size.height;
        _a_buf.allocate(n);
        a_buf = _a_buf;
    }
    if( flags & GEMM_2_T )
        /* second operand is transposed */
        for( i = 0; i < d_size.height; i++, _a_data += a_step0, d_data += d_step )
            a_data = _a_data; b_data = _b_data;
            if( a_buf )
            {
                for( k = 0; k < n; k++ )
                    a_buf[k] = a_data[a_step1*k];
                a_data = a_buf;
            }
            for( j = 0; j < d_size.width; j++, b_data += b_step )
                WT s0 = do_acc ? d_data[j]:WT(0), s1(0);
                for( k = 0; k <= n - 2; k += 2 )
                {
                    s0 += WT(a_data[k])*WT(b_data[k]);
                    s1 += WT(a_data[k+1])*WT(b_data[k+1]);
                }
                for( ; k < n; k++ )
                    s0 += WT(a_data[k])*WT(b_data[k]);
                d_data[j] = s0 + s1;
    else
        for( i = 0; i < d_size.height; i++, _a_data += a_step0, d_data += d_step )
            a_data = _a_data, b_data = _b_data;
            if( a_buf )
            {
                for( k = 0; k < n; k++ )
                    a_buf[k] = a_data[a_step1*k];
                a_data = a_buf;
            }
            for( j = 0; j <= m - 4; j += 4 )
                WT s0, s1, s2, s3;
                const T* b = b_data + j;
                if( do_acc )
                {
                    s0 = d_data[j]; s1 = d_data[j+1];
                    s2 = d_data[j+2]; s3 = d_data[j+3];
                }
                else
                    s0 = s1 = s2 = s3 = WT(0);
                for( k = 0; k < n; k++, b += b_step )
                {
                    WT a = WT(a_data[k]);
                    s0 += a * WT(b[0]); s1 += a * WT(b[1]);
                    s2 += a * WT(b[2]); s3 += a * WT(b[3]);
                }
                d_data[j] = s0; d_data[j+1] = s1;
                d_data[j+2] = s2; d_data[j+3] = s3;
            for( ; j < m; j++ )
                const T* b = b_data + j;
                WT s0 = do_acc ? d_data[j] : WT(0);
                for( k = 0; k < n; k++, b += b_step )
                    s0 += WT(a_data[k]) * WT(b[0]);
                d_data[j] = s0;
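/*
   GEMMBlockMul multiplies one block of op(A) by one block of op(B) and writes
   (or, when the accumulate bit is set for the k-panels after the first, adds to)
   a wide-type partial result. Splitting the inner dimension this way keeps all
   three working blocks cache-resident; alpha, beta and C are applied only once
   per output tile, by GEMMStore below.
*/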
template<typename T, typename WT> static void
GEMMStore( const T* c_data, size_t c_step,
           const WT* d_buf, size_t d_buf_step,
           T* d_data, size_t d_step, Size d_size,
           double alpha, double beta, int flags )
    const T* _c_data = c_data;
    int j;
    size_t c_step0, c_step1;

    c_step /= sizeof(c_data[0]);
    d_buf_step /= sizeof(d_buf[0]);
    d_step /= sizeof(d_data[0]);

    if( !c_data )
        c_step0 = c_step1 = 0;
    else if( !(flags & GEMM_3_T) )
        c_step0 = c_step, c_step1 = 1;
    else
        c_step0 = 1, c_step1 = c_step;

    for( ; d_size.height--; _c_data += c_step0, d_buf += d_buf_step, d_data += d_step )
        if( c_data )
            c_data = _c_data;
            j = 0;
#if CV_ENABLE_UNROLLED
            for(; j <= d_size.width - 4; j += 4, c_data += 4*c_step1 )
            {
                WT t0 = alpha*d_buf[j];
                WT t1 = alpha*d_buf[j+1];
                t0 += beta*WT(c_data[0]);
                t1 += beta*WT(c_data[c_step1]);
                d_data[j] = T(t0);
                d_data[j+1] = T(t1);
                t0 = alpha*d_buf[j+2];
                t1 = alpha*d_buf[j+3];
                t0 += beta*WT(c_data[c_step1*2]);
                t1 += beta*WT(c_data[c_step1*3]);
                d_data[j+2] = T(t0);
                d_data[j+3] = T(t1);
            }
#endif
            for( ; j < d_size.width; j++, c_data += c_step1 )
                WT t0 = alpha*d_buf[j];
                d_data[j] = T(t0 + WT(c_data[0])*beta);
        else
            j = 0;
#if CV_ENABLE_UNROLLED
            for( ; j <= d_size.width - 4; j += 4 )
            {
                WT t0 = alpha*d_buf[j];
                WT t1 = alpha*d_buf[j+1];
                d_data[j] = T(t0);
                d_data[j+1] = T(t1);
                t0 = alpha*d_buf[j+2];
                t1 = alpha*d_buf[j+3];
                d_data[j+2] = T(t0);
                d_data[j+3] = T(t1);
            }
#endif
            for( ; j < d_size.width; j++ )
                d_data[j] = T(alpha*d_buf[j]);
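/*
   GEMMStore converts the accumulated wide-type buffer back to the destination
   type, applying D = alpha*acc + beta*C elementwise. c_step0/c_step1 fold the
   optional GEMM_3_T transposition of C into the address arithmetic, so no
   transposed copy of C is ever materialized.
*/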
typedef void (*GEMMSingleMulFunc)( const void* src1, size_t step1,
                   const void* src2, size_t step2, const void* src3, size_t step3,
                   void* dst, size_t dststep, Size srcsize, Size dstsize,
                   double alpha, double beta, int flags );

typedef void (*GEMMBlockMulFunc)( const void* src1, size_t step1,
                   const void* src2, size_t step2, void* dst, size_t dststep,
                   Size srcsize, Size dstsize, int flags );

typedef void (*GEMMStoreFunc)( const void* src1, size_t step1,
                   const void* src2, size_t step2, void* dst, size_t dststep,
                   Size dstsize, double alpha, double beta, int flags );
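/*
   The typed wrappers below are instantiated once per supported element type and
   then erased to the generic function-pointer signatures above, so cv::gemm can
   select a complete kernel set with a single runtime switch on CV_32FC1 /
   CV_64FC1 / CV_32FC2 / CV_64FC2. A minimal sketch of the same pattern (the
   two-way选 selection shown is illustrative, not the full dispatch):

       GEMMSingleMulFunc f = type == CV_32FC1 ? (GEMMSingleMulFunc)GEMMSingleMul_32f
                                              : (GEMMSingleMulFunc)GEMMSingleMul_64f;
       f(aptr, astep, bptr, bstep, cptr, cstep, dptr, dstep, asize, dsize, alpha, beta, flags);
*/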
static void GEMMSingleMul_32f( const float* a_data, size_t a_step,
                               const float* b_data, size_t b_step,
                               const float* c_data, size_t c_step,
                               float* d_data, size_t d_step,
                               Size a_size, Size d_size,
                               double alpha, double beta, int flags )
    GEMMSingleMul<float,double>(a_data, a_step, b_data, b_step, c_data,
                                c_step, d_data, d_step, a_size, d_size,
                                alpha, beta, flags);

static void GEMMSingleMul_64f( const double* a_data, size_t a_step,
                               const double* b_data, size_t b_step,
                               const double* c_data, size_t c_step,
                               double* d_data, size_t d_step,
                               Size a_size, Size d_size,
                               double alpha, double beta, int flags )
    GEMMSingleMul<double,double>(a_data, a_step, b_data, b_step, c_data,
                                 c_step, d_data, d_step, a_size, d_size,
                                 alpha, beta, flags);

static void GEMMSingleMul_32fc( const Complexf* a_data, size_t a_step,
                                const Complexf* b_data, size_t b_step,
                                const Complexf* c_data, size_t c_step,
                                Complexf* d_data, size_t d_step,
                                Size a_size, Size d_size,
                                double alpha, double beta, int flags )
    GEMMSingleMul<Complexf,Complexd>(a_data, a_step, b_data, b_step, c_data,
                                     c_step, d_data, d_step, a_size, d_size,
                                     alpha, beta, flags);

static void GEMMSingleMul_64fc( const Complexd* a_data, size_t a_step,
                                const Complexd* b_data, size_t b_step,
                                const Complexd* c_data, size_t c_step,
                                Complexd* d_data, size_t d_step,
                                Size a_size, Size d_size,
                                double alpha, double beta, int flags )
    GEMMSingleMul<Complexd,Complexd>(a_data, a_step, b_data, b_step, c_data,
                                     c_step, d_data, d_step, a_size, d_size,
                                     alpha, beta, flags);
static void GEMMBlockMul_32f( const float* a_data, size_t a_step,
                              const float* b_data, size_t b_step,
                              double* d_data, size_t d_step,
                              Size a_size, Size d_size, int flags )
    GEMMBlockMul(a_data, a_step, b_data, b_step, d_data, d_step, a_size, d_size, flags);

static void GEMMBlockMul_64f( const double* a_data, size_t a_step,
                              const double* b_data, size_t b_step,
                              double* d_data, size_t d_step,
                              Size a_size, Size d_size, int flags )
    GEMMBlockMul(a_data, a_step, b_data, b_step, d_data, d_step, a_size, d_size, flags);

static void GEMMBlockMul_32fc( const Complexf* a_data, size_t a_step,
                               const Complexf* b_data, size_t b_step,
                               Complexd* d_data, size_t d_step,
                               Size a_size, Size d_size, int flags )
    GEMMBlockMul(a_data, a_step, b_data, b_step, d_data, d_step, a_size, d_size, flags);

static void GEMMBlockMul_64fc( const Complexd* a_data, size_t a_step,
                               const Complexd* b_data, size_t b_step,
                               Complexd* d_data, size_t d_step,
                               Size a_size, Size d_size, int flags )
    GEMMBlockMul(a_data, a_step, b_data, b_step, d_data, d_step, a_size, d_size, flags);

static void GEMMStore_32f( const float* c_data, size_t c_step,
                           const double* d_buf, size_t d_buf_step,
                           float* d_data, size_t d_step, Size d_size,
                           double alpha, double beta, int flags )
    GEMMStore(c_data, c_step, d_buf, d_buf_step, d_data, d_step, d_size, alpha, beta, flags);

static void GEMMStore_64f( const double* c_data, size_t c_step,
                           const double* d_buf, size_t d_buf_step,
                           double* d_data, size_t d_step, Size d_size,
                           double alpha, double beta, int flags )
    GEMMStore(c_data, c_step, d_buf, d_buf_step, d_data, d_step, d_size, alpha, beta, flags);

static void GEMMStore_32fc( const Complexf* c_data, size_t c_step,
                            const Complexd* d_buf, size_t d_buf_step,
                            Complexf* d_data, size_t d_step, Size d_size,
                            double alpha, double beta, int flags )
    GEMMStore(c_data, c_step, d_buf, d_buf_step, d_data, d_step, d_size, alpha, beta, flags);

static void GEMMStore_64fc( const Complexd* c_data, size_t c_step,
                            const Complexd* d_buf, size_t d_buf_step,
                            Complexd* d_data, size_t d_step, Size d_size,
                            double alpha, double beta, int flags )
    GEMMStore(c_data, c_step, d_buf, d_buf_step, d_data, d_step, d_size, alpha, beta, flags);
#ifdef HAVE_CLAMDBLAS

static bool ocl_gemm_amdblas( InputArray matA, InputArray matB, double alpha,
                              InputArray matC, double beta, OutputArray matD, int flags )
    int type = matA.type(), esz = CV_ELEM_SIZE(type);
    bool haveC = matC.kind() != cv::_InputArray::NONE;
    Size sizeA = matA.size(), sizeB = matB.size(), sizeC = haveC ? matC.size() : Size(0, 0);
    bool atrans = (flags & GEMM_1_T) != 0, btrans = (flags & GEMM_2_T) != 0, ctrans = (flags & GEMM_3_T) != 0;

    if (atrans)
        sizeA = Size(sizeA.height, sizeA.width);
    if (btrans)
        sizeB = Size(sizeB.height, sizeB.width);
    if (ctrans)
        sizeC = Size(sizeC.height, sizeC.width);

    Size sizeD(sizeB.width, sizeA.height);

    CV_Assert( matB.type() == type && (!haveC || matC.type() == type) );
    CV_Assert( sizeA.width == sizeB.height && (!haveC || sizeC == sizeD) );

    matD.create(sizeD, type);
    if ( matA.offset() % esz != 0 || matA.step() % esz != 0 ||
         matB.offset() % esz != 0 || matB.step() % esz != 0 ||
         (haveC && (matC.offset() % esz != 0 || matC.step() % esz != 0)) )
        return false;

    UMat A = matA.getUMat(), B = matB.getUMat(), D = matD.getUMat();
    if (haveC)
        ctrans ? transpose(matC, D) : matC.copyTo(D);
    else
        D.setTo(Scalar::all(0));

    int M = sizeD.height, N = sizeD.width, K = sizeA.width;
    int lda = (int)A.step / esz, ldb = (int)B.step / esz, ldc = (int)D.step / esz;
    int offa = (int)A.offset / esz, offb = (int)B.offset / esz, offc = (int)D.offset / esz;

    cl_command_queue clq = (cl_command_queue)ocl::Queue::getDefault().ptr();
    clAmdBlasTranspose transA = atrans ? clAmdBlasTrans : clAmdBlasNoTrans;
    clAmdBlasTranspose transB = btrans ? clAmdBlasTrans : clAmdBlasNoTrans;
    clAmdBlasOrder order = clAmdBlasRowMajor;
    clAmdBlasStatus status = clAmdBlasSuccess;

    if (type == CV_32FC1)
        status = clAmdBlasSgemmEx(order, transA, transB, M, N, K,
                                  (cl_float)alpha, (const cl_mem)A.handle(ACCESS_READ), offa, lda,
                                  (const cl_mem)B.handle(ACCESS_READ), offb, ldb,
                                  (cl_float)beta, (cl_mem)D.handle(ACCESS_RW), offc, ldc,
                                  1, &clq, 0, NULL, NULL);
    else if (type == CV_64FC1)
        status = clAmdBlasDgemmEx(order, transA, transB, M, N, K,
                                  alpha, (const cl_mem)A.handle(ACCESS_READ), offa, lda,
                                  (const cl_mem)B.handle(ACCESS_READ), offb, ldb,
                                  beta, (cl_mem)D.handle(ACCESS_RW), offc, ldc,
                                  1, &clq, 0, NULL, NULL);
    else if (type == CV_32FC2)
        cl_float2 alpha_2 = { { (cl_float)alpha, 0 } };
        cl_float2 beta_2  = { { (cl_float)beta, 0 } };
        status = clAmdBlasCgemmEx(order, transA, transB, M, N, K,
                                  alpha_2, (const cl_mem)A.handle(ACCESS_READ), offa, lda,
                                  (const cl_mem)B.handle(ACCESS_READ), offb, ldb,
                                  beta_2, (cl_mem)D.handle(ACCESS_RW), offc, ldc,
                                  1, &clq, 0, NULL, NULL);
    else if (type == CV_64FC2)
        cl_double2 alpha_2 = { { alpha, 0 } };
        cl_double2 beta_2  = { { beta, 0 } };
        status = clAmdBlasZgemmEx(order, transA, transB, M, N, K,
                                  alpha_2, (const cl_mem)A.handle(ACCESS_READ), offa, lda,
                                  (const cl_mem)B.handle(ACCESS_READ), offb, ldb,
                                  beta_2, (cl_mem)D.handle(ACCESS_RW), offc, ldc,
                                  1, &clq, 0, NULL, NULL);
    else
        CV_Error(Error::StsUnsupportedFormat, "");

    return status == clAmdBlasSuccess;
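/*
   The clAmdBlas path maps cv::gemm directly onto the vendor BLAS: row-major
   order, leading dimensions and buffer offsets expressed in elements (hence the
   esz-divisibility check above), and the beta*C term handled by seeding D with
   C (or zeros) before the call. Returning false on any failure lets the caller
   fall through to the generic OpenCL or CPU implementation.
*/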
#endif

#ifdef HAVE_OPENCL

static bool ocl_gemm( InputArray matA, InputArray matB, double alpha,
                      InputArray matC, double beta, OutputArray matD, int flags )
    int depth = matA.depth(), cn = matA.channels();
    int type = CV_MAKETYPE(depth, cn);

    CV_Assert( type == matB.type() && (type == CV_32FC1 || type == CV_64FC1 || type == CV_32FC2 || type == CV_64FC2) );

    const ocl::Device & dev = ocl::Device::getDefault();
    bool doubleSupport = dev.doubleFPConfig() > 0;

    if (!doubleSupport && depth == CV_64F)
        return false;

    bool haveC = matC.kind() != cv::_InputArray::NONE;
    Size sizeA = matA.size(), sizeB = matB.size(), sizeC = haveC ? matC.size() : Size(0, 0);
    bool atrans = (flags & GEMM_1_T) != 0, btrans = (flags & GEMM_2_T) != 0, ctrans = (flags & GEMM_3_T) != 0;

    if (atrans)
        sizeA = Size(sizeA.height, sizeA.width);
    if (btrans)
        sizeB = Size(sizeB.height, sizeB.width);
    if (ctrans)
        sizeC = Size(sizeC.height, sizeC.width);

    Size sizeD(sizeB.width, sizeA.height);

    CV_Assert( matB.type() == type && (!haveC || matC.type() == type) );
    CV_Assert( sizeA.width == sizeB.height && (!haveC || sizeC == sizeD) );

    int max_wg_size = (int)dev.maxWorkGroupSize();
    // pick the largest local tile (32/16/8/1) whose cn-channel work-group fits the device limit
    int block_size = (max_wg_size / (32*cn) < 32) ? (max_wg_size / (16*cn) < 16) ? (max_wg_size / (8*cn) < 8) ? 1 : 8 : 16 : 32;

    matD.create(sizeD, type);

    UMat A = matA.getUMat(), B = matB.getUMat(), D = matD.getUMat();

    if (haveC)
        ctrans ? transpose(matC, D) : matC.copyTo(D);
    else
        D.setTo(Scalar::all(0));

    int vectorWidths[] = { 4, 4, 2, 2, 1, 4, cn, -1 };
    int kercn = ocl::checkOptimalVectorWidth(vectorWidths, B, D);

    String opts = format("-D T=%s -D T1=%s -D WT=%s -D cn=%d -D kercn=%d -D LOCAL_SIZE=%d %s %s %s",
                         ocl::typeToStr(type), ocl::typeToStr(depth), ocl::typeToStr(CV_MAKETYPE(depth, kercn)),
                         cn, kercn, block_size,
                         (sizeA.width % block_size !=0) ? "-D NO_MULT" : "",
                         haveC ? "-D HAVE_C" : "",
                         doubleSupport ? " -D DOUBLE_SUPPORT" : "");

    ocl::Kernel k("gemm", cv::ocl::core::gemm_oclsrc, opts);
    if (k.empty())
        return false;

    if (depth == CV_64F)
        k.args(ocl::KernelArg::ReadOnlyNoSize(A),
               ocl::KernelArg::ReadOnlyNoSize(B, cn, kercn),
               ocl::KernelArg::ReadWrite(D, cn, kercn),
               sizeA.width, alpha, beta);
    else
        k.args(ocl::KernelArg::ReadOnlyNoSize(A),
               ocl::KernelArg::ReadOnlyNoSize(B, cn, kercn),
               ocl::KernelArg::ReadWrite(D, cn, kercn),
               sizeA.width, (float)alpha, (float)beta);

    size_t globalsize[2] = { sizeD.width * cn / kercn, sizeD.height };
    size_t localsize[2] = { block_size, block_size };
    return k.run(2, globalsize, block_size != 1 ? localsize : NULL, false);

#endif
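/*
   cv::gemm computes the generalized matrix product

       D = alpha * op(A) * op(B) + beta * op(C),

   where op() is an optional transposition selected by the GEMM_1_T, GEMM_2_T
   and GEMM_3_T flags. A minimal usage sketch (values arbitrary):

       cv::Mat A = (cv::Mat_<float>(2, 3) << 1, 2, 3, 4, 5, 6);
       cv::Mat B = (cv::Mat_<float>(2, 3) << 1, 0, 0, 0, 1, 0);
       cv::Mat D;
       cv::gemm(A, B, 1.0, cv::noArray(), 0.0, D, cv::GEMM_2_T);  // D = A * B^t, 2x2

   Tiny products (inner dimension 2..4) are expanded inline below; larger ones
   go through the single-pass kernel or, past the cache-blocking thresholds,
   through the tiled GEMMBlockMul/GEMMStore pipeline.
*/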
void cv::gemm( InputArray matA, InputArray matB, double alpha,
               InputArray matC, double beta, OutputArray _matD, int flags )
#ifdef HAVE_CLAMDBLAS
    CV_OCL_RUN(ocl::haveAmdBlas() && matA.dims() <= 2 && matB.dims() <= 2 && matC.dims() <= 2 && _matD.isUMat() &&
        matA.cols() > 20 && matA.rows() > 20 && matB.cols() > 20, // since it works incorrectly for small sizes
        ocl_gemm_amdblas(matA, matB, alpha, matC, beta, _matD, flags))
#endif

    CV_OCL_RUN(_matD.isUMat() && matA.dims() <= 2 && matB.dims() <= 2 && matC.dims() <= 2,
               ocl_gemm(matA, matB, alpha, matC, beta, _matD, flags))

    const int block_lin_size = 128;
    const int block_size = block_lin_size * block_lin_size;

    static double zero[] = {0,0,0,0};
    static float zerof[] = {0,0,0,0};

    Mat A = matA.getMat(), B = matB.getMat(), C = beta != 0 ? matC.getMat() : Mat();
    Size a_size = A.size(), d_size;
    int i, len = 0, type = A.type();

    CV_Assert( type == B.type() && (type == CV_32FC1 || type == CV_64FC1 || type == CV_32FC2 || type == CV_64FC2) );

    switch( flags & (GEMM_1_T|GEMM_2_T) )
    {
    case 0:
        d_size = Size( B.cols, a_size.height );
        len = B.rows;
        CV_Assert( a_size.width == len );
        break;
    case 1:
        d_size = Size( B.cols, a_size.width );
        len = B.rows;
        CV_Assert( a_size.height == len );
        break;
    case 2:
        d_size = Size( B.rows, a_size.height );
        len = B.cols;
        CV_Assert( a_size.width == len );
        break;
    case 3:
        d_size = Size( B.rows, a_size.width );
        len = B.cols;
        CV_Assert( a_size.height == len );
        break;
    }

    if( C.data )
        CV_Assert( C.type() == type &&
            (((flags&GEMM_3_T) == 0 && C.rows == d_size.height && C.cols == d_size.width) ||
             ((flags&GEMM_3_T) != 0 && C.rows == d_size.width && C.cols == d_size.height)));

    _matD.create( d_size.height, d_size.width, type );
    Mat D = _matD.getMat();
    if( (flags & GEMM_3_T) != 0 && C.data == D.data )
    {
        transpose( C, C );
        flags &= ~GEMM_3_T;
    }
    if( flags == 0 && 2 <= len && len <= 4 && (len == d_size.width || len == d_size.height) )
        if( type == CV_32F )
            float* d = D.ptr<float>();
            const float *a = A.ptr<float>(),
                        *b = B.ptr<float>(),
                        *c = (const float*)C.data;
            size_t d_step = D.step/sizeof(d[0]),
                   a_step = A.step/sizeof(a[0]),
                   b_step = B.step/sizeof(b[0]),
                   c_step = C.data ? C.step/sizeof(c[0]) : 0;

            if( !c )
                c = zerof;

            if( len == 2 )
                if( len == d_size.width && b != d )
                    for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
                        float t0 = a[0]*b[0] + a[1]*b[b_step];
                        float t1 = a[0]*b[1] + a[1]*b[b_step+1];
                        d[0] = (float)(t0*alpha + c[0]*beta);
                        d[1] = (float)(t1*alpha + c[1]*beta);
                else if( a != d )  // len == d_size.height: produce 2-element columns of D
                    int c_step0 = c == zerof ? 0 : 1;
                    if( c == zerof )
                        c_step = 1;
                    for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
                        float t0 = a[0]*b[0] + a[1]*b[b_step];
                        float t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step];
                        d[0] = (float)(t0*alpha + c[0]*beta);
                        d[d_step] = (float)(t1*alpha + c[c_step]*beta);
            else if( len == 3 )
                if( len == d_size.width && b != d )
                    for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
                        float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2];
                        float t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1];
                        float t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2];
                        d[0] = (float)(t0*alpha + c[0]*beta);
                        d[1] = (float)(t1*alpha + c[1]*beta);
                        d[2] = (float)(t2*alpha + c[2]*beta);
                else if( a != d )  // len == d_size.height: produce 3-element columns of D
                    int c_step0 = c == zerof ? 0 : 1;
                    if( c == zerof )
                        c_step = 1;
                    for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
                        float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2];
                        float t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] + a[a_step+2]*b[b_step*2];
                        float t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] + a[a_step*2+2]*b[b_step*2];
                        d[0] = (float)(t0*alpha + c[0]*beta);
                        d[d_step] = (float)(t1*alpha + c[c_step]*beta);
                        d[d_step*2] = (float)(t2*alpha + c[c_step*2]*beta);
            else if( len == 4 )
                if( len == d_size.width && b != d )
                    for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
                        float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3];
                        float t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1] + a[3]*b[b_step*3+1];
                        float t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2] + a[3]*b[b_step*3+2];
                        float t3 = a[0]*b[3] + a[1]*b[b_step+3] + a[2]*b[b_step*2+3] + a[3]*b[b_step*3+3];
                        d[0] = (float)(t0*alpha + c[0]*beta);
                        d[1] = (float)(t1*alpha + c[1]*beta);
                        d[2] = (float)(t2*alpha + c[2]*beta);
                        d[3] = (float)(t3*alpha + c[3]*beta);
                else if( len <= 16 && a != d )
                    int c_step0 = c == zerof ? 0 : 1;
                    if( c == zerof )
                        c_step = 1;
                    for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
                        float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3];
                        float t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] +
                                   a[a_step+2]*b[b_step*2] + a[a_step+3]*b[b_step*3];
                        float t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] +
                                   a[a_step*2+2]*b[b_step*2] + a[a_step*2+3]*b[b_step*3];
                        float t3 = a[a_step*3]*b[0] + a[a_step*3+1]*b[b_step] +
                                   a[a_step*3+2]*b[b_step*2] + a[a_step*3+3]*b[b_step*3];
                        d[0] = (float)(t0*alpha + c[0]*beta);
                        d[d_step] = (float)(t1*alpha + c[c_step]*beta);
                        d[d_step*2] = (float)(t2*alpha + c[c_step*2]*beta);
                        d[d_step*3] = (float)(t3*alpha + c[c_step*3]*beta);
        if( type == CV_64F )
            double* d = D.ptr<double>();
            const double *a = A.ptr<double>(),
                         *b = B.ptr<double>(),
                         *c = (const double*)C.data;
            size_t d_step = D.step/sizeof(d[0]),
                   a_step = A.step/sizeof(a[0]),
                   b_step = B.step/sizeof(b[0]),
                   c_step = C.data ? C.step/sizeof(c[0]) : 0;

            if( !c )
                c = zero;

            if( len == 2 )
                if( len == d_size.width && b != d )
                    for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
                        double t0 = a[0]*b[0] + a[1]*b[b_step];
                        double t1 = a[0]*b[1] + a[1]*b[b_step+1];
                        d[0] = t0*alpha + c[0]*beta;
                        d[1] = t1*alpha + c[1]*beta;
                else if( a != d )  // len == d_size.height: produce 2-element columns of D
                    int c_step0 = c == zero ? 0 : 1;
                    if( c == zero )
                        c_step = 1;
                    for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
                        double t0 = a[0]*b[0] + a[1]*b[b_step];
                        double t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step];
                        d[0] = t0*alpha + c[0]*beta;
                        d[d_step] = t1*alpha + c[c_step]*beta;
            else if( len == 3 )
                if( len == d_size.width && b != d )
                    for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
                        double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2];
                        double t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1];
                        double t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2];
                        d[0] = t0*alpha + c[0]*beta;
                        d[1] = t1*alpha + c[1]*beta;
                        d[2] = t2*alpha + c[2]*beta;
                else if( a != d )  // len == d_size.height: produce 3-element columns of D
                    int c_step0 = c == zero ? 0 : 1;
                    if( c == zero )
                        c_step = 1;
                    for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
                        double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2];
                        double t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] + a[a_step+2]*b[b_step*2];
                        double t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] + a[a_step*2+2]*b[b_step*2];
                        d[0] = t0*alpha + c[0]*beta;
                        d[d_step] = t1*alpha + c[c_step]*beta;
                        d[d_step*2] = t2*alpha + c[c_step*2]*beta;
            else if( len == 4 )
                if( len == d_size.width && b != d )
                    for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
                        double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3];
                        double t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1] + a[3]*b[b_step*3+1];
                        double t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2] + a[3]*b[b_step*3+2];
                        double t3 = a[0]*b[3] + a[1]*b[b_step+3] + a[2]*b[b_step*2+3] + a[3]*b[b_step*3+3];
                        d[0] = t0*alpha + c[0]*beta;
                        d[1] = t1*alpha + c[1]*beta;
                        d[2] = t2*alpha + c[2]*beta;
                        d[3] = t3*alpha + c[3]*beta;
                else if( d_size.width <= 16 && a != d )
                    int c_step0 = c == zero ? 0 : 1;
                    if( c == zero )
                        c_step = 1;
                    for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
                        double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3];
                        double t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] +
                                    a[a_step+2]*b[b_step*2] + a[a_step+3]*b[b_step*3];
                        double t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] +
                                    a[a_step*2+2]*b[b_step*2] + a[a_step*2+3]*b[b_step*3];
                        double t3 = a[a_step*3]*b[0] + a[a_step*3+1]*b[b_step] +
                                    a[a_step*3+2]*b[b_step*2] + a[a_step*3+3]*b[b_step*3];
                        d[0] = t0*alpha + c[0]*beta;
                        d[d_step] = t1*alpha + c[c_step]*beta;
                        d[d_step*2] = t2*alpha + c[c_step*2]*beta;
                        d[d_step*3] = t3*alpha + c[c_step*3]*beta;
    size_t b_step = B.step;
    GEMMSingleMulFunc singleMulFunc;
    GEMMBlockMulFunc blockMulFunc;
    GEMMStoreFunc storeFunc;
    Mat *matD = &D, tmat;
    size_t tmat_size = 0;
    const uchar* Cdata = C.data;
    size_t Cstep = C.data ? (size_t)C.step : 0;
    AutoBuffer<uchar> buf;

    if( type == CV_32FC1 )
        singleMulFunc = (GEMMSingleMulFunc)GEMMSingleMul_32f;
        blockMulFunc = (GEMMBlockMulFunc)GEMMBlockMul_32f;
        storeFunc = (GEMMStoreFunc)GEMMStore_32f;
    else if( type == CV_64FC1 )
        singleMulFunc = (GEMMSingleMulFunc)GEMMSingleMul_64f;
        blockMulFunc = (GEMMBlockMulFunc)GEMMBlockMul_64f;
        storeFunc = (GEMMStoreFunc)GEMMStore_64f;
    else if( type == CV_32FC2 )
        singleMulFunc = (GEMMSingleMulFunc)GEMMSingleMul_32fc;
        blockMulFunc = (GEMMBlockMulFunc)GEMMBlockMul_32fc;
        storeFunc = (GEMMStoreFunc)GEMMStore_32fc;
    else
        CV_Assert( type == CV_64FC2 );
        singleMulFunc = (GEMMSingleMulFunc)GEMMSingleMul_64fc;
        blockMulFunc = (GEMMBlockMulFunc)GEMMBlockMul_64fc;
        storeFunc = (GEMMStoreFunc)GEMMStore_64fc;
    if( D.data == A.data || D.data == B.data )
    {
        tmat_size = d_size.width*d_size.height*CV_ELEM_SIZE(type);
        // D aliases an input: compute into a temporary matrix and copy back at the end.
        // tmat itself is allocated later, once the total size of buf is known.
        matD = &tmat;
    }

    if( (d_size.width == 1 || len == 1) && !(flags & GEMM_2_T) && B.isContinuous() )
        b_step = d_size.width == 1 ? 0 : CV_ELEM_SIZE(type);   // treat B as a packed vector
    /*if( (d_size.width | d_size.height | len) >= 16 && icvBLAS_GEMM_32f_p != 0 )
        blas_func = type == CV_32FC1 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_32f_p :
                    type == CV_64FC1 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_64f_p :
                    type == CV_32FC2 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_32fc_p :
                    type == CV_64FC2 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_64fc_p : 0;

        const char* transa = flags & GEMM_1_T ? "t" : "n";
        const char* transb = flags & GEMM_2_T ? "t" : "n";

        if( C->data.ptr != D->data.ptr )
            if( !(flags & GEMM_3_T) )
                cvCopy( C, D );
            else
                cvTranspose( C, D );

        if( CV_MAT_DEPTH(type) == CV_32F )
            Complex32f _alpha, _beta;
            lda = A->step/sizeof(float);
            ldb = b_step/sizeof(float);
            ldd = D->step/sizeof(float);
            _alpha.re = (float)alpha;
            _beta.re = C->data.ptr ? (float)beta : 0;
            if( CV_MAT_CN(type) == 2 )
                lda /= 2, ldb /= 2, ldd /= 2;
            blas_func( transb, transa, &d_size.width, &d_size.height, &len,
                       &_alpha, B->data.ptr, &ldb, A->data.ptr, &lda,
                       &_beta, D->data.ptr, &ldd );
        else
            CvComplex64f _alpha, _beta;
            lda = A->step/sizeof(double);
            ldb = b_step/sizeof(double);
            ldd = D->step/sizeof(double);
            _beta.re = C->data.ptr ? beta : 0;
            if( CV_MAT_CN(type) == 2 )
                lda /= 2, ldb /= 2, ldd /= 2;
            blas_func( transb, transa, &d_size.width, &d_size.height, &len,
                       &_alpha, B->data.ptr, &ldb, A->data.ptr, &lda,
                       &_beta, D->data.ptr, &ldd );
    else*/ if( ((d_size.height <= block_lin_size/2 || d_size.width <= block_lin_size/2) &&
               len <= 10000) || len <= 10 ||
               (d_size.width <= block_lin_size &&
                d_size.height <= block_lin_size && len <= block_lin_size) )
        // the product is small enough: compute it in a single pass, no blocking
        if( tmat_size > 0 ) {
            buf.allocate(tmat_size);
            tmat = Mat(d_size.height, d_size.width, type, (uchar*)buf );
        }
        singleMulFunc( A.ptr(), A.step, B.ptr(), b_step, Cdata, Cstep,
                       matD->ptr(), matD->step, a_size, d_size, alpha, beta, flags );
    else
        int is_a_t = flags & GEMM_1_T;
        int is_b_t = flags & GEMM_2_T;
        int elem_size = CV_ELEM_SIZE(type);
        int dk0_1, dk0_2;
        int a_buf_size = 0, b_buf_size, d_buf_size;
        uchar* a_buf = 0;
        uchar* b_buf = 0;
        uchar* d_buf = 0;
        int j, k, di = 0, dj = 0, dk = 0;
        int dm0, dn0, dk0;
        size_t a_step0, a_step1, b_step0, b_step1, c_step0, c_step1;
        int work_elem_size = elem_size << (CV_MAT_DEPTH(type) == CV_32F ? 1 : 0);  // float products accumulate in double

        if( !is_a_t )
            a_step0 = A.step, a_step1 = elem_size;
        else
            a_step0 = elem_size, a_step1 = A.step;

        if( !is_b_t )
            b_step0 = b_step, b_step1 = elem_size;
        else
            b_step0 = elem_size, b_step1 = b_step;

        if( !C.data )
            c_step0 = c_step1 = 0;
        else if( !(flags & GEMM_3_T) )
            c_step0 = C.step, c_step1 = elem_size;
        else
            c_step0 = elem_size, c_step1 = C.step;

        dm0 = std::min( block_lin_size, d_size.height );
        dn0 = std::min( block_lin_size, d_size.width );
        dk0_1 = block_size / dm0;
        dk0_2 = block_size / dn0;
        dk0 = std::min( dk0_1, dk0_2 );
        dk0 = std::min( dk0, len );
        if( dk0*dm0 > block_size )
            dm0 = block_size / dk0;
        if( dk0*dn0 > block_size )
            dn0 = block_size / dk0;

        dk0_1 = (dn0+dn0/8+2) & -2;
        b_buf_size = (dk0+dk0/8+1)*dk0_1*elem_size;
        d_buf_size = (dk0+dk0/8+1)*dk0_1*work_elem_size;

        if( is_a_t )
            a_buf_size = (dm0+dm0/8+1)*((dk0+dk0/8+2)&-2)*elem_size;

        buf.allocate(d_buf_size + b_buf_size + a_buf_size + tmat_size);
        d_buf = (uchar*)buf;
        b_buf = d_buf + d_buf_size;
        if( is_a_t )
            a_buf = b_buf + b_buf_size;
        if( tmat_size > 0 )
            tmat = Mat(d_size.height, d_size.width, type, b_buf + b_buf_size + a_buf_size );
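/*
   Cache-blocking scheme: D is processed in dm0 x dn0 tiles and the inner
   dimension in dk0-long panels, with dm0*dk0 and dk0*dn0 capped at
   block_size = 128*128 elements. Each B panel (and each A panel, when A is
   transposed) is staged into a contiguous buffer, partial products are
   accumulated across k-panels in the wide-type d_buf, and GEMMStore applies
   alpha, beta and C exactly once per tile.
*/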
        for( i = 0; i < d_size.height; i += di )
            di = dm0;
            if( i + di >= d_size.height || 8*(i + di) + di > 8*d_size.height )
                di = d_size.height - i;

            for( j = 0; j < d_size.width; j += dj )
                uchar* _d = matD->ptr() + i*matD->step + j*elem_size;
                const uchar* _c = Cdata + i*c_step0 + j*c_step1;
                size_t _d_step = matD->step;
                dj = dn0;

                if( j + dj >= d_size.width || 8*(j + dj) + dj > 8*d_size.width )
                    dj = d_size.width - j;

                if( dk0 < len )
                {
                    _d = d_buf;
                    _d_step = dj*work_elem_size;
                }

                for( k = 0; k < len; k += dk )
                    const uchar* _a = A.ptr() + i*a_step0 + k*a_step1;
                    size_t _a_step = A.step;
                    const uchar* _b = B.ptr() + k*b_step0 + j*b_step1;
                    size_t _b_step = b_step;
                    Size a_bl_size;

                    dk = dk0;
                    if( k + dk >= len || 8*(k + dk) + dk > 8*len )
                        dk = len - k;

                    if( !is_a_t )
                        a_bl_size.width = dk, a_bl_size.height = di;
                    else
                        a_bl_size.width = di, a_bl_size.height = dk;

                    if( a_buf && is_a_t )
                    {
                        _a_step = dk*elem_size;
                        GEMM_TransposeBlock( _a, A.step, a_buf, _a_step, a_bl_size, elem_size );
                        std::swap( a_bl_size.width, a_bl_size.height );
                        _a = a_buf;
                    }

                    if( dj < d_size.width )
                    {
                        Size b_size;
                        if( !is_b_t )
                            b_size.width = dj, b_size.height = dk;
                        else
                            b_size.width = dk, b_size.height = dj;

                        _b_step = b_size.width*elem_size;
                        GEMM_CopyBlock( _b, b_step, b_buf, _b_step, b_size, elem_size );
                        _b = b_buf;
                    }

                    if( dk0 < len )
                        blockMulFunc( _a, _a_step, _b, _b_step, _d, _d_step,
                                      a_bl_size, Size(dj,di), flags );
                    else
                        singleMulFunc( _a, _a_step, _b, _b_step, _c, Cstep,
                                       _d, _d_step, a_bl_size, Size(dj,di), alpha, beta, flags );

                if( dk0 < len )
                    storeFunc( _c, Cstep, _d, _d_step,
                               matD->ptr(i) + j*elem_size,
                               matD->step, Size(dj,di), alpha, beta, flags );

        if( matD != &D )
            matD->copyTo(D);
/****************************************************************************************\
*                                        Transform                                      *
\****************************************************************************************/
template<typename T, typename WT> static void
transform_( const T* src, T* dst, const WT* m, int len, int scn, int dcn )
    int x;

    if( scn == 2 && dcn == 2 )
        for( x = 0; x < len*2; x += 2 )
            WT v0 = src[x], v1 = src[x+1];
            T t0 = saturate_cast<T>(m[0]*v0 + m[1]*v1 + m[2]);
            T t1 = saturate_cast<T>(m[3]*v0 + m[4]*v1 + m[5]);
            dst[x] = t0; dst[x+1] = t1;
    else if( scn == 3 && dcn == 3 )
        for( x = 0; x < len*3; x += 3 )
            WT v0 = src[x], v1 = src[x+1], v2 = src[x+2];
            T t0 = saturate_cast<T>(m[0]*v0 + m[1]*v1 + m[2]*v2 + m[3]);
            T t1 = saturate_cast<T>(m[4]*v0 + m[5]*v1 + m[6]*v2 + m[7]);
            T t2 = saturate_cast<T>(m[8]*v0 + m[9]*v1 + m[10]*v2 + m[11]);
            dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2;
    else if( scn == 3 && dcn == 1 )
        for( x = 0; x < len; x++, src += 3 )
            dst[x] = saturate_cast<T>(m[0]*src[0] + m[1]*src[1] + m[2]*src[2] + m[3]);
    else if( scn == 4 && dcn == 4 )
        for( x = 0; x < len*4; x += 4 )
            WT v0 = src[x], v1 = src[x+1], v2 = src[x+2], v3 = src[x+3];
            T t0 = saturate_cast<T>(m[0]*v0 + m[1]*v1 + m[2]*v2 + m[3]*v3 + m[4]);
            T t1 = saturate_cast<T>(m[5]*v0 + m[6]*v1 + m[7]*v2 + m[8]*v3 + m[9]);
            dst[x] = t0; dst[x+1] = t1;
            t0 = saturate_cast<T>(m[10]*v0 + m[11]*v1 + m[12]*v2 + m[13]*v3 + m[14]);
            t1 = saturate_cast<T>(m[15]*v0 + m[16]*v1 + m[17]*v2 + m[18]*v3 + m[19]);
            dst[x+2] = t0; dst[x+3] = t1;
    else
        for( x = 0; x < len; x++, src += scn, dst += dcn )
            const WT* _m = m;
            int j, k;
            for( j = 0; j < dcn; j++, _m += scn + 1 )
                WT s = _m[scn];
                for( k = 0; k < scn; k++ )
                    s += _m[k]*src[k];
                dst[j] = saturate_cast<T>(s);
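/*
   transform_ applies an affine channel transform to every element:

       dst(x) = M[:, 0:scn] * src(x) + M[:, scn],

   where M is dcn x (scn+1), stored row by row in the wide type WT. For example,
   a 1x4 row (0.114, 0.587, 0.299, 0) applied to a 3-channel BGR image computes
   the usual luma weighting into a single channel; the 2x2/3x3/4x4 branches above
   are just manually unrolled versions of the generic loop at the end.
*/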
#if CV_SSE2

static inline void
load3x3Matrix( const float* m, __m128& m0, __m128& m1, __m128& m2, __m128& m3 )
    // load the 3x4 transform matrix column-wise into four registers
    m0 = _mm_setr_ps(m[0], m[4], m[8], 0);
    m1 = _mm_setr_ps(m[1], m[5], m[9], 0);
    m2 = _mm_setr_ps(m[2], m[6], m[10], 0);
    m3 = _mm_setr_ps(m[3], m[7], m[11], 0);

static inline void
load4x4Matrix( const float* m, __m128& m0, __m128& m1, __m128& m2, __m128& m3, __m128& m4 )
    // load the 4x5 transform matrix column-wise into five registers
    m0 = _mm_setr_ps(m[0], m[5], m[10], m[15]);
    m1 = _mm_setr_ps(m[1], m[6], m[11], m[16]);
    m2 = _mm_setr_ps(m[2], m[7], m[12], m[17]);
    m3 = _mm_setr_ps(m[3], m[8], m[13], m[18]);
    m4 = _mm_setr_ps(m[4], m[9], m[14], m[19]);

#endif
static void
transform_8u( const uchar* src, uchar* dst, const float* m, int len, int scn, int dcn )
#if CV_SSE2
    const int BITS = 10, SCALE = 1 << BITS;
    const float MAX_M = (float)(1 << (15 - BITS));

    if( USE_SSE2 && scn == 3 && dcn == 3 &&
        std::abs(m[0]) < MAX_M && std::abs(m[1]) < MAX_M && std::abs(m[2]) < MAX_M && std::abs(m[3]) < MAX_M*256 &&
        std::abs(m[4]) < MAX_M && std::abs(m[5]) < MAX_M && std::abs(m[6]) < MAX_M && std::abs(m[7]) < MAX_M*256 &&
        std::abs(m[8]) < MAX_M && std::abs(m[9]) < MAX_M && std::abs(m[10]) < MAX_M && std::abs(m[11]) < MAX_M*256 )
        // faster fixed-point transformation
        short m00 = saturate_cast<short>(m[0]*SCALE), m01 = saturate_cast<short>(m[1]*SCALE),
              m02 = saturate_cast<short>(m[2]*SCALE), m10 = saturate_cast<short>(m[4]*SCALE),
              m11 = saturate_cast<short>(m[5]*SCALE), m12 = saturate_cast<short>(m[6]*SCALE),
              m20 = saturate_cast<short>(m[8]*SCALE), m21 = saturate_cast<short>(m[9]*SCALE),
              m22 = saturate_cast<short>(m[10]*SCALE);
        int m03 = saturate_cast<int>((m[3]+0.5f)*SCALE), m13 = saturate_cast<int>((m[7]+0.5f)*SCALE ),
            m23 = saturate_cast<int>((m[11]+0.5f)*SCALE);

        __m128i m0 = _mm_setr_epi16(0, m00, m01, m02, m00, m01, m02, 0);
        __m128i m1 = _mm_setr_epi16(0, m10, m11, m12, m10, m11, m12, 0);
        __m128i m2 = _mm_setr_epi16(0, m20, m21, m22, m20, m21, m22, 0);
        __m128i m3 = _mm_setr_epi32(m03, m13, m23, 0);
        int x = 0;
        for( ; x <= (len - 8)*3; x += 8*3 )
            __m128i z = _mm_setzero_si128(), t0, t1, t2, r0, r1;
            __m128i v0 = _mm_loadl_epi64((const __m128i*)(src + x));
            __m128i v1 = _mm_loadl_epi64((const __m128i*)(src + x + 8));
            __m128i v2 = _mm_loadl_epi64((const __m128i*)(src + x + 16)), v3;
            v0 = _mm_unpacklo_epi8(v0, z); // b0 g0 r0 b1 g1 r1 b2 g2
            v1 = _mm_unpacklo_epi8(v1, z); // r2 b3 g3 r3 b4 g4 r4 b5
            v2 = _mm_unpacklo_epi8(v2, z); // g5 r5 b6 g6 r6 b7 g7 r7

            v3 = _mm_srli_si128(v2, 2); // ? b6 g6 r6 b7 g7 r7 0
            v2 = _mm_or_si128(_mm_slli_si128(v2, 10), _mm_srli_si128(v1, 6)); // ? b4 g4 r4 b5 g5 r5 ?
            v1 = _mm_or_si128(_mm_slli_si128(v1, 6), _mm_srli_si128(v0, 10)); // ? b2 g2 r2 b3 g3 r3 ?
            v0 = _mm_slli_si128(v0, 2); // 0 b0 g0 r0 b1 g1 r1 ?

            // process pixels 0 & 1
            t0 = _mm_madd_epi16(v0, m0); // a0 b0 a1 b1
            t1 = _mm_madd_epi16(v0, m1); // c0 d0 c1 d1
            t2 = _mm_madd_epi16(v0, m2); // e0 f0 e1 f1
            v0 = _mm_unpacklo_epi32(t0, t1); // a0 c0 b0 d0
            t0 = _mm_unpackhi_epi32(t0, t1); // a1 b1 c1 d1
            t1 = _mm_unpacklo_epi32(t2, z); // e0 0 f0 0
            t2 = _mm_unpackhi_epi32(t2, z); // e1 0 f1 0
            r0 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(v0, t1), _mm_unpackhi_epi64(v0,t1)), m3); // B0 G0 R0 0
            r1 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(t0, t2), _mm_unpackhi_epi64(t0,t2)), m3); // B1 G1 R1 0
            r0 = _mm_srai_epi32(r0, BITS);
            r1 = _mm_srai_epi32(r1, BITS);
            v0 = _mm_packus_epi16(_mm_packs_epi32(_mm_slli_si128(r0, 4), r1), z); // 0 B0 G0 R0 B1 G1 R1 0

            // process pixels 2 & 3
            t0 = _mm_madd_epi16(v1, m0); // a0 b0 a1 b1
            t1 = _mm_madd_epi16(v1, m1); // c0 d0 c1 d1
            t2 = _mm_madd_epi16(v1, m2); // e0 f0 e1 f1
            v1 = _mm_unpacklo_epi32(t0, t1); // a0 c0 b0 d0
            t0 = _mm_unpackhi_epi32(t0, t1); // a1 b1 c1 d1
            t1 = _mm_unpacklo_epi32(t2, z); // e0 0 f0 0
            t2 = _mm_unpackhi_epi32(t2, z); // e1 0 f1 0
            r0 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(v1, t1), _mm_unpackhi_epi64(v1,t1)), m3); // B2 G2 R2 0
            r1 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(t0, t2), _mm_unpackhi_epi64(t0,t2)), m3); // B3 G3 R3 0
            r0 = _mm_srai_epi32(r0, BITS);
            r1 = _mm_srai_epi32(r1, BITS);
            v1 = _mm_packus_epi16(_mm_packs_epi32(_mm_slli_si128(r0, 4), r1), z); // 0 B2 G2 R2 B3 G3 R3 0

            // process pixels 4 & 5
            t0 = _mm_madd_epi16(v2, m0); // a0 b0 a1 b1
            t1 = _mm_madd_epi16(v2, m1); // c0 d0 c1 d1
            t2 = _mm_madd_epi16(v2, m2); // e0 f0 e1 f1
            v2 = _mm_unpacklo_epi32(t0, t1); // a0 c0 b0 d0
            t0 = _mm_unpackhi_epi32(t0, t1); // a1 b1 c1 d1
            t1 = _mm_unpacklo_epi32(t2, z); // e0 0 f0 0
            t2 = _mm_unpackhi_epi32(t2, z); // e1 0 f1 0
            r0 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(v2, t1), _mm_unpackhi_epi64(v2,t1)), m3); // B4 G4 R4 0
            r1 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(t0, t2), _mm_unpackhi_epi64(t0,t2)), m3); // B5 G5 R5 0
            r0 = _mm_srai_epi32(r0, BITS);
            r1 = _mm_srai_epi32(r1, BITS);
            v2 = _mm_packus_epi16(_mm_packs_epi32(_mm_slli_si128(r0, 4), r1), z); // 0 B4 G4 R4 B5 G5 R5 0

            // process pixels 6 & 7
            t0 = _mm_madd_epi16(v3, m0); // a0 b0 a1 b1
            t1 = _mm_madd_epi16(v3, m1); // c0 d0 c1 d1
            t2 = _mm_madd_epi16(v3, m2); // e0 f0 e1 f1
            v3 = _mm_unpacklo_epi32(t0, t1); // a0 c0 b0 d0
            t0 = _mm_unpackhi_epi32(t0, t1); // a1 b1 c1 d1
            t1 = _mm_unpacklo_epi32(t2, z); // e0 0 f0 0
            t2 = _mm_unpackhi_epi32(t2, z); // e1 0 f1 0
            r0 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(v3, t1), _mm_unpackhi_epi64(v3,t1)), m3); // B6 G6 R6 0
            r1 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(t0, t2), _mm_unpackhi_epi64(t0,t2)), m3); // B7 G7 R7 0
            r0 = _mm_srai_epi32(r0, BITS);
            r1 = _mm_srai_epi32(r1, BITS);
            v3 = _mm_packus_epi16(_mm_packs_epi32(_mm_slli_si128(r0, 4), r1), z); // 0 B6 G6 R6 B7 G7 R7 0

            v0 = _mm_or_si128(_mm_srli_si128(v0, 1), _mm_slli_si128(v1, 5));
            v1 = _mm_or_si128(_mm_srli_si128(v1, 3), _mm_slli_si128(v2, 3));
            v2 = _mm_or_si128(_mm_srli_si128(v2, 5), _mm_slli_si128(v3, 1));
            _mm_storel_epi64((__m128i*)(dst + x), v0);
            _mm_storel_epi64((__m128i*)(dst + x + 8), v1);
            _mm_storel_epi64((__m128i*)(dst + x + 16), v2);
        for( ; x < len*3; x += 3 )
            int v0 = src[x], v1 = src[x+1], v2 = src[x+2];
            uchar t0 = saturate_cast<uchar>((m00*v0 + m01*v1 + m02*v2 + m03)>>BITS);
            uchar t1 = saturate_cast<uchar>((m10*v0 + m11*v1 + m12*v2 + m13)>>BITS);
            uchar t2 = saturate_cast<uchar>((m20*v0 + m21*v1 + m22*v2 + m23)>>BITS);
            dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2;
        return;
#endif

    transform_(src, dst, m, len, scn, dcn);
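/*
   Fixed-point 8-bit path: every coefficient is pre-scaled by 2^10 and rounded to
   a short, the +0.5f added to the offsets makes the final >>BITS a
   round-to-nearest, and _mm_madd_epi16 produces the row dot products two pixels
   at a time. The MAX_M guards ensure m*SCALE fits in 16 bits; e.g. a coefficient
   of 1.5f becomes (short)(1.5f*1024) = 1536, and (100*1536) >> 10 recovers 150.
*/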
static void
transform_16u( const ushort* src, ushort* dst, const float* m, int len, int scn, int dcn )
#if CV_SSE2
    if( USE_SSE2 && scn == 3 && dcn == 3 )
        __m128 m0, m1, m2, m3;
        __m128i delta = _mm_setr_epi16(0,-32768,-32768,-32768,-32768,-32768,-32768,0);
        load3x3Matrix(m, m0, m1, m2, m3);
        m3 = _mm_sub_ps(m3, _mm_setr_ps(32768.f, 32768.f, 32768.f, 0.f));  // bias into signed range for packs; 'delta' adds it back
        int x = 0;
        for( ; x <= (len - 4)*3; x += 4*3 )
            __m128i z = _mm_setzero_si128();
            __m128i v0 = _mm_loadu_si128((const __m128i*)(src + x)), v1;
            __m128i v2 = _mm_loadl_epi64((const __m128i*)(src + x + 8)), v3;
            v1 = _mm_unpacklo_epi16(_mm_srli_si128(v0, 6), z); // b1 g1 r1
            v3 = _mm_unpacklo_epi16(_mm_srli_si128(v2, 2), z); // b3 g3 r3
            v2 = _mm_or_si128(_mm_srli_si128(v0, 12), _mm_slli_si128(v2, 4));
            v0 = _mm_unpacklo_epi16(v0, z); // b0 g0 r0
            v2 = _mm_unpacklo_epi16(v2, z); // b2 g2 r2
            __m128 x0 = _mm_cvtepi32_ps(v0), x1 = _mm_cvtepi32_ps(v1);
            __m128 x2 = _mm_cvtepi32_ps(v2), x3 = _mm_cvtepi32_ps(v3);
            __m128 y0 = _mm_add_ps(_mm_add_ps(_mm_add_ps(
                        _mm_mul_ps(m0, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(0,0,0,0))),
                        _mm_mul_ps(m1, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(1,1,1,1)))),
                        _mm_mul_ps(m2, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(2,2,2,2)))), m3);
            __m128 y1 = _mm_add_ps(_mm_add_ps(_mm_add_ps(
                        _mm_mul_ps(m0, _mm_shuffle_ps(x1,x1,_MM_SHUFFLE(0,0,0,0))),
                        _mm_mul_ps(m1, _mm_shuffle_ps(x1,x1,_MM_SHUFFLE(1,1,1,1)))),
                        _mm_mul_ps(m2, _mm_shuffle_ps(x1,x1,_MM_SHUFFLE(2,2,2,2)))), m3);
            __m128 y2 = _mm_add_ps(_mm_add_ps(_mm_add_ps(
                        _mm_mul_ps(m0, _mm_shuffle_ps(x2,x2,_MM_SHUFFLE(0,0,0,0))),
                        _mm_mul_ps(m1, _mm_shuffle_ps(x2,x2,_MM_SHUFFLE(1,1,1,1)))),
                        _mm_mul_ps(m2, _mm_shuffle_ps(x2,x2,_MM_SHUFFLE(2,2,2,2)))), m3);
            __m128 y3 = _mm_add_ps(_mm_add_ps(_mm_add_ps(
                        _mm_mul_ps(m0, _mm_shuffle_ps(x3,x3,_MM_SHUFFLE(0,0,0,0))),
                        _mm_mul_ps(m1, _mm_shuffle_ps(x3,x3,_MM_SHUFFLE(1,1,1,1)))),
                        _mm_mul_ps(m2, _mm_shuffle_ps(x3,x3,_MM_SHUFFLE(2,2,2,2)))), m3);
            v0 = _mm_cvtps_epi32(y0); v1 = _mm_cvtps_epi32(y1);
            v2 = _mm_cvtps_epi32(y2); v3 = _mm_cvtps_epi32(y3);

            v0 = _mm_add_epi16(_mm_packs_epi32(_mm_slli_si128(v0,4), v1), delta); // 0 b0 g0 r0 b1 g1 r1 0
            v2 = _mm_add_epi16(_mm_packs_epi32(_mm_slli_si128(v2,4), v3), delta); // 0 b2 g2 r2 b3 g3 r3 0
            v1 = _mm_or_si128(_mm_srli_si128(v0,2), _mm_slli_si128(v2,10)); // b0 g0 r0 b1 g1 r1 b2 g2
            v2 = _mm_srli_si128(v2, 6); // r2 b3 g3 r3 0 0 0 0
            _mm_storeu_si128((__m128i*)(dst + x), v1);
            _mm_storel_epi64((__m128i*)(dst + x + 8), v2);
        for( ; x < len*3; x += 3 )
            float v0 = src[x], v1 = src[x+1], v2 = src[x+2];
            ushort t0 = saturate_cast<ushort>(m[0]*v0 + m[1]*v1 + m[2]*v2 + m[3]);
            ushort t1 = saturate_cast<ushort>(m[4]*v0 + m[5]*v1 + m[6]*v2 + m[7]);
            ushort t2 = saturate_cast<ushort>(m[8]*v0 + m[9]*v1 + m[10]*v2 + m[11]);
            dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2;
        return;
#endif

    transform_(src, dst, m, len, scn, dcn);
static void
transform_32f( const float* src, float* dst, const float* m, int len, int scn, int dcn )
#if CV_SSE2
    if( USE_SSE2 )
        int x = 0;
        if( scn == 3 && dcn == 3 )
            __m128 m0, m1, m2, m3;
            load3x3Matrix(m, m0, m1, m2, m3);

            // the last pixel is handled by the scalar tail to avoid reading past the buffer
            for( ; x < (len - 1)*3; x += 3 )
                __m128 x0 = _mm_loadu_ps(src + x);
                __m128 y0 = _mm_add_ps(_mm_add_ps(_mm_add_ps(
                            _mm_mul_ps(m0, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(0,0,0,0))),
                            _mm_mul_ps(m1, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(1,1,1,1)))),
                            _mm_mul_ps(m2, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(2,2,2,2)))), m3);
                _mm_storel_pi((__m64*)(dst + x), y0);
                _mm_store_ss(dst + x + 2, _mm_movehl_ps(y0,y0));

            for( ; x < len*3; x += 3 )
                float v0 = src[x], v1 = src[x+1], v2 = src[x+2];
                float t0 = saturate_cast<float>(m[0]*v0 + m[1]*v1 + m[2]*v2 + m[3]);
                float t1 = saturate_cast<float>(m[4]*v0 + m[5]*v1 + m[6]*v2 + m[7]);
                float t2 = saturate_cast<float>(m[8]*v0 + m[9]*v1 + m[10]*v2 + m[11]);
                dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2;
            return;

        if( scn == 4 && dcn == 4 )
            __m128 m0, m1, m2, m3, m4;
            load4x4Matrix(m, m0, m1, m2, m3, m4);

            for( ; x < len*4; x += 4 )
                __m128 x0 = _mm_loadu_ps(src + x);
                __m128 y0 = _mm_add_ps(_mm_add_ps(_mm_add_ps(_mm_add_ps(
                            _mm_mul_ps(m0, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(0,0,0,0))),
                            _mm_mul_ps(m1, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(1,1,1,1)))),
                            _mm_mul_ps(m2, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(2,2,2,2)))),
                            _mm_mul_ps(m3, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(3,3,3,3)))), m4);
                _mm_storeu_ps(dst + x, y0);
            return;
#endif

    transform_(src, dst, m, len, scn, dcn);
static void
transform_8s(const schar* src, schar* dst, const float* m, int len, int scn, int dcn)
    transform_(src, dst, m, len, scn, dcn);

static void
transform_16s(const short* src, short* dst, const float* m, int len, int scn, int dcn)
    transform_(src, dst, m, len, scn, dcn);

static void
transform_32s(const int* src, int* dst, const double* m, int len, int scn, int dcn)
    transform_(src, dst, m, len, scn, dcn);

static void
transform_64f(const double* src, double* dst, const double* m, int len, int scn, int dcn)
    transform_(src, dst, m, len, scn, dcn);
template<typename T, typename WT> static void
diagtransform_( const T* src, T* dst, const WT* m, int len, int cn, int )
    int x;

    if( cn == 2 )
        for( x = 0; x < len*2; x += 2 )
            T t0 = saturate_cast<T>(m[0]*src[x] + m[2]);
            T t1 = saturate_cast<T>(m[4]*src[x+1] + m[5]);
            dst[x] = t0; dst[x+1] = t1;
    else if( cn == 3 )
        for( x = 0; x < len*3; x += 3 )
            T t0 = saturate_cast<T>(m[0]*src[x] + m[3]);
            T t1 = saturate_cast<T>(m[5]*src[x+1] + m[7]);
            T t2 = saturate_cast<T>(m[10]*src[x+2] + m[11]);
            dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2;
    else if( cn == 4 )
        for( x = 0; x < len*4; x += 4 )
            T t0 = saturate_cast<T>(m[0]*src[x] + m[4]);
            T t1 = saturate_cast<T>(m[6]*src[x+1] + m[9]);
            dst[x] = t0; dst[x+1] = t1;
            t0 = saturate_cast<T>(m[12]*src[x+2] + m[14]);
            t1 = saturate_cast<T>(m[18]*src[x+3] + m[19]);
            dst[x+2] = t0; dst[x+3] = t1;
    else
        for( x = 0; x < len; x++, src += cn, dst += cn )
            const WT* _m = m;
            for( int j = 0; j < cn; j++, _m += cn + 1 )
                dst[j] = saturate_cast<T>(src[j]*_m[j] + _m[cn]);
static void
diagtransform_8u(const uchar* src, uchar* dst, const float* m, int len, int scn, int dcn)
    diagtransform_(src, dst, m, len, scn, dcn);

static void
diagtransform_8s(const schar* src, schar* dst, const float* m, int len, int scn, int dcn)
    diagtransform_(src, dst, m, len, scn, dcn);

static void
diagtransform_16u(const ushort* src, ushort* dst, const float* m, int len, int scn, int dcn)
    diagtransform_(src, dst, m, len, scn, dcn);

static void
diagtransform_16s(const short* src, short* dst, const float* m, int len, int scn, int dcn)
    diagtransform_(src, dst, m, len, scn, dcn);

static void
diagtransform_32s(const int* src, int* dst, const double* m, int len, int scn, int dcn)
    diagtransform_(src, dst, m, len, scn, dcn);

static void
diagtransform_32f(const float* src, float* dst, const float* m, int len, int scn, int dcn)
    diagtransform_(src, dst, m, len, scn, dcn);

static void
diagtransform_64f(const double* src, double* dst, const double* m, int len, int scn, int dcn)
    diagtransform_(src, dst, m, len, scn, dcn);
typedef void (*TransformFunc)( const uchar* src, uchar* dst, const uchar* m, int, int, int );

static TransformFunc getTransformFunc(int depth)
    static TransformFunc transformTab[] =
    {
        (TransformFunc)transform_8u, (TransformFunc)transform_8s, (TransformFunc)transform_16u,
        (TransformFunc)transform_16s, (TransformFunc)transform_32s, (TransformFunc)transform_32f,
        (TransformFunc)transform_64f, 0
    };
    return transformTab[depth];

static TransformFunc getDiagTransformFunc(int depth)
    static TransformFunc diagTransformTab[] =
    {
        (TransformFunc)diagtransform_8u, (TransformFunc)diagtransform_8s, (TransformFunc)diagtransform_16u,
        (TransformFunc)diagtransform_16s, (TransformFunc)diagtransform_32s, (TransformFunc)diagtransform_32f,
        (TransformFunc)diagtransform_64f, 0
    };
    return diagTransformTab[depth];
void cv::transform( InputArray _src, OutputArray _dst, InputArray _mtx )
    Mat src = _src.getMat(), m = _mtx.getMat();
    int depth = src.depth(), scn = src.channels(), dcn = m.rows;
    CV_Assert( scn == m.cols || scn + 1 == m.cols );
    bool isDiag = false;

    _dst.create( src.size(), CV_MAKETYPE(depth, dcn) );
    Mat dst = _dst.getMat();

    int mtype = depth == CV_32S || depth == CV_64F ? CV_64F : CV_32F;
    AutoBuffer<double> _mbuf;
    double* mbuf = _mbuf;

    if( !m.isContinuous() || m.type() != mtype || m.cols != scn + 1 )
        _mbuf.allocate(dcn*(scn+1));
        mbuf = (double*)_mbuf;
        Mat tmp(dcn, scn+1, mtype, mbuf);
        memset(tmp.ptr(), 0, tmp.total()*tmp.elemSize());
        if( m.cols == scn+1 )
            m.convertTo(tmp, mtype);
        else
        {
            Mat tmppart = tmp.colRange(0, m.cols);
            m.convertTo(tmppart, mtype);
        }
        m = tmp;
    else
        mbuf = m.ptr<double>();

    if( scn == dcn )
        int i, j;
        double eps = mtype == CV_32F ? FLT_EPSILON : DBL_EPSILON;

        if( scn == 1 )
            // 1x2 matrix: dst = src*alpha + beta; delegate to convertTo
            double alpha, beta;
            if( mtype == CV_32F )
                alpha = m.at<float>(0), beta = m.at<float>(1);
            else
                alpha = m.at<double>(0), beta = m.at<double>(1);
            src.convertTo(dst, dst.type(), alpha, beta);
            return;

        for( i = 0, isDiag = true; isDiag && i < scn; i++ )
            for( j = 0; isDiag && j < scn; j++ )
                double v = mtype == CV_32F ? m.at<float>(i, j) : m.at<double>(i, j);
                if( i != j && fabs(v) > eps )
                    isDiag = false;

    TransformFunc func = isDiag ? getDiagTransformFunc(depth) : getTransformFunc(depth);
    CV_Assert( func != 0 );

    const Mat* arrays[] = {&src, &dst, 0};
    uchar* ptrs[2];
    NAryMatIterator it(arrays, ptrs);
    size_t i, total = it.size;

    for( i = 0; i < it.nplanes; i++, ++it )
        func( ptrs[0], ptrs[1], (uchar*)mbuf, (int)total, scn, dcn );
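/*
   Usage sketch: a per-channel "color twist". The matrix is dcn x scn (linear
   part only) or dcn x (scn+1), with the last column holding per-channel offsets:

       cv::Mat twist = (cv::Mat_<float>(3, 4) <<
           0.5f, 0,    0,    10,     // B' = 0.5*B + 10
           0,    1.0f, 0,    0,      // G' = G
           0,    0,    2.0f, -20);   // R' = 2*R - 20
       cv::Mat dst;
       cv::transform(bgr, dst, twist);   // 'bgr' stands for any CV_8UC3 image

   When the linear part is (near-)diagonal, as above, the cheaper per-channel
   diagtransform_ kernel is selected automatically.
*/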
/****************************************************************************************\
*                                  Perspective Transform                                *
\****************************************************************************************/
template<typename T> static void
perspectiveTransform_( const T* src, T* dst, const double* m, int len, int scn, int dcn )
    const double eps = FLT_EPSILON;
    int i;

    if( scn == 2 && dcn == 2 )
        for( i = 0; i < len*2; i += 2 )
            T x = src[i], y = src[i + 1];
            double w = x*m[6] + y*m[7] + m[8];
            if( fabs(w) > eps )
            {
                w = 1./w;
                dst[i] = (T)((x*m[0] + y*m[1] + m[2])*w);
                dst[i+1] = (T)((x*m[3] + y*m[4] + m[5])*w);
            }
            else
                dst[i] = dst[i+1] = (T)0;
    else if( scn == 3 && dcn == 3 )
        for( i = 0; i < len*3; i += 3 )
            T x = src[i], y = src[i + 1], z = src[i + 2];
            double w = x*m[12] + y*m[13] + z*m[14] + m[15];
            if( fabs(w) > eps )
            {
                w = 1./w;
                dst[i] = (T)((x*m[0] + y*m[1] + z*m[2] + m[3]) * w);
                dst[i+1] = (T)((x*m[4] + y*m[5] + z*m[6] + m[7]) * w);
                dst[i+2] = (T)((x*m[8] + y*m[9] + z*m[10] + m[11]) * w);
            }
            else
                dst[i] = dst[i+1] = dst[i+2] = (T)0;
    else if( scn == 3 && dcn == 2 )
        for( i = 0; i < len; i++, src += 3, dst += 2 )
            T x = src[0], y = src[1], z = src[2];
            double w = x*m[8] + y*m[9] + z*m[10] + m[11];
            if( fabs(w) > eps )
            {
                w = 1./w;
                dst[0] = (T)((x*m[0] + y*m[1] + z*m[2] + m[3])*w);
                dst[1] = (T)((x*m[4] + y*m[5] + z*m[6] + m[7])*w);
            }
            else
                dst[0] = dst[1] = (T)0;
    else
        for( i = 0; i < len; i++, src += scn, dst += dcn )
            const double* _m = m + dcn*(scn + 1);   // last row of the (dcn+1) x (scn+1) matrix
            double w = _m[scn];
            int j, k;
            for( k = 0; k < scn; k++ )
                w += _m[k]*src[k];
            if( fabs(w) > eps )
            {
                w = 1./w;
                _m = m;
                for( j = 0; j < dcn; j++, _m += scn + 1 )
                    double s = _m[scn];
                    for( k = 0; k < scn; k++ )
                        s += _m[k]*src[k];
                    dst[j] = (T)(s*w);
            }
            else
                for( j = 0; j < dcn; j++ )
                    dst[j] = 0;
static void
perspectiveTransform_32f(const float* src, float* dst, const double* m, int len, int scn, int dcn)
    perspectiveTransform_(src, dst, m, len, scn, dcn);

static void
perspectiveTransform_64f(const double* src, double* dst, const double* m, int len, int scn, int dcn)
    perspectiveTransform_(src, dst, m, len, scn, dcn);
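/*
   Each point is lifted to homogeneous coordinates and mapped by the
   (dcn+1) x (scn+1) matrix M:

       (x'_1, ..., x'_dcn, w') = M * (x_1, ..., x_scn, 1)^T
       dst = (x'_1/w', ..., x'_dcn/w'),   or all zeros when |w'| <= eps.
*/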
void cv::perspectiveTransform( InputArray _src, OutputArray _dst, InputArray _mtx )
    Mat src = _src.getMat(), m = _mtx.getMat();
    int depth = src.depth(), scn = src.channels(), dcn = m.rows-1;
    CV_Assert( scn + 1 == m.cols );
    CV_Assert( depth == CV_32F || depth == CV_64F );

    _dst.create( src.size(), CV_MAKETYPE(depth, dcn) );
    Mat dst = _dst.getMat();

    const int mtype = CV_64F;
    AutoBuffer<double> _mbuf;
    double* mbuf = _mbuf;

    if( !m.isContinuous() || m.type() != mtype )
        _mbuf.allocate((dcn+1)*(scn+1));
        Mat tmp(dcn+1, scn+1, mtype, (double*)_mbuf);
        m.convertTo(tmp, mtype);
        m = tmp;
    else
        mbuf = m.ptr<double>();

    TransformFunc func = depth == CV_32F ?
        (TransformFunc)perspectiveTransform_32f :
        (TransformFunc)perspectiveTransform_64f;
    CV_Assert( func != 0 );

    const Mat* arrays[] = {&src, &dst, 0};
    uchar* ptrs[2];
    NAryMatIterator it(arrays, ptrs);
    size_t i, total = it.size;

    for( i = 0; i < it.nplanes; i++, ++it )
        func( ptrs[0], ptrs[1], (uchar*)mbuf, (int)total, scn, dcn );
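/*
   Usage sketch: mapping 2D points through a 3x3 homography (matrix arbitrary):

       std::vector<cv::Point2f> pts = { {0.f, 0.f}, {1.f, 0.f}, {0.f, 1.f} };
       cv::Mat H = cv::Mat::eye(3, 3, CV_64F);   // stand-in for any homography
       std::vector<cv::Point2f> out;
       cv::perspectiveTransform(pts, out, H);

   Note that, unlike cv::warpPerspective, this maps coordinates, not images.
*/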
/****************************************************************************************\
*                                        ScaleAdd                                       *
\****************************************************************************************/
static void scaleAdd_32f(const float* src1, const float* src2, float* dst,
                         int len, float* _alpha)
    float alpha = *_alpha;
    int i = 0;
#if CV_SSE2
    if( USE_SSE2 )
        __m128 a4 = _mm_set1_ps(alpha);
        if( (((size_t)src1|(size_t)src2|(size_t)dst) & 15) == 0 )
            for( ; i <= len - 8; i += 8 )
            {
                __m128 x0, x1, y0, y1, t0, t1;
                x0 = _mm_load_ps(src1 + i); x1 = _mm_load_ps(src1 + i + 4);
                y0 = _mm_load_ps(src2 + i); y1 = _mm_load_ps(src2 + i + 4);
                t0 = _mm_add_ps(_mm_mul_ps(x0, a4), y0);
                t1 = _mm_add_ps(_mm_mul_ps(x1, a4), y1);
                _mm_store_ps(dst + i, t0);
                _mm_store_ps(dst + i + 4, t1);
            }
        else
            for( ; i <= len - 8; i += 8 )
            {
                __m128 x0, x1, y0, y1, t0, t1;
                x0 = _mm_loadu_ps(src1 + i); x1 = _mm_loadu_ps(src1 + i + 4);
                y0 = _mm_loadu_ps(src2 + i); y1 = _mm_loadu_ps(src2 + i + 4);
                t0 = _mm_add_ps(_mm_mul_ps(x0, a4), y0);
                t1 = _mm_add_ps(_mm_mul_ps(x1, a4), y1);
                _mm_storeu_ps(dst + i, t0);
                _mm_storeu_ps(dst + i + 4, t1);
            }
#elif CV_NEON
    for ( ; i <= len - 4; i += 4)
    {
        float32x4_t v_src1 = vld1q_f32(src1 + i), v_src2 = vld1q_f32(src2 + i);
        vst1q_f32(dst + i, vaddq_f32(vmulq_n_f32(v_src1, alpha), v_src2));
    }
#endif
    //vz why do we need unroll here?
    for( ; i <= len - 4; i += 4 )
        float t0, t1;
        t0 = src1[i]*alpha + src2[i];
        t1 = src1[i+1]*alpha + src2[i+1];
        dst[i] = t0; dst[i+1] = t1;
        t0 = src1[i+2]*alpha + src2[i+2];
        t1 = src1[i+3]*alpha + src2[i+3];
        dst[i+2] = t0; dst[i+3] = t1;
    for(; i < len; i++ )
        dst[i] = src1[i]*alpha + src2[i];
static void scaleAdd_64f(const double* src1, const double* src2, double* dst,
                         int len, double* _alpha)
{
    double alpha = *_alpha;
    int i = 0;
#if CV_SSE2
    if( USE_SSE2 && (((size_t)src1|(size_t)src2|(size_t)dst) & 15) == 0 )
    {
        __m128d a2 = _mm_set1_pd(alpha);
        for( ; i <= len - 4; i += 4 )
        {
            __m128d x0, x1, y0, y1, t0, t1;
            x0 = _mm_load_pd(src1 + i); x1 = _mm_load_pd(src1 + i + 2);
            y0 = _mm_load_pd(src2 + i); y1 = _mm_load_pd(src2 + i + 2);
            t0 = _mm_add_pd(_mm_mul_pd(x0, a2), y0);
            t1 = _mm_add_pd(_mm_mul_pd(x1, a2), y1);
            _mm_store_pd(dst + i, t0);
            _mm_store_pd(dst + i + 2, t1);
        }
    }
#endif
    //vz why do we need unroll here?
    for( ; i <= len - 4; i += 4 )
    {
        double t0, t1;
        t0 = src1[i]*alpha + src2[i];
        t1 = src1[i+1]*alpha + src2[i+1];
        dst[i] = t0; dst[i+1] = t1;
        t0 = src1[i+2]*alpha + src2[i+2];
        t1 = src1[i+3]*alpha + src2[i+3];
        dst[i+2] = t0; dst[i+3] = t1;
    }
    for(; i < len; i++ )
        dst[i] = src1[i]*alpha + src2[i];
}
typedef void (*ScaleAddFunc)(const uchar* src1, const uchar* src2, uchar* dst, int len, const void* alpha);
#ifdef HAVE_OPENCL

static bool ocl_scaleAdd( InputArray _src1, double alpha, InputArray _src2, OutputArray _dst, int type )
{
    const ocl::Device & d = ocl::Device::getDefault();

    bool doubleSupport = d.doubleFPConfig() > 0;
    Size size = _src1.size();
    int depth = CV_MAT_DEPTH(type);
    if ( (!doubleSupport && depth == CV_64F) || size != _src2.size() )
        return false;

    _dst.create(size, type);
    int cn = CV_MAT_CN(type), wdepth = std::max(depth, CV_32F);
    int kercn = ocl::predictOptimalVectorWidthMax(_src1, _src2, _dst),
        rowsPerWI = d.isIntel() ? 4 : 1;

    char cvt[2][50];
    ocl::Kernel k("KF", ocl::core::arithm_oclsrc,
                  format("-D OP_SCALE_ADD -D BINARY_OP -D dstT=%s -D workT=%s -D convertToWT1=%s"
                         " -D srcT1=dstT -D srcT2=dstT -D convertToDT=%s -D workT1=%s"
                         " -D wdepth=%d%s -D rowsPerWI=%d",
                         ocl::typeToStr(CV_MAKE_TYPE(depth, kercn)),
                         ocl::typeToStr(CV_MAKE_TYPE(wdepth, kercn)),
                         ocl::convertTypeStr(depth, wdepth, kercn, cvt[0]),
                         ocl::convertTypeStr(wdepth, depth, kercn, cvt[1]),
                         ocl::typeToStr(wdepth), wdepth,
                         doubleSupport ? " -D DOUBLE_SUPPORT" : "", rowsPerWI));
    if (k.empty())
        return false;

    UMat src1 = _src1.getUMat(), src2 = _src2.getUMat(), dst = _dst.getUMat();

    ocl::KernelArg src1arg = ocl::KernelArg::ReadOnlyNoSize(src1),
            src2arg = ocl::KernelArg::ReadOnlyNoSize(src2),
            dstarg = ocl::KernelArg::WriteOnly(dst, cn, kercn);

    if (wdepth == CV_32F)
        k.args(src1arg, src2arg, dstarg, (float)alpha);
    else
        k.args(src1arg, src2arg, dstarg, alpha);

    size_t globalsize[2] = { dst.cols * cn / kercn, (dst.rows + rowsPerWI - 1) / rowsPerWI };
    return k.run(2, globalsize, NULL, false);
}

#endif

}
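/* Design note: the OpenCL path above reuses the generic "KF" kernel from
   arithm.cl, specialized at build time through the OP_SCALE_ADD/BINARY_OP
   defines; each work-item handles kercn packed elements and (on Intel devices)
   rowsPerWI=4 rows. Whenever the kernel cannot be built or run, ocl_scaleAdd
   returns false and the caller falls through to the CPU implementation below. */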
void cv::scaleAdd( InputArray _src1, double alpha, InputArray _src2, OutputArray _dst )
{
    int type = _src1.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
    CV_Assert( type == _src2.type() );

    CV_OCL_RUN(_src1.dims() <= 2 && _src2.dims() <= 2 && _dst.isUMat(),
               ocl_scaleAdd(_src1, alpha, _src2, _dst, type))

    if( depth < CV_32F )
    {
        addWeighted(_src1, alpha, _src2, 1, 0, _dst, depth);
        return;
    }

    Mat src1 = _src1.getMat(), src2 = _src2.getMat();
    CV_Assert(src1.size == src2.size);

    _dst.create(src1.dims, src1.size, type);
    Mat dst = _dst.getMat();

    float falpha = (float)alpha;
    void* palpha = depth == CV_32F ? (void*)&falpha : (void*)&alpha;

    ScaleAddFunc func = depth == CV_32F ? (ScaleAddFunc)scaleAdd_32f : (ScaleAddFunc)scaleAdd_64f;

    if (src1.isContinuous() && src2.isContinuous() && dst.isContinuous())
    {
        size_t len = src1.total()*cn;
        func(src1.ptr(), src2.ptr(), dst.ptr(), (int)len, palpha);
        return;
    }

    const Mat* arrays[] = {&src1, &src2, &dst, 0};
    uchar* ptrs[3];
    NAryMatIterator it(arrays, ptrs);
    size_t i, len = it.size*cn;

    for( i = 0; i < it.nplanes; i++, ++it )
        func( ptrs[0], ptrs[1], ptrs[2], (int)len, palpha );
}
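/* Usage sketch (illustrative only): scaleAdd computes the BLAS-style AXPY
   operation dst = alpha*src1 + src2. Integer depths are routed through
   addWeighted() above; CV_32F/CV_64F use the SIMD kernels.

       cv::Mat a = cv::Mat::ones(4, 4, CV_32F), b = cv::Mat::ones(4, 4, CV_32F), d;
       cv::scaleAdd(a, 2.0, b, d);   // every element of d becomes 3
*/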
/****************************************************************************************\
*                                   Covariance Matrix                                    *
\****************************************************************************************/
void cv::calcCovarMatrix( const Mat* data, int nsamples, Mat& covar, Mat& _mean, int flags, int ctype )
{
    CV_Assert( data && nsamples > 0 );
    Size size = data[0].size();
    int sz = size.width * size.height, esz = (int)data[0].elemSize();
    int type = data[0].type();
    Mat mean;
    ctype = std::max(std::max(CV_MAT_DEPTH(ctype >= 0 ? ctype : type), _mean.depth()), CV_32F);

    if( (flags & CV_COVAR_USE_AVG) != 0 )
    {
        CV_Assert( _mean.size() == size );
        if( _mean.isContinuous() && _mean.type() == ctype )
            mean = _mean.reshape(1, 1);
        else
        {
            _mean.convertTo(mean, ctype);
            mean = mean.reshape(1, 1);
        }
    }

    Mat _data(nsamples, sz, type);

    for( int i = 0; i < nsamples; i++ )
    {
        CV_Assert( data[i].size() == size && data[i].type() == type );
        if( data[i].isContinuous() )
            memcpy( _data.ptr(i), data[i].ptr(), sz*esz );
        else
        {
            Mat dataRow(size.height, size.width, type, _data.ptr(i));
            data[i].copyTo(dataRow);
        }
    }

    calcCovarMatrix( _data, covar, mean, (flags & ~(CV_COVAR_ROWS|CV_COVAR_COLS)) | CV_COVAR_ROWS, ctype );
    if( (flags & CV_COVAR_USE_AVG) == 0 )
        _mean = mean.reshape(1, size.height);
}
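/* Usage sketch (illustrative only): this overload flattens each sample image
   into one row of a temporary matrix and delegates to the single-matrix
   overload below.

       std::vector<cv::Mat> samples;          // hypothetical same-size CV_8U samples
       // ... fill samples ...
       cv::Mat covar, mean;
       cv::calcCovarMatrix(&samples[0], (int)samples.size(), covar, mean,
                           CV_COVAR_NORMAL | CV_COVAR_SCALE, CV_64F);
*/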
void cv::calcCovarMatrix( InputArray _src, OutputArray _covar, InputOutputArray _mean, int flags, int ctype )
{
    if(_src.kind() == _InputArray::STD_VECTOR_MAT)
    {
        std::vector<cv::Mat> src;
        _src.getMatVector(src);

        CV_Assert( src.size() > 0 );

        Size size = src[0].size();
        int type = src[0].type();

        ctype = std::max(std::max(CV_MAT_DEPTH(ctype >= 0 ? ctype : type), _mean.depth()), CV_32F);

        Mat _data(static_cast<int>(src.size()), size.area(), type);

        int i = 0;
        for(std::vector<cv::Mat>::iterator each = src.begin(); each != src.end(); each++, i++ )
        {
            CV_Assert( (*each).size() == size && (*each).type() == type );
            Mat dataRow(size.height, size.width, type, _data.ptr(i));
            (*each).copyTo(dataRow);
        }

        Mat mean;
        if( (flags & CV_COVAR_USE_AVG) != 0 )
        {
            CV_Assert( _mean.size() == size );

            mean = _mean.getMat();
            if( mean.type() != ctype )
            {
                _mean.create(mean.size(), ctype);
                Mat tmp = _mean.getMat();
                mean.convertTo(tmp, ctype);
            }

            mean = _mean.getMat().reshape(1, 1);
        }

        calcCovarMatrix( _data, _covar, mean, (flags & ~(CV_COVAR_ROWS|CV_COVAR_COLS)) | CV_COVAR_ROWS, ctype );

        if( (flags & CV_COVAR_USE_AVG) == 0 )
        {
            mean = mean.reshape(1, size.height);
            mean.copyTo(_mean);
        }
        return;
    }

    Mat data = _src.getMat(), mean;
    CV_Assert( ((flags & CV_COVAR_ROWS) != 0) ^ ((flags & CV_COVAR_COLS) != 0) );
    bool takeRows = (flags & CV_COVAR_ROWS) != 0;
    int type = data.type();
    int nsamples = takeRows ? data.rows : data.cols;
    CV_Assert( nsamples > 0 );
    Size size = takeRows ? Size(data.cols, 1) : Size(1, data.rows);

    if( (flags & CV_COVAR_USE_AVG) != 0 )
    {
        mean = _mean.getMat();
        ctype = std::max(std::max(CV_MAT_DEPTH(ctype >= 0 ? ctype : type), mean.depth()), CV_32F);
        CV_Assert( mean.size() == size );
        if( mean.type() != ctype )
        {
            _mean.create(mean.size(), ctype);
            Mat tmp = _mean.getMat();
            mean.convertTo(tmp, ctype);
            mean = tmp;
        }
    }
    else
    {
        ctype = std::max(CV_MAT_DEPTH(ctype >= 0 ? ctype : type), CV_32F);
        reduce( _src, _mean, takeRows ? 0 : 1, CV_REDUCE_AVG, ctype );
        mean = _mean.getMat();
    }

    mulTransposed( data, _covar, ((flags & CV_COVAR_NORMAL) == 0) ^ takeRows,
        mean, (flags & CV_COVAR_SCALE) != 0 ? 1./nsamples : 1, ctype );
}
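/* In both overloads the covariance itself comes from mulTransposed() below. With
   one sample per row (CV_COVAR_ROWS) and s = 1/nsamples if CV_COVAR_SCALE else 1:

       covar = s * (X - mean)^T * (X - mean)    when CV_COVAR_NORMAL is set,
       covar = s * (X - mean) * (X - mean)^T    otherwise ("scrambled" mode).
*/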
/****************************************************************************************\
*                                      Mahalanobis                                       *
\****************************************************************************************/
double cv::Mahalanobis( InputArray _v1, InputArray _v2, InputArray _icovar )
{
    Mat v1 = _v1.getMat(), v2 = _v2.getMat(), icovar = _icovar.getMat();
    int type = v1.type(), depth = v1.depth();
    Size sz = v1.size();
    int i, j, len = sz.width*sz.height*v1.channels();
    AutoBuffer<double> buf(len);
    double result = 0;

    CV_Assert( type == v2.type() && type == icovar.type() &&
        sz == v2.size() && len == icovar.rows && len == icovar.cols );

    sz.width *= v1.channels();
    if( v1.isContinuous() && v2.isContinuous() )
    {
        sz.width *= sz.height;
        sz.height = 1;
    }

    if( depth == CV_32F )
    {
        const float* src1 = v1.ptr<float>();
        const float* src2 = v2.ptr<float>();
        size_t step1 = v1.step/sizeof(src1[0]);
        size_t step2 = v2.step/sizeof(src2[0]);
        double* diff = buf;
        const float* mat = icovar.ptr<float>();
        size_t matstep = icovar.step/sizeof(mat[0]);

        for( ; sz.height--; src1 += step1, src2 += step2, diff += sz.width )
        {
            for( i = 0; i < sz.width; i++ )
                diff[i] = src1[i] - src2[i];
        }

        diff = buf;
        for( i = 0; i < len; i++, mat += matstep )
        {
            double row_sum = 0;
            j = 0;
#if CV_ENABLE_UNROLLED
            for(; j <= len - 4; j += 4 )
                row_sum += diff[j]*mat[j] + diff[j+1]*mat[j+1] +
                           diff[j+2]*mat[j+2] + diff[j+3]*mat[j+3];
#endif
            for( ; j < len; j++ )
                row_sum += diff[j]*mat[j];
            result += row_sum * diff[i];
        }
    }
    else if( depth == CV_64F )
    {
        const double* src1 = v1.ptr<double>();
        const double* src2 = v2.ptr<double>();
        size_t step1 = v1.step/sizeof(src1[0]);
        size_t step2 = v2.step/sizeof(src2[0]);
        double* diff = buf;
        const double* mat = icovar.ptr<double>();
        size_t matstep = icovar.step/sizeof(mat[0]);

        for( ; sz.height--; src1 += step1, src2 += step2, diff += sz.width )
        {
            for( i = 0; i < sz.width; i++ )
                diff[i] = src1[i] - src2[i];
        }

        diff = buf;
        for( i = 0; i < len; i++, mat += matstep )
        {
            double row_sum = 0;
            j = 0;
#if CV_ENABLE_UNROLLED
            for(; j <= len - 4; j += 4 )
                row_sum += diff[j]*mat[j] + diff[j+1]*mat[j+1] +
                           diff[j+2]*mat[j+2] + diff[j+3]*mat[j+3];
#endif
            for( ; j < len; j++ )
                row_sum += diff[j]*mat[j];
            result += row_sum * diff[i];
        }
    }
    else
        CV_Error( CV_StsUnsupportedFormat, "" );

    return std::sqrt(result);
}
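/* Usage sketch (illustrative only): the function evaluates

       dist = sqrt( (v1 - v2)^T * icovar * (v1 - v2) )

   where icovar is the INVERSE covariance matrix, e.g.

       cv::Mat icovar;
       cv::invert(covar, icovar, cv::DECOMP_SVD);
       double d = cv::Mahalanobis(vec1, vec2, icovar);  // vec1, vec2: 1xN, CV_32F/CV_64F
*/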
/****************************************************************************************\
*                                     MulTransposed                                      *
\****************************************************************************************/

namespace cv
{
template<typename sT, typename dT> static void
MulTransposedR( const Mat& srcmat, Mat& dstmat, const Mat& deltamat, double scale )
{
    int i, j, k;
    const sT* src = srcmat.ptr<sT>();
    dT* dst = dstmat.ptr<dT>();
    const dT* delta = deltamat.ptr<dT>();
    size_t srcstep = srcmat.step/sizeof(src[0]);
    size_t dststep = dstmat.step/sizeof(dst[0]);
    size_t deltastep = deltamat.rows > 1 ? deltamat.step/sizeof(delta[0]) : 0;
    int delta_cols = deltamat.cols;
    Size size = srcmat.size();
    dT* tdst = dst;
    dT* col_buf = 0;
    dT* delta_buf = 0;
    int buf_size = size.height*sizeof(dT);
    AutoBuffer<uchar> buf;

    if( delta && delta_cols < size.width )
    {
        assert( delta_cols == 1 );
        buf_size *= 5;  // col_buf + 4 replicated delta columns
    }
    buf.allocate(buf_size);
    col_buf = (dT*)(uchar*)buf;

    if( delta && delta_cols < size.width )
    {
        delta_buf = col_buf + size.height;
        for( i = 0; i < size.height; i++ )
            delta_buf[i*4] = delta_buf[i*4+1] =
                delta_buf[i*4+2] = delta_buf[i*4+3] = delta[i*deltastep];
        delta = delta_buf;
        deltastep = deltastep ? 4 : 0;
    }

    if( !delta )
        for( i = 0; i < size.width; i++, tdst += dststep )
        {
            for( k = 0; k < size.height; k++ )
                col_buf[k] = src[k*srcstep+i];

            for( j = i; j <= size.width - 4; j += 4 )
            {
                double s0 = 0, s1 = 0, s2 = 0, s3 = 0;
                const sT *tsrc = src + j;

                for( k = 0; k < size.height; k++, tsrc += srcstep )
                {
                    double a = col_buf[k];
                    s0 += a * tsrc[0];
                    s1 += a * tsrc[1];
                    s2 += a * tsrc[2];
                    s3 += a * tsrc[3];
                }

                tdst[j] = (dT)(s0*scale);
                tdst[j+1] = (dT)(s1*scale);
                tdst[j+2] = (dT)(s2*scale);
                tdst[j+3] = (dT)(s3*scale);
            }

            for( ; j < size.width; j++ )
            {
                double s0 = 0;
                const sT *tsrc = src + j;

                for( k = 0; k < size.height; k++, tsrc += srcstep )
                    s0 += (double)col_buf[k] * tsrc[0];

                tdst[j] = (dT)(s0*scale);
            }
        }
    else
        for( i = 0; i < size.width; i++, tdst += dststep )
        {
            if( !delta_buf )
                for( k = 0; k < size.height; k++ )
                    col_buf[k] = src[k*srcstep+i] - delta[k*deltastep+i];
            else
                for( k = 0; k < size.height; k++ )
                    col_buf[k] = src[k*srcstep+i] - delta_buf[k*deltastep];

            for( j = i; j <= size.width - 4; j += 4 )
            {
                double s0 = 0, s1 = 0, s2 = 0, s3 = 0;
                const sT *tsrc = src + j;
                const dT *d = delta_buf ? delta_buf : delta + j;

                for( k = 0; k < size.height; k++, tsrc+=srcstep, d+=deltastep )
                {
                    double a = col_buf[k];
                    s0 += a * (tsrc[0] - d[0]);
                    s1 += a * (tsrc[1] - d[1]);
                    s2 += a * (tsrc[2] - d[2]);
                    s3 += a * (tsrc[3] - d[3]);
                }

                tdst[j] = (dT)(s0*scale);
                tdst[j+1] = (dT)(s1*scale);
                tdst[j+2] = (dT)(s2*scale);
                tdst[j+3] = (dT)(s3*scale);
            }

            for( ; j < size.width; j++ )
            {
                double s0 = 0;
                const sT *tsrc = src + j;
                const dT *d = delta_buf ? delta_buf : delta + j;

                for( k = 0; k < size.height; k++, tsrc+=srcstep, d+=deltastep )
                    s0 += (double)col_buf[k] * (tsrc[0] - d[0]);

                tdst[j] = (dT)(s0*scale);
            }
        }
}
template<typename sT, typename dT> static void
MulTransposedL( const Mat& srcmat, Mat& dstmat, const Mat& deltamat, double scale )
{
    int i, j, k;
    const sT* src = srcmat.ptr<sT>();
    dT* dst = dstmat.ptr<dT>();
    const dT* delta = deltamat.ptr<dT>();
    size_t srcstep = srcmat.step/sizeof(src[0]);
    size_t dststep = dstmat.step/sizeof(dst[0]);
    size_t deltastep = deltamat.rows > 1 ? deltamat.step/sizeof(delta[0]) : 0;
    int delta_cols = deltamat.cols;
    Size size = srcmat.size();
    dT* tdst = dst;

    if( !delta )
        for( i = 0; i < size.height; i++, tdst += dststep )
            for( j = i; j < size.height; j++ )
            {
                double s = 0;
                const sT *tsrc1 = src + i*srcstep;
                const sT *tsrc2 = src + j*srcstep;

                for( k = 0; k <= size.width - 4; k += 4 )
                    s += (double)tsrc1[k]*tsrc2[k] + (double)tsrc1[k+1]*tsrc2[k+1] +
                         (double)tsrc1[k+2]*tsrc2[k+2] + (double)tsrc1[k+3]*tsrc2[k+3];
                for( ; k < size.width; k++ )
                    s += (double)tsrc1[k] * tsrc2[k];
                tdst[j] = (dT)(s*scale);
            }
    else
    {
        dT delta_buf[4];
        int delta_shift = delta_cols == size.width ? 4 : 0;
        AutoBuffer<uchar> buf(size.width*sizeof(dT));
        dT* row_buf = (dT*)(uchar*)buf;

        for( i = 0; i < size.height; i++, tdst += dststep )
        {
            const sT *tsrc1 = src + i*srcstep;
            const dT *tdelta1 = delta + i*deltastep;

            if( delta_cols < size.width )
                for( k = 0; k < size.width; k++ )
                    row_buf[k] = tsrc1[k] - tdelta1[0];
            else
                for( k = 0; k < size.width; k++ )
                    row_buf[k] = tsrc1[k] - tdelta1[k];

            for( j = i; j < size.height; j++ )
            {
                double s = 0;
                const sT *tsrc2 = src + j*srcstep;
                const dT *tdelta2 = delta + j*deltastep;
                if( delta_cols < size.width )
                {
                    delta_buf[0] = delta_buf[1] =
                        delta_buf[2] = delta_buf[3] = tdelta2[0];
                    tdelta2 = delta_buf;
                }
                for( k = 0; k <= size.width-4; k += 4, tdelta2 += delta_shift )
                    s += (double)row_buf[k]*(tsrc2[k] - tdelta2[0]) +
                         (double)row_buf[k+1]*(tsrc2[k+1] - tdelta2[1]) +
                         (double)row_buf[k+2]*(tsrc2[k+2] - tdelta2[2]) +
                         (double)row_buf[k+3]*(tsrc2[k+3] - tdelta2[3]);
                for( ; k < size.width; k++, tdelta2++ )
                    s += (double)row_buf[k]*(tsrc2[k] - tdelta2[0]);
                tdst[j] = (dT)(s*scale);
            }
        }
    }
}

typedef void (*MulTransposedFunc)(const Mat& src, Mat& dst, const Mat& delta, double scale);

}
void cv::mulTransposed( InputArray _src, OutputArray _dst, bool ata,
                        InputArray _delta, double scale, int dtype )
{
    Mat src = _src.getMat(), delta = _delta.getMat();
    const int gemm_level = 100; // boundary above which GEMM is faster.
    int stype = src.type();
    dtype = std::max(std::max(CV_MAT_DEPTH(dtype >= 0 ? dtype : stype), delta.depth()), CV_32F);
    CV_Assert( src.channels() == 1 );

    if( !delta.empty() )
    {
        CV_Assert( delta.channels() == 1 &&
            (delta.rows == src.rows || delta.rows == 1) &&
            (delta.cols == src.cols || delta.cols == 1));
        if( delta.type() != dtype )
            delta.convertTo(delta, dtype);
    }

    int dsize = ata ? src.cols : src.rows;
    _dst.create( dsize, dsize, dtype );
    Mat dst = _dst.getMat();

    if( src.data == dst.data || (stype == dtype &&
        (dst.cols >= gemm_level && dst.rows >= gemm_level &&
         src.cols >= gemm_level && src.rows >= gemm_level)))
    {
        Mat src2;
        const Mat* tsrc = &src;
        if( !delta.empty() )
        {
            if( delta.size() == src.size() )
                subtract( src, delta, src2 );
            else
            {
                repeat(delta, src.rows/delta.rows, src.cols/delta.cols, src2);
                subtract( src, src2, src2 );
            }
            tsrc = &src2;
        }
        gemm( *tsrc, *tsrc, scale, Mat(), 0, dst, ata ? GEMM_1_T : GEMM_2_T );
    }
    else
    {
        MulTransposedFunc func = 0;
        if(stype == CV_8U && dtype == CV_32F)
        {
            if(ata)
                func = MulTransposedR<uchar,float>;
            else
                func = MulTransposedL<uchar,float>;
        }
        else if(stype == CV_8U && dtype == CV_64F)
        {
            if(ata)
                func = MulTransposedR<uchar,double>;
            else
                func = MulTransposedL<uchar,double>;
        }
        else if(stype == CV_16U && dtype == CV_32F)
        {
            if(ata)
                func = MulTransposedR<ushort,float>;
            else
                func = MulTransposedL<ushort,float>;
        }
        else if(stype == CV_16U && dtype == CV_64F)
        {
            if(ata)
                func = MulTransposedR<ushort,double>;
            else
                func = MulTransposedL<ushort,double>;
        }
        else if(stype == CV_16S && dtype == CV_32F)
        {
            if(ata)
                func = MulTransposedR<short,float>;
            else
                func = MulTransposedL<short,float>;
        }
        else if(stype == CV_16S && dtype == CV_64F)
        {
            if(ata)
                func = MulTransposedR<short,double>;
            else
                func = MulTransposedL<short,double>;
        }
        else if(stype == CV_32F && dtype == CV_32F)
        {
            if(ata)
                func = MulTransposedR<float,float>;
            else
                func = MulTransposedL<float,float>;
        }
        else if(stype == CV_32F && dtype == CV_64F)
        {
            if(ata)
                func = MulTransposedR<float,double>;
            else
                func = MulTransposedL<float,double>;
        }
        else if(stype == CV_64F && dtype == CV_64F)
        {
            if(ata)
                func = MulTransposedR<double,double>;
            else
                func = MulTransposedL<double,double>;
        }
        else
            CV_Error( CV_StsUnsupportedFormat, "" );

        func( src, dst, delta, scale );
        completeSymm( dst, false );
    }
}
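/* Usage sketch (illustrative only): mulTransposed computes
   dst = scale*(src - delta)^T*(src - delta) for ata == true, or
   dst = scale*(src - delta)*(src - delta)^T for ata == false;
   large same-type inputs are handed to gemm() instead of the kernels above.

       cv::Mat A(100, 5, CV_32F), AtA;
       cv::randu(A, 0, 1);
       cv::mulTransposed(A, AtA, true);   // 5x5 Gram matrix of the columns
*/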
/****************************************************************************************\
*                                      Dot Product                                       *
\****************************************************************************************/

namespace cv
{
template<typename T> double
dotProd_(const T* src1, const T* src2, int len)
{
    int i = 0;
    double result = 0;

#if CV_ENABLE_UNROLLED
    for( ; i <= len - 4; i += 4 )
        result += (double)src1[i]*src2[i] + (double)src1[i+1]*src2[i+1] +
            (double)src1[i+2]*src2[i+2] + (double)src1[i+3]*src2[i+3];
#endif
    for( ; i < len; i++ )
        result += (double)src1[i]*src2[i];

    return result;
}
static double dotProd_8u(const uchar* src1, const uchar* src2, int len)
{
    double r = 0;
#if ARITHM_USE_IPP && 0
    CV_IPP_CHECK()
    {
        if (0 <= ippiDotProd_8u64f_C1R(src1, (int)(len*sizeof(src1[0])),
                                       src2, (int)(len*sizeof(src2[0])),
                                       ippiSize(len, 1), &r))
        {
            CV_IMPL_ADD(CV_IMPL_IPP);
            return r;
        }
        setIppErrorStatus();
    }
#endif
    int i = 0;

#if CV_SSE2
    if( USE_SSE2 )
    {
        int j, len0 = len & -4, blockSize0 = (1 << 13), blockSize;
        __m128i z = _mm_setzero_si128();
        CV_DECL_ALIGNED(16) int buf[4];

        while( i < len0 )
        {
            blockSize = std::min(len0 - i, blockSize0);
            __m128i s = z;
            j = 0;
            for( ; j <= blockSize - 16; j += 16 )
            {
                __m128i b0 = _mm_loadu_si128((const __m128i*)(src1 + j));
                __m128i b1 = _mm_loadu_si128((const __m128i*)(src2 + j));
                __m128i s0, s1, s2, s3;
                s0 = _mm_unpacklo_epi8(b0, z);
                s2 = _mm_unpackhi_epi8(b0, z);
                s1 = _mm_unpacklo_epi8(b1, z);
                s3 = _mm_unpackhi_epi8(b1, z);
                s0 = _mm_madd_epi16(s0, s1);
                s2 = _mm_madd_epi16(s2, s3);
                s = _mm_add_epi32(s, s0);
                s = _mm_add_epi32(s, s2);
            }

            for( ; j < blockSize; j += 4 )
            {
                __m128i s0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(const int*)(src1 + j)), z);
                __m128i s1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(const int*)(src2 + j)), z);
                s0 = _mm_madd_epi16(s0, s1);
                s = _mm_add_epi32(s, s0);
            }

            _mm_store_si128((__m128i*)buf, s);
            r += buf[0] + buf[1] + buf[2] + buf[3];

            src1 += blockSize;
            src2 += blockSize;
            i += blockSize;
        }
    }
#elif CV_NEON
    int len0 = len & -8, blockSize0 = (1 << 15), blockSize;
    uint32x4_t v_zero = vdupq_n_u32(0u);
    CV_DECL_ALIGNED(16) uint buf[4];

    while( i < len0 )
    {
        blockSize = std::min(len0 - i, blockSize0);
        uint32x4_t v_sum = v_zero;

        int j = 0;
        for( ; j <= blockSize - 16; j += 16 )
        {
            uint8x16_t v_src1 = vld1q_u8(src1 + j), v_src2 = vld1q_u8(src2 + j);

            uint16x8_t v_src10 = vmovl_u8(vget_low_u8(v_src1)), v_src20 = vmovl_u8(vget_low_u8(v_src2));
            v_sum = vmlal_u16(v_sum, vget_low_u16(v_src10), vget_low_u16(v_src20));
            v_sum = vmlal_u16(v_sum, vget_high_u16(v_src10), vget_high_u16(v_src20));

            v_src10 = vmovl_u8(vget_high_u8(v_src1));
            v_src20 = vmovl_u8(vget_high_u8(v_src2));
            v_sum = vmlal_u16(v_sum, vget_low_u16(v_src10), vget_low_u16(v_src20));
            v_sum = vmlal_u16(v_sum, vget_high_u16(v_src10), vget_high_u16(v_src20));
        }

        for( ; j <= blockSize - 8; j += 8 )
        {
            uint16x8_t v_src1 = vmovl_u8(vld1_u8(src1 + j)), v_src2 = vmovl_u8(vld1_u8(src2 + j));
            v_sum = vmlal_u16(v_sum, vget_low_u16(v_src1), vget_low_u16(v_src2));
            v_sum = vmlal_u16(v_sum, vget_high_u16(v_src1), vget_high_u16(v_src2));
        }

        vst1q_u32(buf, v_sum);
        r += buf[0] + buf[1] + buf[2] + buf[3];

        src1 += blockSize;
        src2 += blockSize;
        i += blockSize;
    }
#endif
    return r + dotProd_(src1, src2, len - i);
}
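/* Note on the blocking above: the 8-bit products are accumulated in 32-bit SIMD
   lanes, so the partial sum is flushed into the double accumulator 'r' after at
   most blockSize0 elements; this keeps the integer accumulator from overflowing
   no matter how long the input is. */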
static double dotProd_8s(const schar* src1, const schar* src2, int len)
{
    double r = 0.0;
    int i = 0;

#if CV_NEON
    int len0 = len & -8, blockSize0 = (1 << 14), blockSize;
    int32x4_t v_zero = vdupq_n_s32(0);
    CV_DECL_ALIGNED(16) int buf[4];

    while( i < len0 )
    {
        blockSize = std::min(len0 - i, blockSize0);
        int32x4_t v_sum = v_zero;

        int j = 0;
        for( ; j <= blockSize - 16; j += 16 )
        {
            int8x16_t v_src1 = vld1q_s8(src1 + j), v_src2 = vld1q_s8(src2 + j);

            int16x8_t v_src10 = vmovl_s8(vget_low_s8(v_src1)), v_src20 = vmovl_s8(vget_low_s8(v_src2));
            v_sum = vmlal_s16(v_sum, vget_low_s16(v_src10), vget_low_s16(v_src20));
            v_sum = vmlal_s16(v_sum, vget_high_s16(v_src10), vget_high_s16(v_src20));

            v_src10 = vmovl_s8(vget_high_s8(v_src1));
            v_src20 = vmovl_s8(vget_high_s8(v_src2));
            v_sum = vmlal_s16(v_sum, vget_low_s16(v_src10), vget_low_s16(v_src20));
            v_sum = vmlal_s16(v_sum, vget_high_s16(v_src10), vget_high_s16(v_src20));
        }

        for( ; j <= blockSize - 8; j += 8 )
        {
            int16x8_t v_src1 = vmovl_s8(vld1_s8(src1 + j)), v_src2 = vmovl_s8(vld1_s8(src2 + j));
            v_sum = vmlal_s16(v_sum, vget_low_s16(v_src1), vget_low_s16(v_src2));
            v_sum = vmlal_s16(v_sum, vget_high_s16(v_src1), vget_high_s16(v_src2));
        }

        vst1q_s32(buf, v_sum);
        r += buf[0] + buf[1] + buf[2] + buf[3];

        src1 += blockSize;
        src2 += blockSize;
        i += blockSize;
    }
#endif

    return r + dotProd_(src1, src2, len - i);
}
static double dotProd_16u(const ushort* src1, const ushort* src2, int len)
{
#if (ARITHM_USE_IPP == 1)
    CV_IPP_CHECK()
    {
        double r = 0;
        if (0 <= ippiDotProd_16u64f_C1R(src1, (int)(len*sizeof(src1[0])), src2, (int)(len*sizeof(src2[0])), ippiSize(len, 1), &r))
        {
            CV_IMPL_ADD(CV_IMPL_IPP);
            return r;
        }
        setIppErrorStatus();
    }
#endif
    return dotProd_(src1, src2, len);
}
static double dotProd_16s(const short* src1, const short* src2, int len)
{
#if (ARITHM_USE_IPP == 1)
    CV_IPP_CHECK()
    {
        double r = 0;
        if (0 <= ippiDotProd_16s64f_C1R(src1, (int)(len*sizeof(src1[0])), src2, (int)(len*sizeof(src2[0])), ippiSize(len, 1), &r))
        {
            CV_IMPL_ADD(CV_IMPL_IPP);
            return r;
        }
        setIppErrorStatus();
    }
#endif
    return dotProd_(src1, src2, len);
}
static double dotProd_32s(const int* src1, const int* src2, int len)
{
#if (ARITHM_USE_IPP == 1)
    CV_IPP_CHECK()
    {
        double r = 0;
        if (0 <= ippiDotProd_32s64f_C1R(src1, (int)(len*sizeof(src1[0])), src2, (int)(len*sizeof(src2[0])), ippiSize(len, 1), &r))
        {
            CV_IMPL_ADD(CV_IMPL_IPP);
            return r;
        }
        setIppErrorStatus();
    }
#endif
    return dotProd_(src1, src2, len);
}
static double dotProd_32f(const float* src1, const float* src2, int len)
{
    double r = 0.0;
    int i = 0;

#if (ARITHM_USE_IPP == 1)
    CV_IPP_CHECK()
    {
        if (0 <= ippsDotProd_32f64f(src1, src2, len, &r))
        {
            CV_IMPL_ADD(CV_IMPL_IPP);
            return r;
        }
        setIppErrorStatus();
    }
#elif CV_NEON
    int len0 = len & -4, blockSize0 = (1 << 13), blockSize;
    float32x4_t v_zero = vdupq_n_f32(0.0f);
    CV_DECL_ALIGNED(16) float buf[4];

    while( i < len0 )
    {
        blockSize = std::min(len0 - i, blockSize0);
        float32x4_t v_sum = v_zero;

        int j = 0;
        for( ; j <= blockSize - 4; j += 4 )
            v_sum = vmlaq_f32(v_sum, vld1q_f32(src1 + j), vld1q_f32(src2 + j));

        vst1q_f32(buf, v_sum);
        r += buf[0] + buf[1] + buf[2] + buf[3];

        src1 += blockSize;
        src2 += blockSize;
        i += blockSize;
    }
#endif
    return r + dotProd_(src1, src2, len - i);
}
static double dotProd_64f(const double* src1, const double* src2, int len)
{
#if (ARITHM_USE_IPP == 1)
    CV_IPP_CHECK()
    {
        double r = 0;
        if (0 <= ippsDotProd_64f(src1, src2, len, &r))
        {
            CV_IMPL_ADD(CV_IMPL_IPP);
            return r;
        }
        setIppErrorStatus();
    }
#endif
    return dotProd_(src1, src2, len);
}
typedef double (*DotProdFunc)(const uchar* src1, const uchar* src2, int len);

static DotProdFunc getDotProdFunc(int depth)
{
    static DotProdFunc dotProdTab[] =
    {
        (DotProdFunc)GET_OPTIMIZED(dotProd_8u), (DotProdFunc)GET_OPTIMIZED(dotProd_8s),
        (DotProdFunc)dotProd_16u, (DotProdFunc)dotProd_16s,
        (DotProdFunc)dotProd_32s, (DotProdFunc)GET_OPTIMIZED(dotProd_32f),
        (DotProdFunc)dotProd_64f, 0
    };

    return dotProdTab[depth];
}
double Mat::dot(InputArray _mat) const
{
    Mat mat = _mat.getMat();
    int cn = channels();
    DotProdFunc func = getDotProdFunc(depth());
    CV_Assert( mat.type() == type() && mat.size == size && func != 0 );

    if( isContinuous() && mat.isContinuous() )
    {
        size_t len = total()*cn;
        if( len == (size_t)(int)len )
            return func(data, mat.data, (int)len);
    }

    const Mat* arrays[] = {this, &mat, 0};
    uchar* ptrs[2];
    NAryMatIterator it(arrays, ptrs);
    int len = (int)(it.size*cn);
    double r = 0;

    for( size_t i = 0; i < it.nplanes; i++, ++it )
        r += func( ptrs[0], ptrs[1], len );

    return r;
}

}
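/* Usage sketch (illustrative only): Mat::dot flattens both arrays, channels
   included, and accumulates in double precision:

       cv::Mat a = (cv::Mat_<float>(1,3) << 1, 2, 3);
       cv::Mat b = (cv::Mat_<float>(1,3) << 4, 5, 6);
       double d = a.dot(b);   // 1*4 + 2*5 + 3*6 = 32
*/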
/****************************************************************************************\
*                                      Earlier API                                       *
\****************************************************************************************/
CV_IMPL void cvGEMM( const CvArr* Aarr, const CvArr* Barr, double alpha,
                     const CvArr* Carr, double beta, CvArr* Darr, int flags )
{
    cv::Mat A = cv::cvarrToMat(Aarr), B = cv::cvarrToMat(Barr);
    cv::Mat C, D = cv::cvarrToMat(Darr);

    if( Carr )
        C = cv::cvarrToMat(Carr);

    CV_Assert( (D.rows == ((flags & CV_GEMM_A_T) == 0 ? A.rows : A.cols)) &&
               (D.cols == ((flags & CV_GEMM_B_T) == 0 ? B.cols : B.rows)) &&
               D.type() == A.type() );

    gemm( A, B, alpha, C, beta, D, flags );
}
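/* The wrappers in this section keep the pre-2.x C API alive: each converts its
   CvArr/CvMat arguments to cv::Mat and forwards to the C++ implementation, so
   the call above is equivalent to (illustrative)

       cv::gemm(A, B, alpha, C, beta, D, flags);   // D = alpha*op(A)*op(B) + beta*C
*/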
CV_IMPL void
cvTransform( const CvArr* srcarr, CvArr* dstarr,
             const CvMat* transmat, const CvMat* shiftvec )
{
    cv::Mat m = cv::cvarrToMat(transmat), src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);

    if( shiftvec )
    {
        cv::Mat v = cv::cvarrToMat(shiftvec).reshape(1,m.rows),
            _m(m.rows, m.cols + 1, m.type()), m1 = _m.colRange(0,m.cols), v1 = _m.col(m.cols);
        m.convertTo(m1, m1.type());
        v.convertTo(v1, v1.type());
        m = _m;
    }

    CV_Assert( dst.depth() == src.depth() && dst.channels() == m.rows );
    cv::transform( src, dst, m );
}
CV_IMPL void
cvPerspectiveTransform( const CvArr* srcarr, CvArr* dstarr, const CvMat* mat )
{
    cv::Mat m = cv::cvarrToMat(mat), src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);

    CV_Assert( dst.type() == src.type() && dst.channels() == m.rows-1 );
    cv::perspectiveTransform( src, dst, m );
}
CV_IMPL void cvScaleAdd( const CvArr* srcarr1, CvScalar scale,
                         const CvArr* srcarr2, CvArr* dstarr )
{
    cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);

    CV_Assert( src1.size == dst.size && src1.type() == dst.type() );
    cv::scaleAdd( src1, scale.val[0], cv::cvarrToMat(srcarr2), dst );
}
CV_IMPL void
cvCalcCovarMatrix( const CvArr** vecarr, int count,
                   CvArr* covarr, CvArr* avgarr, int flags )
{
    cv::Mat cov0 = cv::cvarrToMat(covarr), cov = cov0, mean0, mean;
    CV_Assert( vecarr != 0 && count >= 1 );

    if( avgarr )
        mean = mean0 = cv::cvarrToMat(avgarr);

    if( (flags & CV_COVAR_COLS) != 0 || (flags & CV_COVAR_ROWS) != 0 )
    {
        cv::Mat data = cv::cvarrToMat(vecarr[0]);
        cv::calcCovarMatrix( data, cov, mean, flags, cov.type() );
    }
    else
    {
        std::vector<cv::Mat> data(count);
        for( int i = 0; i < count; i++ )
            data[i] = cv::cvarrToMat(vecarr[i]);
        cv::calcCovarMatrix( &data[0], count, cov, mean, flags, cov.type() );
    }

    if( mean.data != mean0.data && mean0.data )
        mean.convertTo(mean0, mean0.type());

    if( cov.data != cov0.data )
        cov.convertTo(cov0, cov0.type());
}
CV_IMPL double
cvMahalanobis( const CvArr* srcAarr, const CvArr* srcBarr, const CvArr* matarr )
{
    return cv::Mahalanobis(cv::cvarrToMat(srcAarr),
        cv::cvarrToMat(srcBarr), cv::cvarrToMat(matarr));
}
CV_IMPL void
cvMulTransposed( const CvArr* srcarr, CvArr* dstarr,
                 int order, const CvArr* deltaarr, double scale )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst0 = cv::cvarrToMat(dstarr), dst = dst0, delta;
    if( deltaarr )
        delta = cv::cvarrToMat(deltaarr);
    cv::mulTransposed( src, dst, order != 0, delta, scale, dst.type());
    if( dst.data != dst0.data )
        dst.convertTo(dst0, dst0.type());
}
CV_IMPL double cvDotProduct( const CvArr* srcAarr, const CvArr* srcBarr )
{
    return cv::cvarrToMat(srcAarr).dot(cv::cvarrToMat(srcBarr));
}
CV_IMPL void
cvCalcPCA( const CvArr* data_arr, CvArr* avg_arr, CvArr* eigenvals, CvArr* eigenvects, int flags )
{
    cv::Mat data = cv::cvarrToMat(data_arr), mean0 = cv::cvarrToMat(avg_arr);
    cv::Mat evals0 = cv::cvarrToMat(eigenvals), evects0 = cv::cvarrToMat(eigenvects);
    cv::Mat mean = mean0, evals = evals0, evects = evects0;

    cv::PCA pca;
    pca.mean = mean;
    pca.eigenvalues = evals;
    pca.eigenvectors = evects;

    pca(data, (flags & CV_PCA_USE_AVG) ? mean : cv::Mat(),
        flags, !evals.empty() ? evals.rows + evals.cols - 1 : 0);

    if( pca.mean.size() == mean.size() )
        pca.mean.convertTo( mean, mean.type() );
    else
    {
        cv::Mat temp; pca.mean.convertTo( temp, mean.type() );
        transpose( temp, mean );
    }

    evals = pca.eigenvalues;
    evects = pca.eigenvectors;
    int ecount0 = evals0.cols + evals0.rows - 1;
    int ecount = evals.cols + evals.rows - 1;

    CV_Assert( (evals0.cols == 1 || evals0.rows == 1) &&
                ecount0 <= ecount &&
                evects0.cols == evects.cols &&
                evects0.rows == ecount0 );

    cv::Mat temp = evals0;
    if( evals.rows == 1 )
        evals.colRange(0, ecount0).convertTo(temp, evals0.type());
    else
        evals.rowRange(0, ecount0).convertTo(temp, evals0.type());
    if( temp.data != evals0.data )
        transpose(temp, evals0);
    evects.rowRange(0, ecount0).convertTo( evects0, evects0.type() );

    // otherwise, some datatypes or sizes were incorrect, so the output arrays have been reallocated
    CV_Assert( mean0.data == mean.data );
}
CV_IMPL void
cvProjectPCA( const CvArr* data_arr, const CvArr* avg_arr,
              const CvArr* eigenvects, CvArr* result_arr )
{
    cv::Mat data = cv::cvarrToMat(data_arr), mean = cv::cvarrToMat(avg_arr);
    cv::Mat evects = cv::cvarrToMat(eigenvects), dst0 = cv::cvarrToMat(result_arr), dst = dst0;

    cv::PCA pca;
    pca.mean = mean;
    int n;
    if( mean.rows == 1 )
    {
        CV_Assert(dst.cols <= evects.rows && dst.rows == data.rows);
        n = dst.cols;
    }
    else
    {
        CV_Assert(dst.rows <= evects.rows && dst.cols == data.cols);
        n = dst.rows;
    }
    pca.eigenvectors = evects.rowRange(0, n);

    cv::Mat result = pca.project(data);
    if( result.cols != dst.cols )
        result = result.reshape(1, 1);
    result.convertTo(dst, dst.type());

    CV_Assert(dst0.data == dst.data);
}
CV_IMPL void
cvBackProjectPCA( const CvArr* proj_arr, const CvArr* avg_arr,
                  const CvArr* eigenvects, CvArr* result_arr )
{
    cv::Mat data = cv::cvarrToMat(proj_arr), mean = cv::cvarrToMat(avg_arr);
    cv::Mat evects = cv::cvarrToMat(eigenvects), dst0 = cv::cvarrToMat(result_arr), dst = dst0;

    cv::PCA pca;
    pca.mean = mean;
    int n;
    if( mean.rows == 1 )
    {
        CV_Assert(data.cols <= evects.rows && dst.rows == data.rows);
        n = data.cols;
    }
    else
    {
        CV_Assert(data.rows <= evects.rows && dst.cols == data.cols);
        n = data.rows;
    }
    pca.eigenvectors = evects.rowRange(0, n);

    cv::Mat result = pca.backProject(data);
    result.convertTo(dst, dst.type());

    CV_Assert(dst0.data == dst.data);
}

/* End of file. */