--- /dev/null
+/****************************************Implementation**Details**********************************************/
+/*                                                                                                           */
+/* Let's denote by (a,a1i) the complex number a+a1*i.                                                        */
+/* Complex multiplication: (a,a1i)*(b,b1i).                                                                  */
+/* Since i*i=-1, the product is:                                                                             */
+/* (a+a1*i)(b+b1*i) = a*b + a1*i*b1*i + a1*i*b + a*b1*i = a*b-a1*b1 + (a1*b+a*b1)*i = (ab-a1b1, a1b+ab1)     */
+/* So let c = ab-a1b1 and ci = a1b+ab1. With only fused multiply-add/subtract available:                     */
+/* c  = c+a*b-a1*b1  =>  c = a*b-(a1*b1-c),  i.e.  c = a1*b1-c  then  c = a*b-c    (two fused msubs)         */
+/* ci = ci+a1*b+a*b1 =>  ci = a1*b+ci  then  ci = a*b1+ci                          (two fused madds)         */
+/* For SIMD the real parts and the imaginary parts are grouped together,                                     */
+/* e.g. (realA,realK) and (imagA,imagK):                                                                     */
+/* Simd(0,1)  = (a*b,k*b) - ((ai*bi,ki*bi) - Simd(0,1))                                                      */
+/* SimdI(0,1) = SimdI(0,1) + (a*bi,k*bi) + (ai*b,ki*b)                                                       */
+/*                                                                                                           */
+/*                                                                                                           */
+/* For defined(NR) || defined(NC) || defined(TR) || defined(TC)  (B conjugated):                             */
+/* (a+a1*I)(b-b1*I) = ab+a1*b1 + I(a1b-ab1)                                                                  */
+/*                                                                                                           */
+/* c  = c+ab+a1b1  =>  c = a1b1+c;  c = ab+c                                                                 */
+/* ci = ci+a1b-ab1 =>  ci = a1*b-(ab1-ci),  i.e.  ci = ab1-ci  then  ci = a1*b-ci                            */
+/*                                                                                                           */
+/*                                                                                                           */
+/* For defined(RN) || defined(RT) || defined(CN) || defined(CT)  (A conjugated):                             */
+/* (a-a1*I)(b+b1*I) = ab+a1*b1 + I(-a1b+ab1)                                                                 */
+/*                                                                                                           */
+/* c  = c+ab+a1b1  =>  c = a1b1+c;  c = ab+c                                                                 */
+/* ci = ci-a1b+ab1 =>  ci = a*b1-(a1b-ci),  i.e.  ci = a1b-ci  then  ci = a*b1-ci                            */
+/*                                                                                                           */
+/*                                                                                                           */
+/* For defined(RR) || defined(RC) || defined(CR) || defined(CC)  (both conjugated):                          */
+/* (a-a1*I)(b-b1*I) = ab-a1*b1 + I(-a1b-ab1)                                                                 */
+/*                                                                                                           */
+/* IBM z13 only has fused x*y+m and x*y-m instructions (no negated-multiply forms),                          */
+/* so the negative accumulations are computed with flipped signs instead of an                               */
+/* extra negation step per iteration:                                                                        */
+/* ci = a*b1+ci  then  ci = a1*b+ci   (two fused madds): each step adds (a1b+ab1),                           */
+/*                                    the negative of the true contribution -(a1b+ab1)                       */
+/* c  = a*b-c    then  c  = a1*b1-c   (two fused msubs): each step adds (a1b1-ab),                           */
+/*                                    the negative of the true contribution (ab-a1b1)                        */
+/*                                                                                                           */
+/* The accumulators therefore hold (-c) and (-ci).                                                           */
+/* The sign is fixed by multiplying with the negated alpha, -(alpha+alpha_i*I),                              */
+/* negated once up front:                                                                                    */
+/* lcdbr ALPHA_I,ALPHA_I                                                                                     */
+/* lcdbr ALPHA  ,ALPHA                                                                                       */
+/*************************************************************************************************************/
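+
+/* As a plain C sketch (illustrative only, not part of the kernel; the function
+   name is hypothetical): the NN accumulation order derived above, two fused
+   multiply-subs for the real part and two fused multiply-adds for the
+   imaginary part, one k-step per call:
+
+   static inline void cmadd_nn(double *c, double *ci,
+                               double a, double a1, double b, double b1) {
+       *c  = a1 * b1 - *c;     // msub: c' = a1*b1 - c
+       *c  = a  * b  - *c;     // msub: c  = a*b - c' = c + a*b - a1*b1
+       *ci = a1 * b  + *ci;    // madd: ci' = ci + a1*b
+       *ci = a  * b1 + *ci;    // madd: ci  = ci + a1*b + a*b1
+   }
+*/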
+
+/*************************Zero vectors***************************************/
+/*zero vectors for 4x4 */
+.macro ZERO_ZCVEC_4x4
+ vzero %v16
+ vzero %v17
+ vzero %v18
+ vzero %v19
+ vzero %v20
+ vzero %v21
+ vzero %v22
+ vzero %v23
+ vzero %v24
+ vzero %v25
+ vzero %v26
+ vzero %v27
+ vzero %v28
+ vzero %v29
+ vzero %v30
+ vzero %v31
+.endm
+
+/*zero vectors for 2x4 */
+.macro ZERO_ZCVEC_2x4
+ vzero %v16
+ vzero %v17
+ vzero %v18
+ vzero %v19
+ vzero %v20
+ vzero %v21
+ vzero %v22
+ vzero %v23
+.endm
+
+/*zero vectors for 1x4 */
+.macro ZERO_ZCVEC_1x4
+ vzero %v16
+ vzero %v17
+ vzero %v18
+ vzero %v19
+.endm
+
+/*zero vectors for 4x2 and 4x1 (reuse the 2x4/1x4 registers) */
+.macro ZERO_ZCVEC_4x2
+ ZERO_ZCVEC_2x4
+.endm
+
+.macro ZERO_ZCVEC_4x1
+ ZERO_ZCVEC_1x4
+.endm
+
+/*zero vectors for 2x2 */
+.macro ZERO_ZCVEC_2x2
+ vzero %v16
+ vzero %v17
+ vzero %v20
+ vzero %v21
+.endm
+
+/*zero vectors for 1x2 */
+.macro ZERO_ZCVEC_1x2
+ vzero %v16
+ vzero %v17
+.endm
+
+/*zero vectors for 2x1 */
+.macro ZERO_ZCVEC_2x1
+ vzero %v16
+ vzero %v17
+.endm
+
+/*zero vectors for 1x1*/
+.macro ZERO_ZCVEC_1x1
+ lzer %f6
+ lzer %f7
+.endm
+
+
+/*
+ Calculate for 4x2 inner
+*/
+.macro CalcComplex_4x2 vResR1, vResI1, vResR2, vResI2, vResR3, vResI3, vResR4, vResI4, vr1, vi1, vr2, vi2, vrB, viB,vrB2, viB2
+
+ #if defined(NN) || defined(NT) || defined(TN) || defined(TT)
+ vfmsdb \vResR1, \vi1, \viB, \vResR1
+ vfmadb \vResI1, \vr1, \viB, \vResI1
+ vfmsdb \vResR2, \vi2, \viB, \vResR2
+ vfmadb \vResI2, \vr2, \viB, \vResI2
+
+ vfmsdb \vResR3, \vi1, \viB2, \vResR3
+ vfmadb \vResI3, \vr1, \viB2, \vResI3
+ vfmsdb \vResR4, \vi2, \viB2, \vResR4
+ vfmadb \vResI4, \vr2, \viB2, \vResI4
+
+ vfmsdb \vResR1, \vr1, \vrB, \vResR1
+ vfmadb \vResI1, \vi1, \vrB, \vResI1
+ vfmsdb \vResR2, \vr2, \vrB, \vResR2
+ vfmadb \vResI2, \vi2, \vrB, \vResI2
+
+ vfmsdb \vResR3, \vr1, \vrB2, \vResR3
+ vfmadb \vResI3, \vi1, \vrB2, \vResI3
+ vfmsdb \vResR4, \vr2, \vrB2, \vResR4
+ vfmadb \vResI4, \vi2, \vrB2, \vResI4
+
+ #endif
+
+ #if defined(NR) || defined(NC) || defined(TR) || defined(TC)
+ vfmadb \vResR1, \vi1, \viB, \vResR1
+ vfmsdb \vResI1, \vr1, \viB, \vResI1
+ vfmadb \vResR2, \vi2, \viB, \vResR2
+ vfmsdb \vResI2, \vr2, \viB, \vResI2
+
+ vfmadb \vResR3, \vi1, \viB2, \vResR3
+ vfmsdb \vResI3, \vr1, \viB2, \vResI3
+ vfmadb \vResR4, \vi2, \viB2, \vResR4
+ vfmsdb \vResI4, \vr2, \viB2, \vResI4
+
+ vfmadb \vResR1, \vr1, \vrB, \vResR1
+ vfmsdb \vResI1, \vi1, \vrB, \vResI1
+ vfmadb \vResR2, \vr2, \vrB, \vResR2
+ vfmsdb \vResI2, \vi2, \vrB, \vResI2
+
+ vfmadb \vResR3, \vr1, \vrB2, \vResR3
+ vfmsdb \vResI3, \vi1, \vrB2, \vResI3
+ vfmadb \vResR4, \vr2, \vrB2, \vResR4
+ vfmsdb \vResI4, \vi2, \vrB2, \vResI4
+
+ #endif
+
+ #if defined(RN) || defined(RT) || defined(CN) || defined(CT)
+ vfmadb \vResR1, \vi1, \viB, \vResR1
+ vfmsdb \vResI1, \vi1, \vrB, \vResI1
+ vfmadb \vResR2, \vi2, \viB, \vResR2
+ vfmsdb \vResI2, \vi2, \vrB, \vResI2
+
+ vfmadb \vResR3, \vi1, \viB2, \vResR3
+ vfmsdb \vResI3, \vi1, \vrB2, \vResI3
+ vfmadb \vResR4, \vi2, \viB2, \vResR4
+ vfmsdb \vResI4, \vi2, \vrB2, \vResI4
+
+ vfmadb \vResR1, \vr1, \vrB, \vResR1
+ vfmsdb \vResI1, \vr1, \viB, \vResI1
+ vfmadb \vResR2, \vr2, \vrB, \vResR2
+ vfmsdb \vResI2, \vr2, \viB, \vResI2
+
+ vfmadb \vResR3, \vr1, \vrB2, \vResR3
+ vfmsdb \vResI3, \vr1, \viB2, \vResI3
+ vfmadb \vResR4, \vr2, \vrB2, \vResR4
+ vfmsdb \vResI4, \vr2, \viB2, \vResI4
+ #endif
+ #if defined(RR) || defined(RC) || defined(CR) || defined(CC)
+
+ vfmsdb \vResR1, \vr1, \vrB, \vResR1
+ vfmadb \vResI1, \vi1, \vrB, \vResI1
+ vfmsdb \vResR2, \vr2, \vrB, \vResR2
+ vfmadb \vResI2, \vi2, \vrB, \vResI2
+
+ vfmsdb \vResR3, \vr1, \vrB2, \vResR3
+ vfmadb \vResI3, \vi1, \vrB2, \vResI3
+ vfmsdb \vResR4, \vr2, \vrB2, \vResR4
+ vfmadb \vResI4, \vi2, \vrB2, \vResI4
+
+ vfmsdb \vResR1, \vi1, \viB, \vResR1
+ vfmadb \vResI1, \vr1, \viB, \vResI1
+ vfmsdb \vResR2, \vi2, \viB, \vResR2
+ vfmadb \vResI2, \vr2, \viB, \vResI2
+
+ vfmsdb \vResR3, \vi1, \viB2, \vResR3
+ vfmadb \vResI3, \vr1, \viB2, \vResI3
+ vfmsdb \vResR4, \vi2, \viB2, \vResR4
+ vfmadb \vResI4, \vr2, \viB2, \vResI4
+
+
+ #endif
+
+.endm
+
+/*
+ Calculate for 2x4 inner
+*/
+.macro CalcComplex_2x4 vResR1, vResI1, vResR2, vResI2, vResR3, vResI3, vResR4, vResI4, vr1, vi1, vr2, vi2, vrB, viB,vrB2, viB2
+
+ #if defined(NN) || defined(NT) || defined(TN) || defined(TT)
+ vfmsdb \vResR1, \vi1, \viB, \vResR1
+ vfmadb \vResI1, \vr1, \viB, \vResI1
+ vfmsdb \vResR2, \vi2, \viB, \vResR2
+ vfmadb \vResI2, \vr2, \viB, \vResI2
+
+ vfmsdb \vResR3, \vi1, \viB2, \vResR3
+ vfmadb \vResI3, \vr1, \viB2, \vResI3
+ vfmsdb \vResR4, \vi2, \viB2, \vResR4
+ vfmadb \vResI4, \vr2, \viB2, \vResI4
+
+ vfmsdb \vResR1, \vr1, \vrB, \vResR1
+ vfmadb \vResI1, \vi1, \vrB, \vResI1
+ vfmsdb \vResR2, \vr2, \vrB, \vResR2
+ vfmadb \vResI2, \vi2, \vrB, \vResI2
+
+ vfmsdb \vResR3, \vr1, \vrB2, \vResR3
+ vfmadb \vResI3, \vi1, \vrB2, \vResI3
+ vfmsdb \vResR4, \vr2, \vrB2, \vResR4
+ vfmadb \vResI4, \vi2, \vrB2, \vResI4
+
+ #endif
+
+ #if defined(RN) || defined(RT) || defined(CN) || defined(CT)
+ vfmadb \vResR1, \vi1, \viB, \vResR1
+ vfmsdb \vResI1, \vr1, \viB, \vResI1
+ vfmadb \vResR2, \vi2, \viB, \vResR2
+ vfmsdb \vResI2, \vr2, \viB, \vResI2
+
+ vfmadb \vResR3, \vi1, \viB2, \vResR3
+ vfmsdb \vResI3, \vr1, \viB2, \vResI3
+ vfmadb \vResR4, \vi2, \viB2, \vResR4
+ vfmsdb \vResI4, \vr2, \viB2, \vResI4
+
+ vfmadb \vResR1, \vr1, \vrB, \vResR1
+ vfmsdb \vResI1, \vi1, \vrB, \vResI1
+ vfmadb \vResR2, \vr2, \vrB, \vResR2
+ vfmsdb \vResI2, \vi2, \vrB, \vResI2
+
+ vfmadb \vResR3, \vr1, \vrB2, \vResR3
+ vfmsdb \vResI3, \vi1, \vrB2, \vResI3
+ vfmadb \vResR4, \vr2, \vrB2, \vResR4
+ vfmsdb \vResI4, \vi2, \vrB2, \vResI4
+
+ #endif
+
+ #if defined(NR) || defined(NC) || defined(TR) || defined(TC)
+ vfmadb \vResR1, \vi1, \viB, \vResR1
+ vfmsdb \vResI1, \vi1, \vrB, \vResI1
+ vfmadb \vResR2, \vi2, \viB, \vResR2
+ vfmsdb \vResI2, \vi2, \vrB, \vResI2
+
+ vfmadb \vResR3, \vi1, \viB2, \vResR3
+ vfmsdb \vResI3, \vi1, \vrB2, \vResI3
+ vfmadb \vResR4, \vi2, \viB2, \vResR4
+ vfmsdb \vResI4, \vi2, \vrB2, \vResI4
+
+ vfmadb \vResR1, \vr1, \vrB, \vResR1
+ vfmsdb \vResI1, \vr1, \viB, \vResI1
+ vfmadb \vResR2, \vr2, \vrB, \vResR2
+ vfmsdb \vResI2, \vr2, \viB, \vResI2
+
+ vfmadb \vResR3, \vr1, \vrB2, \vResR3
+ vfmsdb \vResI3, \vr1, \viB2, \vResI3
+ vfmadb \vResR4, \vr2, \vrB2, \vResR4
+ vfmsdb \vResI4, \vr2, \viB2, \vResI4
+ #endif
+ #if defined(RR) || defined(RC) || defined(CR) || defined(CC)
+
+ vfmsdb \vResR1, \vr1, \vrB, \vResR1
+ vfmadb \vResI1, \vi1, \vrB, \vResI1
+ vfmsdb \vResR2, \vr2, \vrB, \vResR2
+ vfmadb \vResI2, \vi2, \vrB, \vResI2
+
+ vfmsdb \vResR3, \vr1, \vrB2, \vResR3
+ vfmadb \vResI3, \vi1, \vrB2, \vResI3
+ vfmsdb \vResR4, \vr2, \vrB2, \vResR4
+ vfmadb \vResI4, \vi2, \vrB2, \vResI4
+
+ vfmsdb \vResR1, \vi1, \viB, \vResR1
+ vfmadb \vResI1, \vr1, \viB, \vResI1
+ vfmsdb \vResR2, \vi2, \viB, \vResR2
+ vfmadb \vResI2, \vr2, \viB, \vResI2
+
+ vfmsdb \vResR3, \vi1, \viB2, \vResR3
+ vfmadb \vResI3, \vr1, \viB2, \vResI3
+ vfmsdb \vResR4, \vi2, \viB2, \vResR4
+ vfmadb \vResI4, \vr2, \viB2, \vResI4
+
+
+ #endif
+
+.endm
+
+/*
+ Calculate for 2x2 inner
+*/
+.macro CalcComplex_2x2 vResR1, vResI1,vResR2, vResI2, vR1, vI1, vRB, vIB, vRB2, vIB2
+ #if defined(NN) || defined(NT) || defined(TN) || defined(TT)
+ vfmsdb \vResR1, \vI1, \vIB, \vResR1
+ vfmadb \vResI1, \vR1, \vIB, \vResI1
+
+ vfmsdb \vResR2, \vI1, \vIB2, \vResR2
+ vfmadb \vResI2, \vR1, \vIB2, \vResI2
+
+ vfmsdb \vResR1, \vR1, \vRB, \vResR1
+ vfmadb \vResI1, \vI1, \vRB, \vResI1
+
+ vfmsdb \vResR2, \vR1, \vRB2, \vResR2
+ vfmadb \vResI2, \vI1, \vRB2, \vResI2
+ #endif
+
+ #if defined(NR) || defined(NC) || defined(TR) || defined(TC)
+ vfmadb \vResR1, \vI1, \vIB, \vResR1
+ vfmsdb \vResI1, \vR1, \vIB, \vResI1
+
+ vfmadb \vResR2, \vI1, \vIB2, \vResR2
+ vfmsdb \vResI2, \vR1, \vIB2, \vResI2
+
+ vfmadb \vResR1, \vR1, \vRB, \vResR1
+ vfmsdb \vResI1, \vI1, \vRB, \vResI1
+
+ vfmadb \vResR2, \vR1, \vRB2, \vResR2
+ vfmsdb \vResI2, \vI1, \vRB2, \vResI2
+ #endif
+
+ #if defined(RN) || defined(RT) || defined(CN) || defined(CT)
+ vfmadb \vResR1, \vI1, \vIB, \vResR1
+ vfmsdb \vResI1, \vI1, \vRB, \vResI1
+
+ vfmadb \vResR2, \vI1, \vIB2, \vResR2
+ vfmsdb \vResI2, \vI1, \vRB2, \vResI2
+
+ vfmadb \vResR1, \vR1, \vRB, \vResR1
+ vfmsdb \vResI1, \vR1, \vIB, \vResI1
+
+ vfmadb \vResR2, \vR1, \vRB2, \vResR2
+ vfmsdb \vResI2, \vR1, \vIB2, \vResI2
+ #endif
+ #if defined(RR) || defined(RC) || defined(CR) || defined(CC)
+ vfmsdb \vResR1, \vR1, \vRB, \vResR1
+ vfmadb \vResI1, \vI1, \vRB, \vResI1
+
+ vfmsdb \vResR2, \vR1, \vRB2, \vResR2
+ vfmadb \vResI2, \vI1, \vRB2, \vResI2
+
+ vfmsdb \vResR1, \vI1, \vIB, \vResR1
+ vfmadb \vResI1, \vR1, \vIB, \vResI1
+
+ vfmsdb \vResR2, \vI1, \vIB2, \vResR2
+ vfmadb \vResI2, \vR1, \vIB2, \vResI2
+ #endif
+.endm
+
+/*
+ Calculate for 2x1 inner
+*/
+.macro CalcComplex_2x1 vRealResult1, vImageResult1, vReal1, vImage1, vecRealB, vecImageB
+ #if defined(NN) || defined(NT) || defined(TN) || defined(TT)
+ vfmsdb \vRealResult1, \vImage1, \vecImageB, \vRealResult1
+ vfmadb \vImageResult1, \vReal1, \vecImageB, \vImageResult1
+ vfmsdb \vRealResult1, \vReal1, \vecRealB, \vRealResult1
+ vfmadb \vImageResult1, \vImage1, \vecRealB, \vImageResult1
+ #endif
+
+ #if defined(NR) || defined(NC) || defined(TR) || defined(TC)
+ vfmadb \vRealResult1, \vImage1, \vecImageB, \vRealResult1
+ vfmsdb \vImageResult1, \vReal1, \vecImageB, \vImageResult1
+ vfmadb \vRealResult1, \vReal1, \vecRealB, \vRealResult1
+ vfmsdb \vImageResult1, \vImage1, \vecRealB, \vImageResult1
+ #endif
+
+ #if defined(RN) || defined(RT) || defined(CN) || defined(CT)
+ vfmadb \vRealResult1, \vImage1, \vecImageB, \vRealResult1
+ vfmsdb \vImageResult1, \vImage1, \vecRealB, \vImageResult1
+ vfmadb \vRealResult1, \vReal1, \vecRealB, \vRealResult1
+ vfmsdb \vImageResult1, \vReal1, \vecImageB, \vImageResult1
+ #endif
+ #if defined(RR) || defined(RC) || defined(CR) || defined(CC)
+ vfmsdb \vRealResult1, \vReal1, \vecRealB, \vRealResult1
+ vfmadb \vImageResult1, \vImage1, \vecRealB, \vImageResult1
+ vfmsdb \vRealResult1, \vImage1, \vecImageB, \vRealResult1
+ vfmadb \vImageResult1, \vReal1, \vecImageB, \vImageResult1
+ #endif
+.endm
+
+/*
+ Calculate for 1x2 inner
+*/
+.macro CalcComplex_1x2 vRealResult1, vImageResult1, vReal1, vImage1, vecRealB, vecImageB
+ #if defined(NN) || defined(NT) || defined(TN) || defined(TT)
+ vfmsdb \vRealResult1, \vImage1, \vecImageB, \vRealResult1
+ vfmadb \vImageResult1, \vReal1, \vecImageB, \vImageResult1
+ vfmsdb \vRealResult1, \vReal1, \vecRealB, \vRealResult1
+ vfmadb \vImageResult1, \vImage1, \vecRealB, \vImageResult1
+ #endif
+
+ #if defined(RN) || defined(CN) || defined(RT) || defined(CT)
+ vfmadb \vRealResult1, \vImage1, \vecImageB, \vRealResult1
+ vfmsdb \vImageResult1, \vReal1, \vecImageB, \vImageResult1
+ vfmadb \vRealResult1, \vReal1, \vecRealB, \vRealResult1
+ vfmsdb \vImageResult1, \vImage1, \vecRealB, \vImageResult1
+ #endif
+
+ #if defined(NR) || defined(TR) || defined(NC) || defined(TC)
+ vfmadb \vRealResult1, \vImage1, \vecImageB, \vRealResult1
+ vfmsdb \vImageResult1, \vImage1, \vecRealB, \vImageResult1
+ vfmadb \vRealResult1, \vReal1, \vecRealB, \vRealResult1
+ vfmsdb \vImageResult1, \vReal1, \vecImageB, \vImageResult1
+ #endif
+ #if defined(RR) || defined(RC) || defined(CR) || defined(CC)
+ vfmsdb \vRealResult1, \vReal1, \vecRealB, \vRealResult1
+ vfmadb \vImageResult1, \vImage1, \vecRealB, \vImageResult1
+ vfmsdb \vRealResult1, \vImage1, \vecImageB, \vRealResult1
+ vfmadb \vImageResult1, \vReal1, \vecImageB, \vImageResult1
+ #endif
+.endm
+
+
+/*
+ Calculate for 4x1 inner
+*/
+.macro CalcComplex_4x1 vRealResult1, vImageResult1, vRealResult2, vImageResult2, vReal1, vImage1, vReal2, vImage2, vecRealB, vecImageB
+ #if defined(NN) || defined(NT) || defined(TN) || defined(TT)
+ vfmsdb \vRealResult1, \vImage1, \vecImageB, \vRealResult1
+ vfmadb \vImageResult1, \vReal1, \vecImageB, \vImageResult1
+ vfmsdb \vRealResult2, \vImage2, \vecImageB, \vRealResult2
+ vfmadb \vImageResult2, \vReal2, \vecImageB, \vImageResult2
+ vfmsdb \vRealResult1, \vReal1, \vecRealB, \vRealResult1
+ vfmadb \vImageResult1, \vImage1, \vecRealB, \vImageResult1
+ vfmsdb \vRealResult2, \vReal2, \vecRealB, \vRealResult2
+ vfmadb \vImageResult2, \vImage2, \vecRealB, \vImageResult2
+ #endif
+
+ #if defined(NR) || defined(NC) || defined(TR) || defined(TC)
+ vfmadb \vRealResult1, \vImage1, \vecImageB, \vRealResult1
+ vfmsdb \vImageResult1, \vReal1, \vecImageB, \vImageResult1
+ vfmadb \vRealResult2, \vImage2, \vecImageB, \vRealResult2
+ vfmsdb \vImageResult2, \vReal2, \vecImageB, \vImageResult2
+ vfmadb \vRealResult1, \vReal1, \vecRealB, \vRealResult1
+ vfmsdb \vImageResult1, \vImage1, \vecRealB, \vImageResult1
+ vfmadb \vRealResult2, \vReal2, \vecRealB, \vRealResult2
+ vfmsdb \vImageResult2, \vImage2, \vecRealB, \vImageResult2
+ #endif
+
+ #if defined(RN) || defined(RT) || defined(CN) || defined(CT)
+ vfmadb \vRealResult1, \vImage1, \vecImageB, \vRealResult1
+ vfmsdb \vImageResult1, \vImage1, \vecRealB, \vImageResult1
+ vfmadb \vRealResult2, \vImage2, \vecImageB, \vRealResult2
+ vfmsdb \vImageResult2, \vImage2, \vecRealB, \vImageResult2
+ vfmadb \vRealResult1, \vReal1, \vecRealB, \vRealResult1
+ vfmsdb \vImageResult1, \vReal1, \vecImageB, \vImageResult1
+ vfmadb \vRealResult2, \vReal2, \vecRealB, \vRealResult2
+ vfmsdb \vImageResult2, \vReal2, \vecImageB, \vImageResult2
+ #endif
+ #if defined(RR) || defined(RC) || defined(CR) || defined(CC)
+
+ vfmsdb \vRealResult1, \vReal1, \vecRealB, \vRealResult1
+ vfmadb \vImageResult1, \vImage1, \vecRealB, \vImageResult1
+ vfmsdb \vRealResult2, \vReal2, \vecRealB, \vRealResult2
+ vfmadb \vImageResult2, \vImage2, \vecRealB, \vImageResult2
+ vfmsdb \vRealResult1, \vImage1, \vecImageB, \vRealResult1
+ vfmadb \vImageResult1, \vReal1, \vecImageB, \vImageResult1
+ vfmsdb \vRealResult2, \vImage2, \vecImageB, \vRealResult2
+ vfmadb \vImageResult2, \vReal2, \vecImageB, \vImageResult2
+ #endif
+
+.endm
+
+/*
+ Calculate for 1x4 inner
+*/
+.macro CalcComplex_1x4 vRealResult1, vImageResult1, vRealResult2, vImageResult2, vReal1, vImage1, vReal2, vImage2, vecRealB, vecImageB
+ #if defined(NN) || defined(NT) || defined(TN) || defined(TT)
+ vfmsdb \vRealResult1, \vImage1, \vecImageB, \vRealResult1
+ vfmadb \vImageResult1, \vReal1, \vecImageB, \vImageResult1
+ vfmsdb \vRealResult2, \vImage2, \vecImageB, \vRealResult2
+ vfmadb \vImageResult2, \vReal2, \vecImageB, \vImageResult2
+ vfmsdb \vRealResult1, \vReal1, \vecRealB, \vRealResult1
+ vfmadb \vImageResult1, \vImage1, \vecRealB, \vImageResult1
+ vfmsdb \vRealResult2, \vReal2, \vecRealB, \vRealResult2
+ vfmadb \vImageResult2, \vImage2, \vecRealB, \vImageResult2
+ #endif
+
+ #if defined(RN) || defined(CN) || defined(RT) || defined(CT)
+ vfmadb \vRealResult1, \vImage1, \vecImageB, \vRealResult1
+ vfmsdb \vImageResult1, \vReal1, \vecImageB, \vImageResult1
+ vfmadb \vRealResult2, \vImage2, \vecImageB, \vRealResult2
+ vfmsdb \vImageResult2, \vReal2, \vecImageB, \vImageResult2
+ vfmadb \vRealResult1, \vReal1, \vecRealB, \vRealResult1
+ vfmsdb \vImageResult1, \vImage1, \vecRealB, \vImageResult1
+ vfmadb \vRealResult2, \vReal2, \vecRealB, \vRealResult2
+ vfmsdb \vImageResult2, \vImage2, \vecRealB, \vImageResult2
+ #endif
+
+ #if defined(NR) || defined(TR) || defined(NC) || defined(TC)
+ vfmadb \vRealResult1, \vImage1, \vecImageB, \vRealResult1
+ vfmsdb \vImageResult1, \vImage1, \vecRealB, \vImageResult1
+ vfmadb \vRealResult2, \vImage2, \vecImageB, \vRealResult2
+ vfmsdb \vImageResult2, \vImage2, \vecRealB, \vImageResult2
+ vfmadb \vRealResult1, \vReal1, \vecRealB, \vRealResult1
+ vfmsdb \vImageResult1, \vReal1, \vecImageB, \vImageResult1
+ vfmadb \vRealResult2, \vReal2, \vecRealB, \vRealResult2
+ vfmsdb \vImageResult2, \vReal2, \vecImageB, \vImageResult2
+ #endif
+ #if defined(RR) || defined(RC) || defined(CR) || defined(CC)
+
+ vfmsdb \vRealResult1, \vReal1, \vecRealB, \vRealResult1
+ vfmadb \vImageResult1, \vImage1, \vecRealB, \vImageResult1
+ vfmsdb \vRealResult2, \vReal2, \vecRealB, \vRealResult2
+ vfmadb \vImageResult2, \vImage2, \vecRealB, \vImageResult2
+ vfmsdb \vRealResult1, \vImage1, \vecImageB, \vRealResult1
+ vfmadb \vImageResult1, \vReal1, \vecImageB, \vImageResult1
+ vfmsdb \vRealResult2, \vImage2, \vecImageB, \vRealResult2
+ vfmadb \vImageResult2, \vReal2, \vecImageB, \vImageResult2
+ #endif
+
+.endm
+
+.macro CalcComplex_1x1 RealResult1, ImageResult1, Real1, Image1, RealB, ImageB
+ #if defined(NN) || defined(NT) || defined(TN) || defined(TT)
+ msebr \RealResult1, \Image1, \ImageB
+ maebr \ImageResult1, \Real1, \ImageB
+ msebr \RealResult1, \Real1, \RealB
+ maebr \ImageResult1, \Image1, \RealB
+ #endif
+
+ #if defined(NR) || defined(NC) || defined(TR) || defined(TC)
+ maebr \RealResult1, \Image1, \ImageB
+ msebr \ImageResult1, \Real1, \ImageB
+ maebr \RealResult1, \Real1, \RealB
+ msebr \ImageResult1, \Image1, \RealB
+ #endif
+
+ #if defined(RN) || defined(RT) || defined(CN) || defined(CT)
+ maebr \RealResult1, \Image1, \ImageB
+ msebr \ImageResult1, \Image1, \RealB
+ maebr \RealResult1, \Real1, \RealB
+ msebr \ImageResult1, \Real1, \ImageB
+ #endif
+ #if defined(RR) || defined(RC) || defined(CR) || defined(CC)
+ msebr \RealResult1, \Real1, \RealB
+ maebr \ImageResult1, \Image1, \RealB
+ msebr \RealResult1, \Image1, \ImageB
+ maebr \ImageResult1, \Real1, \ImageB
+ #endif
+.endm
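+
+/* Reference semantics of the four conjugation groups, as a hedged C sketch
+   (the function name and in/out convention are assumptions for exposition;
+   the RR/RC/CR/CC group accumulates the negated product, compensated later
+   by the negated alpha):
+
+   void calc_complex_1x1_ref(float *cr, float *ci,
+                             float ar, float ai, float br, float bi) {
+   #if defined(NN) || defined(NT) || defined(TN) || defined(TT)
+       *cr += ar*br - ai*bi;  *ci += ai*br + ar*bi;    // a*b
+   #elif defined(NR) || defined(NC) || defined(TR) || defined(TC)
+       *cr += ar*br + ai*bi;  *ci += ai*br - ar*bi;    // a*conj(b)
+   #elif defined(RN) || defined(RT) || defined(CN) || defined(CT)
+       *cr += ar*br + ai*bi;  *ci += ar*bi - ai*br;    // conj(a)*b
+   #else
+       *cr += ai*bi - ar*br;  *ci += ai*br + ar*bi;    // -(conj(a)*conj(b))
+   #endif
+   }
+*/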
+
+#define unit_size 8
+#define DISP(ind,stride,disp) (ind*stride+disp)
+#define DISP8(ind,disp) (ind*unit_size*8+disp)
+#define DISP4(ind,disp) (ind*unit_size*4+disp)
+#define DISP2(ind,disp) (ind*unit_size*2+disp)
+#define DISP1(ind,disp) (ind*unit_size+disp)
+#define N8 (8*unit_size)
+#define N4 (4*unit_size)
+#define N2 (2*unit_size)
+#define N1 (1*unit_size)
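+
+/* Example: unit_size is 8 bytes (one single-precision complex element), so
+   DISP4(ind,disp) = ind*32+disp walks groups of four complex values;
+   DISP4(1,8) = 40 is the byte offset of the second element in the k=1 group. */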
+
+
+
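+/* Load pattern of the ZCALC_* macros below: vlef gathers the real parts of two
+   consecutive complex floats into word lanes 0 and 2 of one vector register and
+   the imaginary parts into another; vldeb then widens lanes 0/2 to two doubles.
+   E.g. in ZCALC_4x4_I, %v1/%v5 end up as (re0,re1)/(im0,im1) of A and %v3/%v7
+   as (re2,re3)/(im2,im3), while vlrepf+vldeb broadcasts one scalar of B. */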
+.macro ZCALC_4x4_I PTR_A_REG,PTR_B_REG,Index,IsLast
+
+ vlef %v1, DISP4(\Index ,0) (\PTR_A_REG),0
+ vlef %v5, DISP4(\Index ,4) (\PTR_A_REG),0
+ vlef %v1, DISP4(\Index ,8) (\PTR_A_REG),2
+ vlef %v5, DISP4(\Index ,12) (\PTR_A_REG),2
+ vlef %v3, DISP4(\Index ,16) (\PTR_A_REG),0
+ vlef %v7, DISP4(\Index ,20) (\PTR_A_REG),0
+ vlef %v3, DISP4(\Index ,24) (\PTR_A_REG),2
+ vlef %v7, DISP4(\Index ,28) (\PTR_A_REG),2
+ vlrepf %v9, DISP4(\Index ,0)(\PTR_B_REG)
+ vlrepf %v10 , DISP4(\Index ,4)(\PTR_B_REG)
+ vlrepf %v11, DISP4(\Index ,8)(\PTR_B_REG)
+ vlrepf %v12 , DISP4(\Index ,12)(\PTR_B_REG)
+ vldeb %v1,%v1
+ vldeb %v5,%v5
+ vldeb %v3,%v3
+ vldeb %v7,%v7
+ vldeb %v9,%v9
+ vldeb %v10,%v10
+ vldeb %v11,%v11
+ vldeb %v12,%v12
+
+ CalcComplex_4x2 %v16,%v17,%v18,%v19,%v20,%v21,%v22,%v23,%v1,%v5,%v3,%v7,%v9,%v10,%v11,%v12
+
+ vlrepf %v9, DISP4(\Index ,16)(\PTR_B_REG)
+ vlrepf %v10 , DISP4(\Index ,20)(\PTR_B_REG)
+ vlrepf %v11, DISP4(\Index ,24)(\PTR_B_REG)
+ vlrepf %v12 , DISP4(\Index ,28)(\PTR_B_REG)
+ vldeb %v9,%v9
+ vldeb %v10,%v10
+ vldeb %v11,%v11
+ vldeb %v12,%v12
+
+ .if \IsLast==1
+ la \PTR_A_REG, DISP4(\Index ,32)(\PTR_A_REG)
+ .endif
+ CalcComplex_4x2 %v24,%v25,%v26,%v27,%v28,%v29,%v30,%v31,%v1,%v5,%v3,%v7,%v9,%v10,%v11,%v12
+
+ .if \IsLast==1
+ la \PTR_B_REG, DISP4(\Index ,32)(\PTR_B_REG)
+ .endif
+.endm
+
+.macro ZCALC_4x2_I PTR_A_REG,PTR_B_REG,Index,IsLast
+
+ vlef %v1, DISP4(\Index ,0) (\PTR_A_REG),0
+ vlef %v5, DISP4(\Index ,4) (\PTR_A_REG),0
+ vlef %v1, DISP4(\Index ,8) (\PTR_A_REG),2
+ vlef %v5, DISP4(\Index ,12) (\PTR_A_REG),2
+ vlef %v3, DISP4(\Index ,16) (\PTR_A_REG),0
+ vlef %v7, DISP4(\Index ,20) (\PTR_A_REG),0
+ vlef %v3, DISP4(\Index ,24) (\PTR_A_REG),2
+ vlef %v7, DISP4(\Index ,28) (\PTR_A_REG),2
+ vlrepf %v9, DISP2(\Index ,0)(\PTR_B_REG)
+ vlrepf %v10 , DISP2(\Index ,4)(\PTR_B_REG)
+ vlrepf %v11, DISP2(\Index ,8)(\PTR_B_REG)
+ vlrepf %v12 , DISP2(\Index ,12)(\PTR_B_REG)
+ vldeb %v1,%v1
+ vldeb %v5,%v5
+ vldeb %v3,%v3
+ vldeb %v7,%v7
+ vldeb %v9,%v9
+ vldeb %v10,%v10
+ vldeb %v11,%v11
+ vldeb %v12,%v12
+ .if \IsLast==1
+ la \PTR_A_REG, DISP4(\Index ,32)(\PTR_A_REG)
+ .endif
+ CalcComplex_4x2 %v16,%v17,%v18,%v19,%v20,%v21,%v22,%v23,%v1,%v5,%v3,%v7,%v9,%v10,%v11,%v12
+
+ .if \IsLast==1
+ la \PTR_B_REG, DISP2(\Index ,16)(\PTR_B_REG)
+ .endif
+.endm
+
+.macro ZCALC_2x4_I PTR_A_REG,PTR_B_REG,Index,IsLast
+ vlef %v1, DISP4(\Index ,0) (\PTR_B_REG),0
+ vlef %v5, DISP4(\Index ,4) (\PTR_B_REG),0
+ vlef %v1, DISP4(\Index ,8) (\PTR_B_REG),2
+ vlef %v5, DISP4(\Index ,12) (\PTR_B_REG),2
+ vlef %v3, DISP4(\Index ,16) (\PTR_B_REG),0
+ vlef %v7, DISP4(\Index ,20) (\PTR_B_REG),0
+ vlef %v3, DISP4(\Index ,24) (\PTR_B_REG),2
+ vlef %v7, DISP4(\Index ,28) (\PTR_B_REG),2
+ vlrepf %v9, DISP2(\Index ,0)(\PTR_A_REG)
+ vlrepf %v10 , DISP2(\Index ,4)(\PTR_A_REG)
+ vlrepf %v11, DISP2(\Index ,8)(\PTR_A_REG)
+ vlrepf %v12 , DISP2(\Index ,12)(\PTR_A_REG)
+ vldeb %v1,%v1
+ vldeb %v5,%v5
+ vldeb %v3,%v3
+ vldeb %v7,%v7
+ vldeb %v9,%v9
+ vldeb %v10,%v10
+ vldeb %v11,%v11
+ vldeb %v12,%v12
+ .if \IsLast==1
+ la \PTR_B_REG, DISP4(\Index ,32)(\PTR_B_REG)
+ .endif
+ CalcComplex_2x4 %v16,%v17,%v18,%v19,%v20,%v21,%v22,%v23,%v1,%v5,%v3,%v7,%v9,%v10,%v11,%v12
+
+ .if \IsLast==1
+ la \PTR_A_REG, DISP2(\Index ,16)(\PTR_A_REG)
+ .endif
+.endm
+
+.macro ZCALC_4x1_I PTR_A_REG,PTR_B_REG,Index,IsLast
+ vlef %v1, DISP4(\Index ,0) (\PTR_A_REG),0
+ vlef %v5, DISP4(\Index ,4) (\PTR_A_REG),0
+ vlef %v1, DISP4(\Index ,8) (\PTR_A_REG),2
+ vlef %v5, DISP4(\Index ,12) (\PTR_A_REG),2
+ vlef %v3, DISP4(\Index ,16) (\PTR_A_REG),0
+ vlef %v7, DISP4(\Index ,20) (\PTR_A_REG),0
+ vlef %v3, DISP4(\Index ,24) (\PTR_A_REG),2
+ vlef %v7, DISP4(\Index ,28) (\PTR_A_REG),2
+ vlrepf %v9, DISP1(\Index ,0)(\PTR_B_REG)
+ vlrepf %v10 , DISP1(\Index ,4)(\PTR_B_REG)
+ vldeb %v1,%v1
+ vldeb %v5,%v5
+ vldeb %v3,%v3
+ vldeb %v7,%v7
+ vldeb %v9,%v9
+ vldeb %v10,%v10
+ .if \IsLast==1
+ la \PTR_A_REG, DISP4(\Index ,32)(\PTR_A_REG)
+ .endif
+ CalcComplex_4x1 %v16,%v17,%v18,%v19,%v1,%v5,%v3,%v7,%v9,%v10
+
+ .if \IsLast==1
+ la \PTR_B_REG, DISP1(\Index ,8)(\PTR_B_REG)
+ .endif
+
+.endm
+
+.macro ZCALC_1x4_I PTR_A_REG,PTR_B_REG,Index,IsLast
+ vlef %v1, DISP4(\Index ,0) (\PTR_B_REG),0
+ vlef %v5, DISP4(\Index ,4) (\PTR_B_REG),0
+ vlef %v1, DISP4(\Index ,8) (\PTR_B_REG),2
+ vlef %v5, DISP4(\Index ,12) (\PTR_B_REG),2
+ vlef %v3, DISP4(\Index ,16) (\PTR_B_REG),0
+ vlef %v7, DISP4(\Index ,20) (\PTR_B_REG),0
+ vlef %v3, DISP4(\Index ,24) (\PTR_B_REG),2
+ vlef %v7, DISP4(\Index ,28) (\PTR_B_REG),2
+ vlrepf %v9, DISP1(\Index ,0)(\PTR_A_REG)
+ vlrepf %v10 , DISP1(\Index ,4)(\PTR_A_REG)
+ vldeb %v1,%v1
+ vldeb %v5,%v5
+ vldeb %v3,%v3
+ vldeb %v7,%v7
+ vldeb %v9,%v9
+ vldeb %v10,%v10
+ .if \IsLast==1
+ la \PTR_B_REG, DISP4(\Index ,32)(\PTR_B_REG)
+ .endif
+ CalcComplex_1x4 %v16,%v17,%v18,%v19,%v1,%v5,%v3,%v7,%v9,%v10
+
+ .if \IsLast==1
+ la \PTR_A_REG, DISP1(\Index ,8)(\PTR_A_REG)
+ .endif
+.endm
+
+.macro ZCALC_2x2_I PTR_A_REG,PTR_B_REG ,Index,IsLast
+ vlef %v1, DISP2(\Index ,0) (\PTR_A_REG),0
+ vlef %v5, DISP2(\Index ,4) (\PTR_A_REG),0
+ vlef %v1, DISP2(\Index ,8) (\PTR_A_REG),2
+ vlef %v5, DISP2(\Index ,12) (\PTR_A_REG),2
+ vlrepf %v9, DISP2(\Index ,0)(\PTR_B_REG)
+ vlrepf %v10 , DISP2(\Index ,4)(\PTR_B_REG)
+ vlrepf %v11, DISP2(\Index ,8)(\PTR_B_REG)
+ vlrepf %v12 , DISP2(\Index ,12)(\PTR_B_REG)
+ vldeb %v1,%v1
+ vldeb %v5,%v5
+ vldeb %v9,%v9
+ vldeb %v10,%v10
+ vldeb %v11,%v11
+ vldeb %v12,%v12
+ .if \IsLast==1
+ la \PTR_A_REG, DISP2(\Index ,16)(\PTR_A_REG)
+ .endif
+ CalcComplex_2x2 %v16,%v17,%v20,%v21,%v1,%v5, %v9,%v10,%v11,%v12
+ .if \IsLast==1
+ la \PTR_B_REG, DISP2(\Index ,16)(\PTR_B_REG)
+ .endif
+.endm
+
+.macro ZCALC_2x1_I PTR_A_REG,PTR_B_REG ,Index,IsLast
+ vlef %v1, DISP2(\Index ,0) (\PTR_A_REG),0
+ vlef %v5, DISP2(\Index ,4) (\PTR_A_REG),0
+ vlef %v1, DISP2(\Index ,8) (\PTR_A_REG),2
+ vlef %v5, DISP2(\Index ,12) (\PTR_A_REG),2
+ vlrepf %v9, DISP1(\Index ,0)(\PTR_B_REG)
+ vlrepf %v10 , DISP1(\Index ,4)(\PTR_B_REG)
+ vldeb %v1,%v1
+ vldeb %v5,%v5
+ vldeb %v9,%v9
+ vldeb %v10,%v10
+ .if \IsLast==1
+ la \PTR_A_REG, DISP2(\Index ,16)(\PTR_A_REG)
+ .endif
+ CalcComplex_2x1 %v16,%v17, %v1,%v5, %v9,%v10
+ .if \IsLast==1
+ la \PTR_B_REG, DISP1(\Index ,8)(\PTR_B_REG)
+ .endif
+.endm
+
+.macro ZCALC_1x2_I PTR_A_REG,PTR_B_REG ,Index,IsLast
+ vlef %v1, DISP2(\Index ,0) (\PTR_B_REG),0
+ vlef %v5, DISP2(\Index ,4) (\PTR_B_REG),0
+ vlef %v1, DISP2(\Index ,8) (\PTR_B_REG),2
+ vlef %v5, DISP2(\Index ,12) (\PTR_B_REG),2
+ vlrepf %v9, DISP1(\Index ,0)(\PTR_A_REG)
+ vlrepf %v10 , DISP1(\Index ,4)(\PTR_A_REG)
+ vldeb %v1,%v1
+ vldeb %v5,%v5
+ vldeb %v9,%v9
+ vldeb %v10,%v10
+ .if \IsLast==1
+ la \PTR_B_REG, DISP2(\Index ,16)(\PTR_B_REG)
+ .endif
+ CalcComplex_1x2 %v16,%v17, %v1,%v5, %v9,%v10
+ .if \IsLast==1
+ la \PTR_A_REG, DISP1(\Index ,8)(\PTR_A_REG)
+ .endif
+.endm
+
+.macro ZCALC_1x1_I PTR_A_REG,PTR_B_REG ,Index,IsLast
+ le %f1 , DISP1(\Index ,0)(\PTR_A_REG)
+ le %f3 , DISP1(\Index ,4)(\PTR_A_REG)
+ le %f4 , DISP1(\Index ,0)(\PTR_B_REG)
+ le %f5 , DISP1(\Index ,4)(\PTR_B_REG)
+ .if \IsLast==1
+ la \PTR_A_REG, DISP1(\Index ,8)(\PTR_A_REG)
+ .endif
+ CalcComplex_1x1 %f6,%f7,%f1,%f3,%f4,%f5
+ .if \IsLast==1
+ la \PTR_B_REG, DISP1(\Index ,8)(\PTR_B_REG)
+ .endif
+.endm
+
+.macro ZCALC_4x4 PTR_A_REG,PTR_B_REG
+ ZCALC_4x4_I \PTR_A_REG,\PTR_B_REG,0,1
+.endm
+.macro ZCALC_4x2 PTR_A_REG,PTR_B_REG
+ ZCALC_4x2_I \PTR_A_REG,\PTR_B_REG,0,1
+.endm
+.macro ZCALC_4x1 PTR_A_REG,PTR_B_REG
+ ZCALC_4x1_I \PTR_A_REG,\PTR_B_REG,0,1
+.endm
+
+.macro ZCALC_4x4_4 PTR_A_REG,PTR_B_REG
+ ZCALC_4x4_I \PTR_A_REG,\PTR_B_REG,0,0
+ ZCALC_4x4_I \PTR_A_REG,\PTR_B_REG,1,0
+ ZCALC_4x4_I \PTR_A_REG,\PTR_B_REG,2,0
+ ZCALC_4x4_I \PTR_A_REG,\PTR_B_REG,3,1
+.endm
+.macro ZCALC_4x2_4 PTR_A_REG,PTR_B_REG
+ ZCALC_4x2_I \PTR_A_REG,\PTR_B_REG,0,0
+ ZCALC_4x2_I \PTR_A_REG,\PTR_B_REG,1,0
+ ZCALC_4x2_I \PTR_A_REG,\PTR_B_REG,2,0
+ ZCALC_4x2_I \PTR_A_REG,\PTR_B_REG,3,1
+.endm
+.macro ZCALC_4x1_4 PTR_A_REG,PTR_B_REG
+ ZCALC_4x1_I \PTR_A_REG,\PTR_B_REG,0,0
+ ZCALC_4x1_I \PTR_A_REG,\PTR_B_REG,1,0
+ ZCALC_4x1_I \PTR_A_REG,\PTR_B_REG,2,0
+ ZCALC_4x1_I \PTR_A_REG,\PTR_B_REG,3,1
+.endm
+
+.macro ZCALC_2x4_4 PTR_A_REG,PTR_B_REG
+ ZCALC_2x4_I \PTR_A_REG,\PTR_B_REG,0,0
+ ZCALC_2x4_I \PTR_A_REG,\PTR_B_REG,1,0
+ ZCALC_2x4_I \PTR_A_REG,\PTR_B_REG,2,0
+ ZCALC_2x4_I \PTR_A_REG,\PTR_B_REG,3,1
+.endm
+
+.macro ZCALC_2x4 PTR_A_REG,PTR_B_REG
+ ZCALC_2x4_I \PTR_A_REG,\PTR_B_REG,0,1
+.endm
+
+.macro ZCALC_1x4_4 PTR_A_REG,PTR_B_REG
+ ZCALC_1x4_I \PTR_A_REG,\PTR_B_REG,0,0
+ ZCALC_1x4_I \PTR_A_REG,\PTR_B_REG,1,0
+ ZCALC_1x4_I \PTR_A_REG,\PTR_B_REG,2,0
+ ZCALC_1x4_I \PTR_A_REG,\PTR_B_REG,3,1
+.endm
+
+.macro ZCALC_1x4 PTR_A_REG,PTR_B_REG
+ ZCALC_1x4_I \PTR_A_REG,\PTR_B_REG,0,1
+.endm
+.macro ZCALC_2x2 PTR_A_REG,PTR_B_REG
+ ZCALC_2x2_I \PTR_A_REG,\PTR_B_REG,0,1
+.endm
+
+.macro ZCALC_2x2_4 PTR_A_REG,PTR_B_REG
+ ZCALC_2x2_I \PTR_A_REG,\PTR_B_REG,0,0
+ ZCALC_2x2_I \PTR_A_REG,\PTR_B_REG,1,0
+ ZCALC_2x2_I \PTR_A_REG,\PTR_B_REG,2,0
+ ZCALC_2x2_I \PTR_A_REG,\PTR_B_REG,3,1
+.endm
+
+.macro ZCALC_2x1 PTR_A_REG,PTR_B_REG
+ ZCALC_2x1_I \PTR_A_REG,\PTR_B_REG,0,1
+.endm
+
+.macro ZCALC_2x1_4 PTR_A_REG,PTR_B_REG
+ ZCALC_2x1_I \PTR_A_REG,\PTR_B_REG,0,0
+ ZCALC_2x1_I \PTR_A_REG,\PTR_B_REG,1,0
+ ZCALC_2x1_I \PTR_A_REG,\PTR_B_REG,2,0
+ ZCALC_2x1_I \PTR_A_REG,\PTR_B_REG,3,1
+.endm
+
+
+.macro ZCALC_1x2_4 PTR_A_REG,PTR_B_REG
+ ZCALC_1x2_I \PTR_A_REG,\PTR_B_REG,0,0
+ ZCALC_1x2_I \PTR_A_REG,\PTR_B_REG,1,0
+ ZCALC_1x2_I \PTR_A_REG,\PTR_B_REG,2,0
+ ZCALC_1x2_I \PTR_A_REG,\PTR_B_REG,3,1
+.endm
+
+.macro ZCALC_1x2 PTR_A_REG,PTR_B_REG
+ ZCALC_1x2_I \PTR_A_REG,\PTR_B_REG,0,1
+.endm
+
+.macro ZCALC_1x1_4 PTR_A_REG,PTR_B_REG
+ ZCALC_1x1_I \PTR_A_REG,\PTR_B_REG,0,0
+ ZCALC_1x1_I \PTR_A_REG,\PTR_B_REG,1,0
+ ZCALC_1x1_I \PTR_A_REG,\PTR_B_REG,2,0
+ ZCALC_1x1_I \PTR_A_REG,\PTR_B_REG,3,1
+.endm
+
+.macro ZCALC_1x1 PTR_A_REG,PTR_B_REG
+ ZCALC_1x1_I \PTR_A_REG,\PTR_B_REG,0,1
+.endm
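+
+/* Each ZCALC_MxN expands a single k iteration; the ZCALC_MxN_4 variants unroll
+   four k iterations and advance the A/B pointers only on the last one (IsLast==1). */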
+
+
+
+/*****************************STORE RESULTS************************************/
+.macro CalcMultAlpha_4x1 vRealResult1, vImageResult1, vRealResult2, vImageResult2, vReal1, vImage1, vReal2, vImage2, vecRealB, vecImageB
+ #if defined (TRMMKERNEL)
+ vfmdb \vRealResult1, \vImage1, \vecImageB
+ vfmdb \vImageResult1, \vReal1, \vecImageB
+ vfmdb \vRealResult2, \vImage2, \vecImageB
+ vfmdb \vImageResult2, \vReal2, \vecImageB
+ #else
+ vfmsdb \vRealResult1, \vImage1, \vecImageB, \vRealResult1
+ vfmadb \vImageResult1, \vReal1, \vecImageB, \vImageResult1
+ vfmsdb \vRealResult2, \vImage2, \vecImageB, \vRealResult2
+ vfmadb \vImageResult2, \vReal2, \vecImageB, \vImageResult2
+#endif
+ vfmsdb \vRealResult1, \vReal1, \vecRealB, \vRealResult1
+ vfmadb \vImageResult1, \vImage1, \vecRealB, \vImageResult1
+ vfmsdb \vRealResult2, \vReal2, \vecRealB, \vRealResult2
+ vfmadb \vImageResult2, \vImage2, \vecRealB, \vImageResult2
+
+.endm
+
+.macro CalcMultAlpha_2x1 vRealResult1, vImageResult1, vReal1, vImage1, vecRealB, vecImageB
+ #if defined (TRMMKERNEL)
+ vfmdb \vRealResult1, \vImage1, \vecImageB
+ vfmdb \vImageResult1, \vReal1, \vecImageB
+#else
+ vfmsdb \vRealResult1, \vImage1, \vecImageB, \vRealResult1
+ vfmadb \vImageResult1, \vReal1, \vecImageB, \vImageResult1
+#endif
+ vfmsdb \vRealResult1, \vReal1, \vecRealB, \vRealResult1
+ vfmadb \vImageResult1, \vImage1, \vecRealB, \vImageResult1
+.endm
+
+.macro CalcMultAlpha_1x1 RealResult1, ImageResult1, Real1, Image1, RealB, ImageB
+
+ msebr \RealResult1, \Image1, \ImageB
+ maebr \ImageResult1, \Real1, \ImageB
+ msebr \RealResult1, \Real1, \RealB
+ maebr \ImageResult1, \Image1, \RealB
+.endm
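+
+/* What the CalcMultAlpha_* macros compute, as a C sketch (illustrative only):
+   with accumulator (xr,xi) and complex alpha (ar,ai),
+
+     TRMM kernel:  cr = xr*ar - xi*ai;       ci = xi*ar + xr*ai;       // C overwritten
+     GEMM kernel:  cr = xr*ar - xi*ai + cr;  ci = xi*ar + xr*ai + ci;  // C updated
+
+   i.e. C = alpha*acc (+ C). The ZSTORE_* macros below widen the float C lanes
+   to double (vldeb), apply this update in double precision, then round back
+   to float (vledb) before storing. */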
+
+.macro ZSTORE_4x4 ALPHA_VECREG,ALPHA_VECI,CIJ_REG , LDC_BYTE_ORIGINAL ,LC1,LC2
+ #if !defined(TRMMKERNEL)
+ vlef %v3, 0(\CIJ_REG),0
+ vlef %v4, 4(\CIJ_REG),0
+ vlef %v3, 8(\CIJ_REG),2
+ vlef %v4, 12(\CIJ_REG),2
+ vlef %v5, 16(\CIJ_REG),0
+ vlef %v6, 20(\CIJ_REG),0
+ vlef %v5, 24(\CIJ_REG),2
+ vlef %v6, 28(\CIJ_REG),2
+ vldeb %v3,%v3
+ vldeb %v4,%v4
+ vldeb %v5,%v5
+ vldeb %v6,%v6
+#endif
+ la \LC1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL)
+ CalcMultAlpha_4x1 %v3,%v4,%v5,%v6,%v16,%v17,%v18,%v19,\ALPHA_VECREG,\ALPHA_VECI
+ vledb %v3, %v3,0,0
+ vledb %v4, %v4,0,0
+ vledb %v5, %v5,0,0
+ vledb %v6, %v6,0,0
+ vstef %v3, 0(\CIJ_REG),0
+ vstef %v4, 4(\CIJ_REG),0
+ vstef %v3, 8(\CIJ_REG),2
+ vstef %v4, 12(\CIJ_REG),2
+ vstef %v5, 16(\CIJ_REG),0
+ vstef %v6, 20(\CIJ_REG),0
+ vstef %v5, 24(\CIJ_REG),2
+ vstef %v6, 28(\CIJ_REG),2
+
+ la \LC2,0(\LC1,\LDC_BYTE_ORIGINAL )
+
+ #if !defined(TRMMKERNEL)
+ vlef %v16, 0(\CIJ_REG,\LDC_BYTE_ORIGINAL),0
+ vlef %v17, 4(\CIJ_REG,\LDC_BYTE_ORIGINAL),0
+ vlef %v16, 8(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vlef %v17, 12(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vlef %v18, 16(\CIJ_REG,\LDC_BYTE_ORIGINAL),0
+ vlef %v19, 20(\CIJ_REG,\LDC_BYTE_ORIGINAL),0
+ vlef %v18, 24(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vlef %v19, 28(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vldeb %v16,%v16
+ vldeb %v17,%v17
+ vldeb %v18,%v18
+ vldeb %v19,%v19
+#endif
+ CalcMultAlpha_4x1 %v16,%v17,%v18,%v19,%v20,%v21,%v22,%v23,\ALPHA_VECREG,\ALPHA_VECI
+ vledb %v16, %v16,0,0
+ vledb %v17, %v17,0,0
+ vledb %v18, %v18,0,0
+ vledb %v19, %v19,0,0
+ vstef %v16, 0(\CIJ_REG,\LDC_BYTE_ORIGINAL),0
+ vstef %v17, 4(\CIJ_REG,\LDC_BYTE_ORIGINAL),0
+ vstef %v16, 8(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vstef %v17, 12(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vstef %v18, 16(\CIJ_REG,\LDC_BYTE_ORIGINAL),0
+ vstef %v19, 20(\CIJ_REG,\LDC_BYTE_ORIGINAL),0
+ vstef %v18, 24(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vstef %v19, 28(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+
+#if !defined(TRMMKERNEL)
+ vlef %v3, 0(\CIJ_REG, \LC1),0
+ vlef %v4, 4(\CIJ_REG, \LC1),0
+ vlef %v3, 8(\CIJ_REG, \LC1),2
+ vlef %v4, 12(\CIJ_REG, \LC1),2
+ vlef %v5, 16(\CIJ_REG, \LC1),0
+ vlef %v6, 20(\CIJ_REG, \LC1),0
+ vlef %v5, 24(\CIJ_REG, \LC1),2
+ vlef %v6, 28(\CIJ_REG, \LC1),2
+ vldeb %v3,%v3
+ vldeb %v4,%v4
+ vldeb %v5,%v5
+ vldeb %v6,%v6
+#endif
+ CalcMultAlpha_4x1 %v3,%v4,%v5,%v6,%v24,%v25,%v26,%v27,\ALPHA_VECREG,\ALPHA_VECI
+ vledb %v3, %v3,0,0
+ vledb %v4, %v4,0,0
+ vledb %v5, %v5,0,0
+ vledb %v6, %v6,0,0
+ vstef %v3, 0(\CIJ_REG,\LC1),0
+ vstef %v4, 4(\CIJ_REG,\LC1),0
+ vstef %v3, 8(\CIJ_REG,\LC1),2
+ vstef %v4, 12(\CIJ_REG,\LC1),2
+ vstef %v5, 16(\CIJ_REG,\LC1),0
+ vstef %v6, 20(\CIJ_REG,\LC1),0
+ vstef %v5, 24(\CIJ_REG,\LC1),2
+ vstef %v6, 28(\CIJ_REG,\LC1),2
+
+ #if !defined(TRMMKERNEL)
+ vlef %v16, 0(\CIJ_REG,\LC2),0
+ vlef %v17, 4(\CIJ_REG,\LC2),0
+ vlef %v16, 8(\CIJ_REG,\LC2),2
+ vlef %v17, 12(\CIJ_REG,\LC2),2
+ vlef %v18, 16(\CIJ_REG,\LC2),0
+ vlef %v19, 20(\CIJ_REG,\LC2),0
+ vlef %v18, 24(\CIJ_REG,\LC2),2
+ vlef %v19, 28(\CIJ_REG,\LC2),2
+ vldeb %v16,%v16
+ vldeb %v17,%v17
+ vldeb %v18,%v18
+ vldeb %v19,%v19
+#endif
+ CalcMultAlpha_4x1 %v16,%v17,%v18,%v19,%v28,%v29,%v30,%v31,\ALPHA_VECREG,\ALPHA_VECI
+ vledb %v16, %v16,0,0
+ vledb %v17, %v17,0,0
+ vledb %v18, %v18,0,0
+ vledb %v19, %v19,0,0
+ vstef %v16, 0(\CIJ_REG,\LC2),0
+ vstef %v17, 4(\CIJ_REG,\LC2),0
+ vstef %v16, 8(\CIJ_REG,\LC2),2
+ vstef %v17, 12(\CIJ_REG,\LC2),2
+ vstef %v18, 16(\CIJ_REG,\LC2),0
+ vstef %v19, 20(\CIJ_REG,\LC2),0
+ vstef %v18, 24(\CIJ_REG,\LC2),2
+ vstef %v19, 28(\CIJ_REG,\LC2),2
+
+ la \CIJ_REG,32(\CIJ_REG)
+.endm
+
+.macro ZSTORE_4x2 ALPHA_VECREG,ALPHA_VECI,CIJ_REG , LDC_BYTE_ORIGINAL
+ #if !defined(TRMMKERNEL)
+ vlef %v3, 0(\CIJ_REG),0
+ vlef %v4, 4(\CIJ_REG),0
+ vlef %v3, 8(\CIJ_REG),2
+ vlef %v4, 12(\CIJ_REG),2
+ vlef %v5, 16(\CIJ_REG),0
+ vlef %v6, 20(\CIJ_REG),0
+ vlef %v5, 24(\CIJ_REG),2
+ vlef %v6, 28(\CIJ_REG),2
+ vldeb %v3,%v3
+ vldeb %v4,%v4
+ vldeb %v5,%v5
+ vldeb %v6,%v6
+#endif
+ CalcMultAlpha_4x1 %v3,%v4,%v5,%v6,%v16,%v17,%v18,%v19,\ALPHA_VECREG,\ALPHA_VECI
+ vledb %v3, %v3,0,0
+ vledb %v4, %v4,0,0
+ vledb %v5, %v5,0,0
+ vledb %v6, %v6,0,0
+ vstef %v3, 0(\CIJ_REG),0
+ vstef %v4, 4(\CIJ_REG),0
+ vstef %v3, 8(\CIJ_REG),2
+ vstef %v4, 12(\CIJ_REG),2
+ vstef %v5, 16(\CIJ_REG),0
+ vstef %v6, 20(\CIJ_REG),0
+ vstef %v5, 24(\CIJ_REG),2
+ vstef %v6, 28(\CIJ_REG),2
+
+ #if !defined(TRMMKERNEL)
+ vlef %v16, 0(\CIJ_REG,\LDC_BYTE_ORIGINAL),0
+ vlef %v17, 4(\CIJ_REG,\LDC_BYTE_ORIGINAL),0
+ vlef %v16, 8(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vlef %v17, 12(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vlef %v18, 16(\CIJ_REG,\LDC_BYTE_ORIGINAL),0
+ vlef %v19, 20(\CIJ_REG,\LDC_BYTE_ORIGINAL),0
+ vlef %v18, 24(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vlef %v19, 28(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vldeb %v16,%v16
+ vldeb %v17,%v17
+ vldeb %v18,%v18
+ vldeb %v19,%v19
+#endif
+ CalcMultAlpha_4x1 %v16,%v17,%v18,%v19,%v20,%v21,%v22,%v23,\ALPHA_VECREG,\ALPHA_VECI
+ vledb %v16, %v16,0,0
+ vledb %v17, %v17,0,0
+ vledb %v18, %v18,0,0
+ vledb %v19, %v19,0,0
+ vstef %v16, 0(\CIJ_REG,\LDC_BYTE_ORIGINAL),0
+ vstef %v17, 4(\CIJ_REG,\LDC_BYTE_ORIGINAL),0
+ vstef %v16, 8(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vstef %v17, 12(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vstef %v18, 16(\CIJ_REG,\LDC_BYTE_ORIGINAL),0
+ vstef %v19, 20(\CIJ_REG,\LDC_BYTE_ORIGINAL),0
+ vstef %v18, 24(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vstef %v19, 28(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+
+ la \CIJ_REG,32(\CIJ_REG)
+.endm
+.macro ZSTORE_4x1 ALPHA_VECREG,ALPHA_VECI,CIJ_REG , LDC_BYTE_ORIGINAL
+ #if !defined(TRMMKERNEL)
+ vlef %v3, 0(\CIJ_REG),0
+ vlef %v4, 4(\CIJ_REG),0
+ vlef %v3, 8(\CIJ_REG),2
+ vlef %v4, 12(\CIJ_REG),2
+ vlef %v5, 16(\CIJ_REG),0
+ vlef %v6, 20(\CIJ_REG),0
+ vlef %v5, 24(\CIJ_REG),2
+ vlef %v6, 28(\CIJ_REG),2
+ vldeb %v3,%v3
+ vldeb %v4,%v4
+ vldeb %v5,%v5
+ vldeb %v6,%v6
+#endif
+ CalcMultAlpha_4x1 %v3,%v4,%v5,%v6,%v16,%v17,%v18,%v19,\ALPHA_VECREG,\ALPHA_VECI
+ vledb %v3, %v3,0,0
+ vledb %v4, %v4,0,0
+ vledb %v5, %v5,0,0
+ vledb %v6, %v6,0,0
+ vstef %v3, 0(\CIJ_REG),0
+ vstef %v4, 4(\CIJ_REG),0
+ vstef %v3, 8(\CIJ_REG),2
+ vstef %v4, 12(\CIJ_REG),2
+ vstef %v5, 16(\CIJ_REG),0
+ vstef %v6, 20(\CIJ_REG),0
+ vstef %v5, 24(\CIJ_REG),2
+ vstef %v6, 28(\CIJ_REG),2
+ la \CIJ_REG,32(\CIJ_REG)
+.endm
+
+.macro ZSTORE_1x4 ALPHA_VECREG,ALPHA_VECI,CIJ_REG , LDC_BYTE_ORIGINAL,LC1,LC2
+ #if !defined(TRMMKERNEL)
+ la \LC1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL)
+ vlef %v3, 0(\CIJ_REG),0
+ vlef %v4, 4(\CIJ_REG),0
+ vlef %v3, 0(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vlef %v4, 4(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ la \LC2,0(\LC1,\LDC_BYTE_ORIGINAL )
+ vlef %v5, 0(\CIJ_REG,\LC1),0
+ vlef %v6, 4(\CIJ_REG,\LC1),0
+ vlef %v5, 0(\CIJ_REG,\LC2),2
+ vlef %v6, 4(\CIJ_REG,\LC2),2
+ vldeb %v3,%v3
+ vldeb %v4,%v4
+ vldeb %v5,%v5
+ vldeb %v6,%v6
+#else
+ la \LC1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL)
+#endif
+ CalcMultAlpha_4x1 %v3,%v4,%v5,%v6,%v16,%v17,%v18,%v19,\ALPHA_VECREG,\ALPHA_VECI
+#if defined(TRMMKERNEL)
+ la \LC2,0(\LC1,\LDC_BYTE_ORIGINAL )
+#endif
+ vledb %v3, %v3,0,0
+ vledb %v4, %v4,0,0
+ vledb %v5, %v5,0,0
+ vledb %v6, %v6,0,0
+ vstef %v3, 0(\CIJ_REG),0
+ vstef %v4, 4(\CIJ_REG),0
+ vstef %v3, 0(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vstef %v4, 4(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vstef %v5, 0(\CIJ_REG,\LC1),0
+ vstef %v6, 4(\CIJ_REG,\LC1),0
+ vstef %v5, 0(\CIJ_REG,\LC2),2
+ vstef %v6, 4(\CIJ_REG,\LC2),2
+ la \CIJ_REG,8(\CIJ_REG)
+.endm
+.macro ZSTORE_2x4 ALPHA_VECREG,ALPHA_VECI,CIJ_REG , LDC_BYTE_ORIGINAL,LC1,LC2
+ #if !defined(TRMMKERNEL)
+ la \LC1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL)
+ vlef %v3, 0(\CIJ_REG),0
+ vlef %v4, 4(\CIJ_REG),0
+ vlef %v24, 8(\CIJ_REG),0
+ vlef %v25, 12(\CIJ_REG),0
+ vlef %v3, 0(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vlef %v4, 4(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vlef %v24, 8(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vlef %v25, 12(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ la \LC2,0(\LC1,\LDC_BYTE_ORIGINAL )
+ vlef %v5, 0(\CIJ_REG,\LC1),0
+ vlef %v6, 4(\CIJ_REG,\LC1),0
+ vlef %v26, 8(\CIJ_REG,\LC1),0
+ vlef %v27, 12(\CIJ_REG,\LC1),0
+ vlef %v5, 0(\CIJ_REG,\LC2),2
+ vlef %v6, 4(\CIJ_REG,\LC2),2
+ vlef %v26, 8(\CIJ_REG,\LC2),2
+ vlef %v27, 12(\CIJ_REG,\LC2),2
+
+ vldeb %v3,%v3
+ vldeb %v4,%v4
+ vldeb %v5,%v5
+ vldeb %v6,%v6
+ vldeb %v24,%v24
+ vldeb %v25,%v25
+ vldeb %v26,%v26
+ vldeb %v27,%v27
+#else
+ la \LC1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL)
+#endif
+ CalcMultAlpha_4x1 %v3,%v4,%v5,%v6,%v16,%v17,%v18,%v19,\ALPHA_VECREG,\ALPHA_VECI
+ CalcMultAlpha_4x1 %v24,%v25,%v26,%v27,%v20,%v21,%v22,%v23,\ALPHA_VECREG,\ALPHA_VECI
+#if defined(TRMMKERNEL)
+ la \LC2,0(\LC1,\LDC_BYTE_ORIGINAL )
+#endif
+ vledb %v3, %v3,0,0
+ vledb %v4, %v4,0,0
+ vledb %v5, %v5,0,0
+ vledb %v6, %v6,0,0
+ vledb %v24, %v24,0,0
+ vledb %v25, %v25,0,0
+ vledb %v26, %v26,0,0
+ vledb %v27, %v27,0,0
+ vstef %v3, 0(\CIJ_REG),0
+ vstef %v4, 4(\CIJ_REG),0
+ vstef %v24, 8(\CIJ_REG),0
+ vstef %v25, 12(\CIJ_REG),0
+ vstef %v3, 0(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vstef %v4, 4(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vstef %v24, 8(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vstef %v25, 12(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vstef %v5, 0(\CIJ_REG,\LC1),0
+ vstef %v6, 4(\CIJ_REG,\LC1),0
+ vstef %v26, 8(\CIJ_REG,\LC1),0
+ vstef %v27, 12(\CIJ_REG,\LC1),0
+ vstef %v5, 0(\CIJ_REG,\LC2),2
+ vstef %v6, 4(\CIJ_REG,\LC2),2
+ vstef %v26, 8(\CIJ_REG,\LC2),2
+ vstef %v27, 12(\CIJ_REG,\LC2),2
+
+ la \CIJ_REG,16(\CIJ_REG)
+
+.endm
+
+.macro ZSTORE_2x2 ALPHA_VECREG,ALPHA_VECI,CIJ_REG , LDC_BYTE_ORIGINAL
+#if !defined(TRMMKERNEL)
+ vlef %v3, 0(\CIJ_REG),0
+ vlef %v4, 4(\CIJ_REG),0
+ vlef %v3, 8(\CIJ_REG),2
+ vlef %v4, 12(\CIJ_REG),2
+ vlef %v5, 0(\CIJ_REG,\LDC_BYTE_ORIGINAL),0
+ vlef %v6, 4(\CIJ_REG,\LDC_BYTE_ORIGINAL),0
+ vlef %v5, 8(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vlef %v6, 12(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vldeb %v3,%v3
+ vldeb %v4,%v4
+ vldeb %v5,%v5
+ vldeb %v6,%v6
+#endif
+ CalcMultAlpha_2x1 %v3,%v4, %v16,%v17,\ALPHA_VECREG,\ALPHA_VECI
+ CalcMultAlpha_2x1 %v5,%v6, %v20,%v21 ,\ALPHA_VECREG,\ALPHA_VECI
+ vledb %v3, %v3,0,0
+ vledb %v4, %v4,0,0
+ vledb %v5, %v5,0,0
+ vledb %v6, %v6,0,0
+ vstef %v3, 0(\CIJ_REG),0
+ vstef %v4, 4(\CIJ_REG),0
+ vstef %v3, 8(\CIJ_REG),2
+ vstef %v4, 12(\CIJ_REG),2
+ vstef %v5, 0(\CIJ_REG,\LDC_BYTE_ORIGINAL),0
+ vstef %v6, 4(\CIJ_REG,\LDC_BYTE_ORIGINAL),0
+ vstef %v5, 8(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vstef %v6, 12(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ la \CIJ_REG,16(\CIJ_REG)
+.endm
+
+.macro ZSTORE_2x1 ALPHA_VECREG,ALPHA_VECI,CIJ_REG , LDC_BYTE_ORIGINAL
+#if !defined(TRMMKERNEL)
+ vlef %v3, 0(\CIJ_REG),0
+ vlef %v4, 4(\CIJ_REG),0
+ vlef %v3, 8(\CIJ_REG),2
+ vlef %v4, 12(\CIJ_REG),2
+ vldeb %v3,%v3
+ vldeb %v4,%v4
+#endif
+ CalcMultAlpha_2x1 %v3,%v4, %v16,%v17,\ALPHA_VECREG,\ALPHA_VECI
+ vledb %v3, %v3,0,0
+ vledb %v4, %v4,0,0
+ vstef %v3, 0(\CIJ_REG),0
+ vstef %v4, 4(\CIJ_REG),0
+ vstef %v3, 8(\CIJ_REG),2
+ vstef %v4, 12(\CIJ_REG),2
+ la \CIJ_REG,16(\CIJ_REG)
+.endm
+
+.macro ZSTORE_1x2 ALPHA_VECREG,ALPHA_VECI,CIJ_REG , LDC_BYTE_ORIGINAL
+ #if !defined(TRMMKERNEL)
+ vlef %v3, 0(\CIJ_REG),0
+ vlef %v4, 4(\CIJ_REG),0
+ vlef %v3, 0(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vlef %v4, 4(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vldeb %v3,%v3
+ vldeb %v4,%v4
+
+#endif
+ CalcMultAlpha_2x1 %v3,%v4, %v16,%v17,\ALPHA_VECREG,\ALPHA_VECI
+ vledb %v3, %v3,0,0
+ vledb %v4, %v4,0,0
+ vstef %v3, 0(\CIJ_REG),0
+ vstef %v4, 4(\CIJ_REG),0
+ vstef %v3, 0(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ vstef %v4, 4(\CIJ_REG,\LDC_BYTE_ORIGINAL),2
+ la \CIJ_REG,8(\CIJ_REG)
+.endm
+
+.macro ZSTORE_1x1 ALPHA_RR,ALPHA_RI ,CIJ_REG
+#if defined (TRMMKERNEL)
+ lzer %f1
+ lzer %f3
+#else
+ le %f1 , 0(\CIJ_REG)
+ le %f3 , 4(\CIJ_REG )
+#endif
+ ledbr %f4,\ALPHA_RR
+ ledbr %f5,\ALPHA_RI
+ CalcMultAlpha_1x1 %f1,%f3, %f6,%f7,%f4,%f5
+ ste %f1,0(\CIJ_REG)
+ ste %f3,4(\CIJ_REG)
+ la \CIJ_REG,8(\CIJ_REG)
+.endm
+
+/****************************TRMM POINTER REFRESH MACROS*************************/
+
+.macro RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B
+ #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
+ /* ptrbb = bb;*/
+ lgr \PTR_B,\B_VAL /*refresh BPOINT*/
+
+ #else
+ /* ptrba =ptrba+ off*C_A;
+ ptrbb = bb + off*C_B;*/
+.if \C_B==4
+ .if \C_A==4
+ sllg \PTR_B, \OFF_VAL,5
+ agr \PTR_A,\PTR_B /*ptrba+off*4**/
+ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/
+ .elseif \C_A==2
+ sllg \PTR_B, \OFF_VAL,4
+ la \PTR_A,0(\PTR_A,\PTR_B) /*ptrba+off*2**/
+ agr \PTR_B, \PTR_B
+ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/
+
+ .elseif \C_A==1
+ sllg \PTR_B, \OFF_VAL,3
+ agr \PTR_A,\PTR_B /*ptrba+off*1*/
+ sllg \PTR_B, \OFF_VAL,5
+ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/
+ .endif
+
+.elseif \C_B==2
+ .if \C_A==4
+ sllg \PTR_B, \OFF_VAL,4
+ la \PTR_A,0(\PTR_A,\PTR_B) /*ptrba+off*2*/
+ agr \PTR_A,\PTR_B /*ptrba+off*2 more (off*4 total)*/
+ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/
+ .elseif \C_A==2
+ sllg \PTR_B, \OFF_VAL,4
+ agr \PTR_A,\PTR_B /*ptrba+off*2**/
+ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/
+ .elseif \C_A==1
+ sllg \PTR_B, \OFF_VAL,3
+ la \PTR_A,0(\PTR_A,\PTR_B) /*ptrba+off*1**/
+ agr \PTR_B,\PTR_B /* off+off**/
+ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/
+ .endif
+
+.elseif \C_B==1
+ .if \C_A==4
+ sllg \PTR_B, \OFF_VAL,5
+ agr \PTR_A,\PTR_B /*ptrba+off*4**/
+ sllg \PTR_B, \OFF_VAL,3
+ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/
+ .elseif \C_A==2
+ sllg \PTR_B, \OFF_VAL,3
+ la \PTR_A,0(\PTR_A,\PTR_B) /*ptrba+off*1*/
+ agr \PTR_A,\PTR_B /*ptrba+off*1 more (off*2 total)*/
+ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/
+
+ .elseif \C_A==1
+ sllg \PTR_B, \OFF_VAL,3
+ agr \PTR_A,\PTR_B /*ptrba+off*1**/
+ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/
+ .endif
+.endif
+
+ #endif
+.endm
+
+/**/
+.macro RefreshTempBk TEMP_VAL,BK_VAL,OFF_VAL,INCR_A,INCR_B
+ #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
+ /* temp = bk-off;*/
+ sgrk \TEMP_VAL,\BK_VAL,\OFF_VAL
+
+ #elif defined(LEFT)
+ /* temp = off+INCR_A; // number of values in A */
+ la \TEMP_VAL,\INCR_A(\OFF_VAL)
+ #else
+ /* temp = off+INCR_B // number of values in B*/
+ la \TEMP_VAL,\INCR_B(\OFF_VAL)
+ #endif
+
+.endm
+
+.macro RefreshPointersAndOFF TEMP_VAL,BK_VAL,OFF_VAL,PTR_A,C_A,C_B
+
+ #if ( defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
+ /*temp = bk - off;*/
+ sgrk \TEMP_VAL,\BK_VAL,\OFF_VAL
+ #ifdef LEFT
+ /*temp -= C_A; // number of values in A*/
+ lay \TEMP_VAL,-\C_A(\TEMP_VAL)
+ #else
+ /*temp -= C_B; // number of values in B*/
+ lay \TEMP_VAL,-\C_B(\TEMP_VAL)
+ #endif
+ /*ptrba += temp*C_A;
+ (ptrbb is re-derived by the caller, so only ptrba advances here)*/
+
+ .if \C_A==4
+ sllg \TEMP_VAL, \TEMP_VAL,5 /*temp*4*/
+ .elseif \C_A==2
+ sllg \TEMP_VAL, \TEMP_VAL,4 /*temp*2*/
+ .elseif \C_A==1
+ sllg \TEMP_VAL, \TEMP_VAL,3 /*temp*1*/
+ .endif
+ la \PTR_A,0(\PTR_A,\TEMP_VAL) /*ptrba+temp*C_A*/
+ #endif
+
+ #ifdef LEFT
+ /*off += C_A; // number of values in A*/
+ aghi \OFF_VAL,\C_A
+ #endif
+.endm
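+
+/* TRMM bookkeeping implemented by the three macros above, summarized as a C
+   sketch from their inline comments (one complex float is 8 bytes, hence the
+   shifts <<3/<<4/<<5 for C_A or C_B = 1/2/4; each case applies under the
+   corresponding #if guards):
+
+     RefreshPointers:        ptrbb = bb;  or  { ptrba += off*C_A;  ptrbb = bb + off*C_B; }
+     RefreshTempBk:          temp = bk-off;  or  temp = off+C_A;  or  temp = off+C_B;
+     RefreshPointersAndOFF:  temp = bk-off - (LEFT ? C_A : C_B);  ptrba += temp*C_A;
+                             and, if LEFT, off += C_A;
+*/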
+
*****************************************************************************/
/**************************************************************************************
-* 2017/03/05 AbdelRauf (quickwritereader@gmail.com)
-* BLASTEST : passed
-* CTEST : passed
-* TEST : passed
+* 2017/03/12 AbdelRauf (quickwritereader@gmail.com)
+* BLASTEST : passed
+* CTEST : passed
+* TEST : passed
**************************************************************************************/
/*********************************************************************/
#define ASSEMBLER
#include "common.h"
+/*
-/*
-#BLASLONG bm,BLASLONG bn,BLASLONG bk,FLOAT alpha,FLOAT* ba,FLOAT* bb,FLOAT* C,BLASLONG ldc
- ##bm=r2,bn=r3, bk=r4, alpha=f0,ba=r5,bb=r6,stack[160] ,ldc=stack[168]
-offset=stack[176]
+BLASLONG bm,BLASLONG bn,BLASLONG bk,FLOAT alphar,FLOAT alphai,FLOAT* ba,FLOAT* bb,
+ FLOAT* C,BLASLONG ldc, BLASLONG offset)
+ ##bm=r2, bn=r3, bk=r4, alphar=f0, alphai=f2, ba=r5, bb=r6, C=stack[160], ldc=stack[168]
+offset=stack[176]
+
**********************************************************************************************/
-/*Note: r0 can not be used as address disp register */
+/*Note: r0 can not be used as address disp register */
#define BM %r2
#define BM_CUR %r0
#define BK %r4
#define LDC_BYTE %r8
#define ALPHA %f0
+#define ALPHA_I %f2
#define ALPHA_VECT %v0
+#define ALPHA_VECT_I %v2
#define LOCAL_VAR1 %r9
#define LOCAL_VAR2 %r1
#define LOCAL_VAR3 %r11
#define CIJ_LOCAL %r12
#define OFF %r13
#define OFFSET %f8
-#define ALIGN_4 .align 16
-#define ALIGN_2 .align 8
+#define ALIGN_4 .align 32
+#define ALIGN_2 .align 16
#define PREFETCH_INS 1
/**************************Include kernel helper macrosses**********************************/
-#include "dkernelMacros.S"
+#include "ckernelMacrosV.S"
-/***********************************DGEMM***********************************************************/
+/***********************************CGEMM**4x4*******************************************************/
PROLOGUE
#if defined(TRMMKERNEL)
- std OFFSET,40(%r15)
- stmg %r6,%r13,48(%r15)
+ std OFFSET ,40(%r15)
+ stmg %r6,%r13,48(%r15)
#else
- stmg %r6,%r12,48(%r15)
+ stmg %r6,%r12,48(%r15)
#endif
+std %f9, 128(%r15)
+std %f10,136(%r15)
+std %f11,144(%r15)
+std %f12,152(%r15)
+
lg CIJ, 160(%r15)
lg LOCAL_VAR1, 168(%r15)
#if defined(TRMMKERNEL)
-lg OFF,176(%r15)
-ldgr OFFSET ,OFF
+ lg OFF,176(%r15)
+ ldgr OFFSET ,OFF
#endif
srlg BN_CUR,BN,2
+#if defined(RR) || defined(RC) || defined(CR) || defined(CC)
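+ /* accumulators hold (-c,-ci) for conjugate*conjugate; negate alpha once (see header) */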
+ lcdbr ALPHA_I,ALPHA_I
+ lcdbr ALPHA ,ALPHA
+#endif
+
vrepg ALPHA_VECT,ALPHA_VECT,0 /*replicate alpha which in f0*/
-sllg LDC_BYTE, LOCAL_VAR1,3 /*calculate lcd stride with bytes double=8 x<<3 */
+sllg LDC_BYTE, LOCAL_VAR1,3 /*calculate ldc stride in bytes: complex float=8, x<<3 */
+vrepg ALPHA_VECT_I,ALPHA_VECT_I,0 /*replicate alpha_i which is in f2*/
+
+vldeb ALPHA_VECT,ALPHA_VECT /*widen both alphas to double for the update*/
+vldeb ALPHA_VECT_I,ALPHA_VECT_I
#if defined(TRMMKERNEL) && !defined(LEFT)
/*off = -offset;*/
lgdr LOCAL_VAR1,OFFSET
ALIGN_4
.LX4_BN:
#if defined(PREFETCH_INS)
- pfd 1, 0(A)
- pfd 1, 256(A)
- pfd 1, 0(B)
- pfd 1, 256(B)
+ pfd 1, 0(A)
+ pfd 1, 0(B)
#endif
#if defined(TRMMKERNEL) && defined(LEFT)
/*off = offset;*/
lgdr OFF,OFFSET
#endif
-srlg BM_CUR,BM,3
+srlg BM_CUR,BM,2
lgr LOCAL_VAR3,A
lgr CIJ_LOCAL,CIJ
-cijle BM_CUR,0,.L4x4
-ALIGN_4
-.L8x4_BM: /*BM_CUR LOOP */
-
-#if defined(TRMMKERNEL)
-
- /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */
- RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,8,4
-
- RefreshTempBk LOCAL_VAR1,BK,OFF,8,4
- srl LOCAL_VAR1,2
-
-#else
- srlg LOCAL_VAR1,BK,2 /*refresh BK*/
- lgr LOCAL_VAR2,B /*refresh BPOINT*/
-#endif
-
-ZERO_CVEC_8x4
-cijle LOCAL_VAR1,0,.L8x4_mod
-
+cijle BM_CUR,0,.L2x4
ALIGN_4
-.L8x4_4_BK: /*BK_CUR LOOP */
-#if defined(PREFETCH_INS)
- pfd 1, 512(LOCAL_VAR3)
-#endif
- CALC_8x4_4 LOCAL_VAR3,LOCAL_VAR2
-#if defined(PREFETCH_INS)
- pfd 1, 512(LOCAL_VAR2)
-#endif
-brctg LOCAL_VAR1,.L8x4_4_BK
-
-ALIGN_4
-.L8x4_mod:
-#if defined(TRMMKERNEL)
- RefreshTempBk LOCAL_VAR1,BK,OFF,8,4
- nill LOCAL_VAR1,3
-#else
-la LOCAL_VAR1,3(0,0)
-NGR LOCAL_VAR1,BK /*refresh BK*/
-#endif
-jz .L8x4_BK_Store
-
-ALIGN_4
-.L8x4_BK: /*BK_CUR LOOP */
- CALC_8x4 LOCAL_VAR3,LOCAL_VAR2
-brctg LOCAL_VAR1,.L8x4_BK
-
-ALIGN_4
-.L8x4_BK_Store:
-/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/
-STORE_8x4 ALPHA_VECT,CIJ_LOCAL, LDC_BYTE , LOCAL_VAR1 ,LOCAL_VAR2
-#if defined(TRMMKERNEL)
- /*RefreshPointersAndOFF TEMP_VAL,BK_VAL,OFF_VAL,L_VAR,PTR_A,C_A*/
- RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,8,4
-#endif
-brctg BM_CUR,.L8x4_BM
-
-ALIGN_4
-.L4x4:
-
-tmll BM,4
-jz .L2x4
-
-ALIGN_4
.L4x4_BM: /*BM start*/
#if defined(TRMMKERNEL)
srlg LOCAL_VAR1,BK,2 /*refresh BK*/
lgr LOCAL_VAR2,B /*refresh BPOINT*/
#endif
-ZERO_CVEC_4x4
+ZERO_ZCVEC_4x4
cijle LOCAL_VAR1,0,.L4x4_mod
ALIGN_4
-.L4x4_4_BK: /*BK_CUR LOOP */
- CALC_4x4_4 LOCAL_VAR3,LOCAL_VAR2
+.L4x4_4_BK: /*BK_CUR LOOP */
+ ZCALC_4x4_4 LOCAL_VAR3,LOCAL_VAR2
+#if defined(PREFETCH_INS)
+ pfd 1, 128(LOCAL_VAR3) /*prefetch next A block*/
+ pfd 1, 128(LOCAL_VAR2) /*prefetch next B block*/
+#endif
brctg LOCAL_VAR1,.L4x4_4_BK
ALIGN_4
ALIGN_4
.L4x4_BK: /*BK_CUR LOOP */
- CALC_4x4 LOCAL_VAR3,LOCAL_VAR2
+ ZCALC_4x4 LOCAL_VAR3,LOCAL_VAR2
brctg LOCAL_VAR1,.L4x4_BK
ALIGN_4
.L4x4_BK_Store:
/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/
-STORE_4x4 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE , LOCAL_VAR1 ,LOCAL_VAR2
+ZSTORE_4x4 ALPHA_VECT,ALPHA_VECT_I ,CIJ_LOCAL, LDC_BYTE,LOCAL_VAR1,LOCAL_VAR2
#if defined(TRMMKERNEL)
- RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,4,4
+ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR3,4,4
#endif
+
+brctg BM_CUR,.L4x4_BM
+
ALIGN_2
.L2x4:
#if defined(TRMMKERNEL)
/* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */
- RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,2,4
-
+ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,2,4
RefreshTempBk LOCAL_VAR1,BK,OFF,2,4
srl LOCAL_VAR1,2
srlg LOCAL_VAR1,BK,2 /*refresh BK*/
lgr LOCAL_VAR2,B /*refresh BPOINT*/
#endif
-ZERO_CVEC_2x4
+ZERO_ZCVEC_2x4
cijle LOCAL_VAR1,0,.L2x4_mod
ALIGN_4
.L2x4_4_BK: /*BK_CUR LOOP */
- CALC_2x4_4 LOCAL_VAR3,LOCAL_VAR2
+ ZCALC_2x4_4 LOCAL_VAR3,LOCAL_VAR2
+#if defined(PREFETCH_INS)
+ pfd 1, 128(LOCAL_VAR2)
+#endif
brctg LOCAL_VAR1,.L2x4_4_BK
ALIGN_4
ALIGN_4
.L2x4_BK: /*BK_CUR LOOP */
- CALC_2x4 LOCAL_VAR3,LOCAL_VAR2
+ ZCALC_2x4 LOCAL_VAR3,LOCAL_VAR2
brctg LOCAL_VAR1,.L2x4_BK
ALIGN_4
.L2x4_BK_Store:
/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/
-STORE_2x4 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE , LOCAL_VAR1 ,LOCAL_VAR2
+ZSTORE_2x4 ALPHA_VECT,ALPHA_VECT_I ,CIJ_LOCAL, LDC_BYTE ,LOCAL_VAR1,LOCAL_VAR2
#if defined(TRMMKERNEL)
- RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,2,4
+ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR3,2,4
#endif
ALIGN_4
srlg LOCAL_VAR1,BK,2 /*refresh BK*/
lgr LOCAL_VAR2,B /*refresh BPOINT*/
#endif
-ZERO_CVEC_1x4
+ZERO_ZCVEC_1x4
cijle LOCAL_VAR1,0,.L1x4_mod
ALIGN_4
-.L1x4_4_BK: /*BK_CUR LOOP */
- CALC_1x4_4 LOCAL_VAR3,LOCAL_VAR2
+.L1x4_4_BK: /*BK_CUR LOOP */
+ ZCALC_1x4_4 LOCAL_VAR3,LOCAL_VAR2
brctg LOCAL_VAR1,.L1x4_4_BK
ALIGN_4
ALIGN_4
.L1x4_BK: /*BK_CUR LOOP */
- CALC_1x4 LOCAL_VAR3,LOCAL_VAR2
+ ZCALC_1x4 LOCAL_VAR3,LOCAL_VAR2
brctg LOCAL_VAR1,.L1x4_BK
ALIGN_4
.L1x4_BK_Store:
/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/
-STORE_1x4 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE , LOCAL_VAR1 ,LOCAL_VAR2
+ZSTORE_1x4 ALPHA_VECT,ALPHA_VECT_I ,CIJ_LOCAL, LDC_BYTE,LOCAL_VAR1,LOCAL_VAR2
#if defined(TRMMKERNEL)
- RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,1,4
+ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR3,1,4
#endif
ALIGN_2
.Lx4_INNER_END:
/*add LDC_BYTE_COPY to new*/
-sllg LOCAL_VAR1,LDC_BYTE,2 /*op*4 */
+sllg LOCAL_VAR1,LDC_BYTE,2 /*LDC_BYTE*4: advance C by 4 columns */
#if defined(TRMMKERNEL) && !defined(LEFT)
aghi OFF,4
#endif
-sllg LOCAL_VAR2,BK,5 /*op*4*sizeof(double) =op*32* 2**5 */
+sllg LOCAL_VAR2,BK,5 /*BK*4*sizeof(complex) = BK*32 = BK<<5 */
la CIJ,0(CIJ,LOCAL_VAR1) /*refresh CIJ=CIJ+LDC_BYTE*4*/
-la B,0(B,LOCAL_VAR2) /*refresh B=B+Bk*4*sizeof(double) */
+la B,0(B,LOCAL_VAR2) /*refresh B=B+Bk*4*sizeof(complex) */
brctg BN_CUR,.LX4_BN
lgdr OFF,OFFSET
#endif
-srlg BM_CUR,BM,3
+srlg BM_CUR,BM,2
lgr LOCAL_VAR3,A
lgr CIJ_LOCAL,CIJ
-cijle BM_CUR,0,.L4x2
-
-
-ALIGN_4
-.L8x2_BM: /*BM_CUR LOOP */
-#if defined(TRMMKERNEL)
-
- /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */
- RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,8,2
- RefreshTempBk LOCAL_VAR1,BK,OFF,8,2
- srl LOCAL_VAR1,2
-
-#else
-srlg LOCAL_VAR1,BK,2 /*refresh BK*/
-lgr LOCAL_VAR2,B /*refresh BPOINT*/
-#endif
-ZERO_CVEC_8x2
-cijle LOCAL_VAR1,0,.L8x2_mod
-
-ALIGN_4
-.L8x2_4_BK: /*BK_CUR LOOP */
-#if defined(PREFETCH_INS)
- pfd 1, 256(LOCAL_VAR3)
- pfd 1,64(LOCAL_VAR2)
-#endif
- CALC_8x2_4 LOCAL_VAR3,LOCAL_VAR2
-brctg LOCAL_VAR1,.L8x2_4_BK
-
-ALIGN_4
-.L8x2_mod:
-#if defined(TRMMKERNEL)
- RefreshTempBk LOCAL_VAR1,BK,OFF,8,2
- nill LOCAL_VAR1,3
-#else
-la LOCAL_VAR1,3(0,0)
-NGR LOCAL_VAR1,BK /*refresh BK*/
-#endif
-jz .L8x2_BK_Store
-
-ALIGN_4
-.L8x2_BK: /*BK_CUR LOOP */
- CALC_8x2 LOCAL_VAR3,LOCAL_VAR2
-brctg LOCAL_VAR1,.L8x2_BK
-
-ALIGN_4
-.L8x2_BK_Store:
-/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/
-STORE_8x2 ALPHA_VECT,CIJ_LOCAL, LDC_BYTE
-#if defined(TRMMKERNEL)
- RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,8,2
-#endif
-ALIGN_4
-brctg BM_CUR,.L8x2_BM
-
-ALIGN_2
-.L4x2:
-
-tmll BM,4
-jz .L2x2
+cijle BM_CUR,0,.L2x2
ALIGN_4
.L4x2_BM: /*BM start*/
srl LOCAL_VAR1,2
#else
-srlg LOCAL_VAR1,BK,2 /*refresh BK*/
-lgr LOCAL_VAR2,B /*refresh BPOINT*/
+ srlg LOCAL_VAR1,BK,2 /*refresh BK*/
+ lgr LOCAL_VAR2,B /*refresh BPOINT*/
#endif
-ZERO_CVEC_4x2
+ZERO_ZCVEC_4x2
cijle LOCAL_VAR1,0,.L4x2_mod
ALIGN_4
-.L4x2_4_BK: /*BK_CUR LOOP */
- CALC_4x2_4 LOCAL_VAR3,LOCAL_VAR2
+.L4x2_4_BK: /*BK_CUR LOOP */
+ ZCALC_4x2_4 LOCAL_VAR3,LOCAL_VAR2
+#if defined(PREFETCH_INS)
+ pfd 1, 128(LOCAL_VAR3)
+#endif
brctg LOCAL_VAR1,.L4x2_4_BK
ALIGN_4
ALIGN_4
.L4x2_BK: /*BK_CUR LOOP */
- CALC_4x2 LOCAL_VAR3,LOCAL_VAR2
+ ZCALC_4x2 LOCAL_VAR3,LOCAL_VAR2
brctg LOCAL_VAR1,.L4x2_BK
ALIGN_4
.L4x2_BK_Store:
/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/
-STORE_4x2 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE
+ZSTORE_4x2 ALPHA_VECT,ALPHA_VECT_I ,CIJ_LOCAL, LDC_BYTE
#if defined(TRMMKERNEL)
- RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,4,2
-#endif
+ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR3,4,2
+#endif
+ALIGN_4
+brctg BM_CUR,.L4x2_BM
+
ALIGN_2
.L2x2:
srlg LOCAL_VAR1,BK,2 /*refresh BK*/
lgr LOCAL_VAR2,B /*refresh BPOINT*/
#endif
-ZERO_CVEC_2x2
+ZERO_ZCVEC_2x2
cijle LOCAL_VAR1,0,.L2x2_mod
ALIGN_4
.L2x2_4_BK: /*BK_CUR LOOP */
- CALC_2x2_4 LOCAL_VAR3,LOCAL_VAR2
+ ZCALC_2x2_4 LOCAL_VAR3,LOCAL_VAR2
+#if defined(PREFETCH_INS)
+ pfd 1, 256(LOCAL_VAR3)
+ pfd 1, 256(LOCAL_VAR2)
+#endif
brctg LOCAL_VAR1,.L2x2_4_BK
ALIGN_4
.L2x2_mod:
#if defined(TRMMKERNEL)
    RefreshTempBk LOCAL_VAR1,BK,OFF,2,2
    nill LOCAL_VAR1,3
#else
la LOCAL_VAR1,3(0,0)
NGR LOCAL_VAR1,BK /*refresh BK*/
#endif
jz .L2x2_BK_Store

ALIGN_4
.L2x2_BK: /*BK_CUR LOOP */
- CALC_2x2 LOCAL_VAR3,LOCAL_VAR2
+ ZCALC_2x2 LOCAL_VAR3,LOCAL_VAR2
brctg LOCAL_VAR1,.L2x2_BK
ALIGN_4
.L2x2_BK_Store:
/*store C, using LDC_BYTE and CIJ_LOCAL for memory addressing*/
-STORE_2x2 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE
+ZSTORE_2x2 ALPHA_VECT,ALPHA_VECT_I,CIJ_LOCAL,LDC_BYTE
#if defined(TRMMKERNEL)
- RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,2,2
+ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR3,2,2
#endif
ALIGN_2
.L1x2:
tmll BM,1
jz .Lx2_INNER_END
ALIGN_4
.L1x2_BM: /*BM start*/
#if defined(TRMMKERNEL)
    /* RefreshPointers  PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */
    RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,1,2
    RefreshTempBk LOCAL_VAR1,BK,OFF,1,2
    srl LOCAL_VAR1,2
#else
srlg LOCAL_VAR1,BK,2 /*refresh BK*/
lgr LOCAL_VAR2,B /*refresh BPOINT*/
#endif
-ZERO_CVEC_1x2
+ZERO_ZCVEC_1x2
cijle LOCAL_VAR1,0,.L1x2_mod
ALIGN_4
.L1x2_4_BK: /*BK_CUR LOOP */
- CALC_1x2_4 LOCAL_VAR3,LOCAL_VAR2
+ ZCALC_1x2_4 LOCAL_VAR3,LOCAL_VAR2
brctg LOCAL_VAR1,.L1x2_4_BK
ALIGN_4
.L1x2_mod:
#if defined(TRMMKERNEL)
    RefreshTempBk LOCAL_VAR1,BK,OFF,1,2
    nill LOCAL_VAR1,3
#else
la LOCAL_VAR1,3(0,0)
NGR LOCAL_VAR1,BK /*refresh BK*/
#endif
jz .L1x2_BK_Store

ALIGN_4
.L1x2_BK: /*BK_CUR LOOP */
- CALC_1x2 LOCAL_VAR3,LOCAL_VAR2
+ ZCALC_1x2 LOCAL_VAR3,LOCAL_VAR2
brctg LOCAL_VAR1,.L1x2_BK
ALIGN_4
.L1x2_BK_Store:
/*store C, using LDC_BYTE and CIJ_LOCAL for memory addressing*/
-STORE_1x2 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE
+ZSTORE_1x2 ALPHA_VECT,ALPHA_VECT_I,CIJ_LOCAL,LDC_BYTE
#if defined(TRMMKERNEL)
- RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,1,2
+ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR3,1,2
#endif
ALIGN_2
.Lx2_INNER_END:
/*advance CIJ and B past the BN=2 panel*/
-la LOCAL_VAR1,0(LDC_BYTE,LDC_BYTE) /*op*2 */
-sllg LOCAL_VAR2,BK,4 /*op*2*sizeof(double) =op*16* 2**4 */
-la CIJ,0(CIJ,LOCAL_VAR1) /*refresh CIJ=CIJ+LDC_BYTE*4*/
+la LOCAL_VAR1,0(LDC_BYTE,LDC_BYTE) /*LDC_BYTE*2 */
+sllg LOCAL_VAR2,BK,5 /*BK*2*sizeof(complex) =BK*2*16 =BK* 2**5 */
+la CIJ,0(CIJ,LOCAL_VAR1) /*refresh CIJ=CIJ+LDC_BYTE*2*/
#if defined(TRMMKERNEL) && !defined(LEFT)
aghi OFF,2
#endif
-la B,0(B,LOCAL_VAR2) /*refresh B=B+Bk*4*sizeof(double) */
+la B,0(B,LOCAL_VAR2) /*refresh B=B+Bk*2*sizeof(complex) */
/*********************X1 SECTION************************/
ALIGN_2
.Lx1:
tmll BN,1
jz .L_FUNC_END
#if defined(TRMMKERNEL)
/*off = offset;*/
lgdr OFF,OFFSET
#endif
-srlg BM_CUR,BM,3
+srlg BM_CUR,BM,2
lgr LOCAL_VAR3,A
lgr CIJ_LOCAL,CIJ
-cijle BM_CUR,0,.L4x1
-
+cijle BM_CUR,0,.L2x1
ALIGN_4
-.L8x1_BM: /*BM_CUR LOOP */
-#if defined(TRMMKERNEL)
-
- /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */
- RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,8,1
- RefreshTempBk LOCAL_VAR1,BK,OFF,8,1
- srl LOCAL_VAR1,2
-
-#else
-srlg LOCAL_VAR1,BK,2 /*refresh BK*/
-lgr LOCAL_VAR2,B /*refresh BPOINT*/
-#endif
-ZERO_CVEC_8x1
-cijle LOCAL_VAR1,0,.L8x1_mod
-
-ALIGN_4
-.L8x1_4_BK: /*BK_CUR LOOP */
-#if defined(PREFETCH_INS)
- pfd 1, 256(LOCAL_VAR3)
-#endif
- CALC_8x1_4 LOCAL_VAR3,LOCAL_VAR2
-brctg LOCAL_VAR1,.L8x1_4_BK
-
-ALIGN_4
-.L8x1_mod:
-#if defined(TRMMKERNEL)
- RefreshTempBk LOCAL_VAR1,BK,OFF,8,1
- nill LOCAL_VAR1,3
-#else
-la LOCAL_VAR1,3(0,0)
-NGR LOCAL_VAR1,BK /*refresh BK*/
-#endif
-jz .L8x1_BK_Store
-
-ALIGN_4
-.L8x1_BK: /*BK_CUR LOOP */
- CALC_8x1 LOCAL_VAR3,LOCAL_VAR2
-brctg LOCAL_VAR1,.L8x1_BK
-
-ALIGN_4
-.L8x1_BK_Store:
-/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/
-STORE_8x1 ALPHA_VECT,CIJ_LOCAL, LDC_BYTE
- #if defined(TRMMKERNEL)
- RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,8,1
-#endif
-ALIGN_4
-brctg BM_CUR,.L8x1_BM
-
-ALIGN_2
-.L4x1:
-
-tmll BM,4
-jz .L2x1
-
-ALIGN_4
.L4x1_BM: /*BM start*/
#if defined(TRMMKERNEL)
    /* RefreshPointers  PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */
    RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,4,1
    RefreshTempBk LOCAL_VAR1,BK,OFF,4,1
    srl LOCAL_VAR1,2
#else
srlg LOCAL_VAR1,BK,2 /*refresh BK*/
lgr LOCAL_VAR2,B /*refresh BPOINT*/
#endif
-ZERO_CVEC_4x1
+ZERO_ZCVEC_4x1
cijle LOCAL_VAR1,0,.L4x1_mod
ALIGN_4
-.L4x1_4_BK: /*BK_CUR LOOP */
- CALC_4x1_4 LOCAL_VAR3,LOCAL_VAR2
+.L4x1_4_BK: /*BK_CUR LOOP */
+ ZCALC_4x1_4 LOCAL_VAR3,LOCAL_VAR2
brctg LOCAL_VAR1,.L4x1_4_BK
ALIGN_4
.L4x1_mod:
#if defined(TRMMKERNEL)
    RefreshTempBk LOCAL_VAR1,BK,OFF,4,1
    nill LOCAL_VAR1,3
#else
la LOCAL_VAR1,3(0,0)
NGR LOCAL_VAR1,BK /*refresh BK*/
#endif
jz .L4x1_BK_Store

ALIGN_4
.L4x1_BK: /*BK_CUR LOOP */
- CALC_4x1 LOCAL_VAR3,LOCAL_VAR2
+ ZCALC_4x1 LOCAL_VAR3,LOCAL_VAR2
brctg LOCAL_VAR1,.L4x1_BK
ALIGN_4
.L4x1_BK_Store:
/*store C, using LDC_BYTE and CIJ_LOCAL for memory addressing*/
-STORE_4x1 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE
+ZSTORE_4x1 ALPHA_VECT,ALPHA_VECT_I,CIJ_LOCAL,LDC_BYTE
#if defined(TRMMKERNEL)
- RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,4,1
+ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR3,4,1
#endif
+ALIGN_4
+brctg BM_CUR,.L4x1_BM
+
ALIGN_2
.L2x1:
tmll BM,2
jz .L1x1
ALIGN_4
.L2x1_BM: /*BM start*/
#if defined(TRMMKERNEL)
    /* RefreshPointers  PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */
    RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,2,1
    RefreshTempBk LOCAL_VAR1,BK,OFF,2,1
    srl LOCAL_VAR1,2
#else
srlg LOCAL_VAR1,BK,2 /*refresh BK*/
lgr LOCAL_VAR2,B /*refresh BPOINT*/
#endif
-ZERO_CVEC_2x1
+ZERO_ZCVEC_2x1
cijle LOCAL_VAR1,0,.L2x1_mod
ALIGN_4
.L2x1_4_BK: /*BK_CUR LOOP */
- CALC_2x1_4 LOCAL_VAR3,LOCAL_VAR2
+ ZCALC_2x1_4 LOCAL_VAR3,LOCAL_VAR2
brctg LOCAL_VAR1,.L2x1_4_BK
ALIGN_4
.L2x1_mod:
#if defined(TRMMKERNEL)
    RefreshTempBk LOCAL_VAR1,BK,OFF,2,1
    nill LOCAL_VAR1,3
#else
la LOCAL_VAR1,3(0,0)
NGR LOCAL_VAR1,BK /*refresh BK*/
#endif
jz .L2x1_BK_Store

ALIGN_4
.L2x1_BK: /*BK_CUR LOOP */
- CALC_2x1 LOCAL_VAR3,LOCAL_VAR2
+ ZCALC_2x1 LOCAL_VAR3,LOCAL_VAR2
brctg LOCAL_VAR1,.L2x1_BK
ALIGN_4
.L2x1_BK_Store:
/*store C, using LDC_BYTE and CIJ_LOCAL for memory addressing*/
-STORE_2x1 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE
+ZSTORE_2x1 ALPHA_VECT,ALPHA_VECT_I,CIJ_LOCAL,LDC_BYTE
#if defined(TRMMKERNEL)
- RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,2,1
+ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR3,2,1
#endif
ALIGN_2
.L1x1:
tmll BM,1
jz .Lx1_INNER_END
ALIGN_4
.L1x1_BM: /*BM start*/
#if defined(TRMMKERNEL)
    /* RefreshPointers  PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */
    RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,1,1
    RefreshTempBk LOCAL_VAR1,BK,OFF,1,1
    srl LOCAL_VAR1,2
#else
srlg LOCAL_VAR1,BK,2 /*refresh BK*/
lgr LOCAL_VAR2,B /*refresh BPOINT*/
#endif
-ZERO_CVEC_1x1
+ZERO_ZCVEC_1x1
cijle LOCAL_VAR1,0,.L1x1_mod
ALIGN_4
.L1x1_4_BK: /*BK_CUR LOOP */
- CALC_1x1_4 LOCAL_VAR3,LOCAL_VAR2
+ ZCALC_1x1_4 LOCAL_VAR3,LOCAL_VAR2
brctg LOCAL_VAR1,.L1x1_4_BK
ALIGN_4
.L1x1_mod:
#if defined(TRMMKERNEL)
    RefreshTempBk LOCAL_VAR1,BK,OFF,1,1
    nill LOCAL_VAR1,3
#else
la LOCAL_VAR1,3(0,0)
NGR LOCAL_VAR1,BK /*refresh BK*/
#endif
jz .L1x1_BK_Store

ALIGN_4
.L1x1_BK: /*BK_CUR LOOP */
- CALC_1x1 LOCAL_VAR3,LOCAL_VAR2
+ ZCALC_1x1 LOCAL_VAR3,LOCAL_VAR2
brctg LOCAL_VAR1,.L1x1_BK
ALIGN_4
.L1x1_BK_Store:
-/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/
-STORE_1x1 ALPHA ,CIJ_LOCAL, LDC_BYTE
+/*store C, using CIJ_LOCAL for memory addressing*/
+ZSTORE_1x1 ALPHA,ALPHA_I,CIJ_LOCAL
#if defined(TRMMKERNEL)
- RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,1,1
+ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR3,1,1
#endif
ALIGN_2
.Lx1_INNER_END:
/*advance CIJ and B past the BN=1 panel*/
-sllg LOCAL_VAR2,BK,3 /*op*2*sizeof(double) =op*8* 2**3 */
+sllg LOCAL_VAR2,BK,4 /*BK*1*sizeof(complex) =BK*16 =BK* 2**4 */
la CIJ,0(CIJ,LDC_BYTE) /*refresh CIJ=CIJ+LDC_BYTE */
#if defined(TRMMKERNEL) && !defined(LEFT)
aghi OFF,1
#endif
-la B,0(B,LOCAL_VAR2) /*refresh B=B+Bk*1*sizeof(double) */
+la B,0(B,LOCAL_VAR2) /*refresh B=B+Bk*1*sizeof(complex) */
ALIGN_2
.L_FUNC_END:
/*end*/
+
+
#if defined(TRMMKERNEL)
- ld OFFSET,40(%r15)
- lmg %r6,%r13,48(%r15)
+ld OFFSET,40(%r15)
+lmg %r6,%r13,48(%r15)
#else
- lmg %r6,%r12,48(%r15)
+lmg %r6,%r12,48(%r15)
#endif
+ld %f9,128(%r15)
+ld %f10,136(%r15)
+ld %f11,144(%r15)
+ld %f12,152(%r15)
br %r14
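/* Epilogue note: on s390x, %r6-%r13 and %f8-%f15 are callee-saved, so the kernel */
/* restores %r6-%r13 (%r6-%r12 in the non-TRMM build, plus OFFSET reloaded from */
/* the stack in the TRMM build) and the clobbered FPRs %f9-%f12 before returning */
/* through the link register %r14. */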
.end
+
+
+
+
+
+
+
+
+
+++ /dev/null
-/**********************************Zero Vectors**************************************************/
-
-.macro ZERO_CVEC_8x4
- vzero %v16
- vzero %v17
- vzero %v18
- vzero %v19
- vzero %v20
- vzero %v21
- vzero %v22
- vzero %v23
- vzero %v24
- vzero %v25
- vzero %v26
- vzero %v27
- vzero %v28
- vzero %v29
- vzero %v30
- vzero %v31
-.endm
-
-
-.macro ZERO_CVEC_8x2
- vzero %v16
- vzero %v17
- vzero %v18
- vzero %v19
- vzero %v20
- vzero %v21
- vzero %v22
- vzero %v23
-
-.endm
-
-.macro ZERO_CVEC_8x1
- vzero %v16
- vzero %v17
- vzero %v18
- vzero %v19
-.endm
-
-.macro ZERO_CVEC_4x4
- vzero %v16
- vzero %v17
- vzero %v20
- vzero %v21
- vzero %v24
- vzero %v25
- vzero %v28
- vzero %v29
-.endm
-
-.macro ZERO_CVEC_4x2
- vzero %v16
- vzero %v17
- vzero %v20
- vzero %v21
-
-.endm
-
-.macro ZERO_CVEC_4x1
- vzero %v16
- vzero %v17
-.endm
-
-.macro ZERO_CVEC_2x4
- vzero %v16
- vzero %v17
- vzero %v20
- vzero %v21
-
-.endm
-
-.macro ZERO_CVEC_2x2
- vzero %v16
- vzero %v20
-
-.endm
-
-.macro ZERO_CVEC_2x1
- vzero %v16
-.endm
-
-.macro ZERO_CVEC_1x4
- vzero %v16
- vzero %v17
-.endm
-
-.macro ZERO_CVEC_1x2
- vzero %v16
-.endm
-
-.macro ZERO_CVEC_1x1
- LZDR %f1
-.endm
-
-
-/***********************************Helper Calculations*************************************/
-#define unit_size 8
-#define DISP(ind,stride,disp) (ind*stride+disp)
-#define DISP8(ind,disp) (ind*unit_size*8+disp)
-#define DISP4(ind,disp) (ind*unit_size*4+disp)
-#define DISP2(ind,disp) (ind*unit_size*2+disp)
-#define DISP1(ind,disp) (ind*unit_size+disp)
-#define N8 (8*unit_size)
-#define N4 (4*unit_size)
-#define N2 (2*unit_size)
-#define N1 (1*unit_size)
-
-.macro Calculate_8x4_I PTR_A_REG,PTR_B_REG,Index,IsLast
- vlrepg %v7, DISP4(\Index ,0)(\PTR_B_REG)
- vlrepg %v1, DISP4(\Index ,8)(\PTR_B_REG)
- vl %v2, DISP8(\Index , 0)(\PTR_A_REG)
- vl %v3, DISP8(\Index ,16)(\PTR_A_REG)
- vl %v4, DISP8(\Index ,32)(\PTR_A_REG)
- vl %v5, DISP8(\Index ,48)(\PTR_A_REG)
- vfmadb %v16,%v2,%v7,%v16
- vfmadb %v17,%v3,%v7,%v17
- vfmadb %v18,%v4,%v7,%v18
- vfmadb %v19,%v5,%v7,%v19
- vfmadb %v20,%v2,%v1,%v20
- vfmadb %v21,%v3,%v1,%v21
- vfmadb %v22,%v4,%v1,%v22
- vfmadb %v23,%v5,%v1,%v23
- vlrepg %v7, DISP4(\Index ,16)(\PTR_B_REG)
- vlrepg %v1, DISP4(\Index ,24)(\PTR_B_REG)
- .if \IsLast==1
- la \PTR_A_REG, DISP8(\Index ,64)(\PTR_A_REG)
- .endif
- vfmadb %v24,%v2,%v7,%v24
- vfmadb %v25,%v3,%v7,%v25
- vfmadb %v26,%v4,%v7,%v26
- vfmadb %v27,%v5,%v7,%v27
- vfmadb %v28,%v2,%v1,%v28
- vfmadb %v29,%v3,%v1,%v29
- vfmadb %v30,%v4,%v1,%v30
- vfmadb %v31,%v5,%v1,%v31
- .if \IsLast==1
- la \PTR_B_REG, DISP4(\Index ,32)(\PTR_B_REG)
- .endif
-.endm
-
-.macro Calculate_8x2_I PTR_A_REG,PTR_B_REG,Index,IsLast
- vlrepg %v7, DISP2(\Index ,0)(\PTR_B_REG)
- vlrepg %v1, DISP2(\Index ,8)(\PTR_B_REG)
- vl %v2, DISP8(\Index ,0)(\PTR_A_REG)
- vl %v3, DISP8(\Index ,16)(\PTR_A_REG)
- vl %v4, DISP8(\Index ,32)(\PTR_A_REG)
- vl %v5, DISP8(\Index ,48)(\PTR_A_REG)
- vfmadb %v16,%v2,%v7,%v16
- vfmadb %v17,%v3,%v7,%v17
- vfmadb %v18,%v4,%v7,%v18
- vfmadb %v19,%v5,%v7,%v19
- vfmadb %v20,%v2,%v1,%v20
- vfmadb %v21,%v3,%v1,%v21
- .if \IsLast==1
- la \PTR_A_REG, DISP8(\Index ,64)(\PTR_A_REG)
- .endif
- vfmadb %v22,%v4,%v1,%v22
- vfmadb %v23,%v5,%v1,%v23
- .if \IsLast==1
- la \PTR_B_REG, DISP2(\Index ,16)(\PTR_B_REG)
- .endif
-.endm
-
-.macro Calculate_8x1_I PTR_A_REG,PTR_B_REG,Index,IsLast
- vlrepg %v7, DISP1(\Index ,0)(\PTR_B_REG)
- vl %v2, DISP8(\Index ,0)(\PTR_A_REG)
- vl %v3, DISP8(\Index ,16)(\PTR_A_REG)
- vl %v4, DISP8(\Index ,32)(\PTR_A_REG)
- vl %v5, DISP8(\Index ,48)(\PTR_A_REG)
- vfmadb %v16,%v2,%v7,%v16
- .if \IsLast==1
- la \PTR_B_REG, DISP1(\Index ,8)(\PTR_B_REG)
- .endif
- vfmadb %v17,%v3,%v7,%v17
- vfmadb %v18,%v4,%v7,%v18
- vfmadb %v19,%v5,%v7,%v19
- .if \IsLast==1
- la \PTR_A_REG, DISP8(\Index ,64)(\PTR_A_REG)
- .endif
-.endm
-
-.macro Calculate_4x4_I PTR_A_REG,PTR_B_REG,Index,IsLast
- vlrepg %v7, DISP4(\Index ,0)(\PTR_B_REG)
- vlrepg %v1, DISP4(\Index ,8)(\PTR_B_REG)
- vl %v2, DISP4(\Index ,0)(\PTR_A_REG)
- vl %v3, DISP4(\Index ,16)(\PTR_A_REG)
- vfmadb %v16,%v2,%v7,%v16
- vfmadb %v17,%v3,%v7,%v17
- vfmadb %v20,%v2,%v1,%v20
- vfmadb %v21,%v3,%v1,%v21
- vlrepg %v7, DISP4(\Index ,16)(\PTR_B_REG)
- vlrepg %v1, DISP4(\Index ,24)(\PTR_B_REG)
- .if \IsLast==1
- la \PTR_A_REG, DISP4(\Index ,32)(\PTR_A_REG)
- .endif
- vfmadb %v24,%v2,%v7,%v24
- vfmadb %v25,%v3,%v7,%v25
- vfmadb %v28,%v2,%v1,%v28
- vfmadb %v29,%v3,%v1,%v29
- .if \IsLast==1
- la \PTR_B_REG, DISP4(\Index ,32)(\PTR_B_REG)
- .endif
-.endm
-
-.macro Calculate_4x2_I PTR_A_REG,PTR_B_REG,Index,IsLast
- vlrepg %v7, DISP2(\Index ,0)(\PTR_B_REG)
- vlrepg %v1, DISP2(\Index ,8)(\PTR_B_REG)
- vl %v2, DISP4(\Index ,0)(\PTR_A_REG)
- vl %v3, DISP4(\Index ,16)(\PTR_A_REG)
- vfmadb %v16,%v2,%v7,%v16
- vfmadb %v17,%v3,%v7,%v17
- .if \IsLast==1
- la \PTR_B_REG, DISP2(\Index ,16)(\PTR_B_REG)
- .endif
- vfmadb %v20,%v2,%v1,%v20
- vfmadb %v21,%v3,%v1,%v21
- .if \IsLast==1
- la \PTR_A_REG, DISP4(\Index ,32)(\PTR_A_REG)
- .endif
-.endm
-
-.macro Calculate_4x1_I PTR_A_REG,PTR_B_REG,Index,IsLast
- vlrepg %v7, DISP1(\Index ,0)(\PTR_B_REG)
- vl %v2, DISP4(\Index ,0)(\PTR_A_REG)
- vl %v3, DISP4(\Index ,16)(\PTR_A_REG)
- .if \IsLast==1
- la \PTR_B_REG, DISP1(\Index ,8)(\PTR_B_REG)
- .endif
- vfmadb %v16,%v2,%v7,%v16
- vfmadb %v17,%v3,%v7,%v17
- .if \IsLast==1
- la \PTR_A_REG, DISP4(\Index ,32)(\PTR_A_REG)
- .endif
-.endm
-
-.macro Calculate_2x2_I PTR_A_REG,PTR_B_REG,Index,IsLast
- vlrepg %v7, DISP2(\Index ,0)(\PTR_B_REG)
- vlrepg %v1, DISP2(\Index ,8)(\PTR_B_REG)
- vl %v2, DISP2(\Index ,0)(\PTR_A_REG)
- vfmadb %v16,%v2,%v7,%v16
- .if \IsLast==1
- la \PTR_A_REG, DISP2(\Index ,16)(\PTR_A_REG)
- .endif
- vfmadb %v20,%v2,%v1,%v20
- .if \IsLast==1
- la \PTR_B_REG, DISP2(\Index ,16)(\PTR_B_REG)
- .endif
-.endm
-
-
-
-.macro Calculate_2x1_I PTR_A_REG,PTR_B_REG,Index,IsLast
- vlrepg %v7, DISP1(\Index ,0)(\PTR_B_REG)
- vl %v2, DISP2(\Index ,0)(\PTR_A_REG)
- .if \IsLast==1
- la \PTR_B_REG, DISP1(\Index ,8)(\PTR_B_REG)
- .endif
- vfmadb %v16,%v2,%v7,%v16
- .if \IsLast==1
- la \PTR_A_REG, DISP2(\Index ,16)(\PTR_A_REG)
- .endif
-.endm
-
-.macro Calculate_1x1_I PTR_A_REG,PTR_B_REG,Index,IsLast
- ld %f2,DISP1(\Index ,0)(\PTR_A_REG) /**a*/
- .if \IsLast==1
- la \PTR_A_REG,DISP1(\Index ,8)(\PTR_A_REG)
- .endif
- madb %f1,%f2,DISP1(\Index ,0)(\PTR_B_REG)
- .if \IsLast==1
- la \PTR_B_REG,DISP1(\Index ,8)(\PTR_B_REG)
- .endif
-.endm
-
-.macro CALC_8x4 PTR_A_REG,PTR_B_REG
- Calculate_8x4_I \PTR_A_REG,\PTR_B_REG,0,1
-.endm
-
-.macro CALC_8x4_4 PTR_A_REG,PTR_B_REG
- Calculate_8x4_I \PTR_A_REG,\PTR_B_REG,0,0
- Calculate_8x4_I \PTR_A_REG,\PTR_B_REG,1,0
- Calculate_8x4_I \PTR_A_REG,\PTR_B_REG,2,0
- Calculate_8x4_I \PTR_A_REG,\PTR_B_REG,3,1
-.endm
-
-.macro CALC_8x2 PTR_A_REG,PTR_B_REG
- Calculate_8x2_I \PTR_A_REG,\PTR_B_REG,0,1
-.endm
-
-.macro CALC_8x2_4 PTR_A_REG,PTR_B_REG
- Calculate_8x2_I \PTR_A_REG,\PTR_B_REG,0,0
- Calculate_8x2_I \PTR_A_REG,\PTR_B_REG,1,0
- Calculate_8x2_I \PTR_A_REG,\PTR_B_REG,2,0
- Calculate_8x2_I \PTR_A_REG,\PTR_B_REG,3,1
-.endm
-
-.macro CALC_8x1 PTR_A_REG,PTR_B_REG
- Calculate_8x1_I \PTR_A_REG,\PTR_B_REG,0,1
-.endm
-
-.macro CALC_8x1_4 PTR_A_REG,PTR_B_REG
- Calculate_8x1_I \PTR_A_REG,\PTR_B_REG,0,0
- Calculate_8x1_I \PTR_A_REG,\PTR_B_REG,1,0
- Calculate_8x1_I \PTR_A_REG,\PTR_B_REG,2,0
- Calculate_8x1_I \PTR_A_REG,\PTR_B_REG,3,1
-.endm
-
-.macro CALC_4x4 PTR_A_REG,PTR_B_REG
- Calculate_4x4_I \PTR_A_REG,\PTR_B_REG,0,1
-.endm
-
-.macro CALC_4x4_4 PTR_A_REG,PTR_B_REG
- Calculate_4x4_I \PTR_A_REG,\PTR_B_REG,0,0
- Calculate_4x4_I \PTR_A_REG,\PTR_B_REG,1,0
- Calculate_4x4_I \PTR_A_REG,\PTR_B_REG,2,0
- Calculate_4x4_I \PTR_A_REG,\PTR_B_REG,3,1
-.endm
-
-.macro CALC_4x2 PTR_A_REG,PTR_B_REG
- Calculate_4x2_I \PTR_A_REG,\PTR_B_REG,0,1
-.endm
-
-.macro CALC_4x2_4 PTR_A_REG,PTR_B_REG
- Calculate_4x2_I \PTR_A_REG,\PTR_B_REG,0,0
- Calculate_4x2_I \PTR_A_REG,\PTR_B_REG,1,0
- Calculate_4x2_I \PTR_A_REG,\PTR_B_REG,2,0
- Calculate_4x2_I \PTR_A_REG,\PTR_B_REG,3,1
-.endm
-
-.macro CALC_4x1 PTR_A_REG,PTR_B_REG
- Calculate_4x1_I \PTR_A_REG,\PTR_B_REG,0,1
-.endm
-
-.macro CALC_4x1_4 PTR_A_REG,PTR_B_REG
- Calculate_4x1_I \PTR_A_REG,\PTR_B_REG,0,0
- Calculate_4x1_I \PTR_A_REG,\PTR_B_REG,1,0
- Calculate_4x1_I \PTR_A_REG,\PTR_B_REG,2,0
- Calculate_4x1_I \PTR_A_REG,\PTR_B_REG,3,1
-.endm
-
-.macro CALC_2x4 PTR_A_REG,PTR_B_REG
- Calculate_4x2_I \PTR_B_REG,\PTR_A_REG,0,1
-.endm
-
-.macro CALC_2x4_4 PTR_A_REG,PTR_B_REG
- Calculate_4x2_I \PTR_B_REG,\PTR_A_REG,0,0
- Calculate_4x2_I \PTR_B_REG,\PTR_A_REG,1,0
- Calculate_4x2_I \PTR_B_REG,\PTR_A_REG,2,0
- Calculate_4x2_I \PTR_B_REG,\PTR_A_REG,3,1
-.endm
-
-.macro CALC_2x2 PTR_A_REG,PTR_B_REG
- Calculate_2x2_I \PTR_A_REG,\PTR_B_REG,0,1
-.endm
-
-.macro CALC_2x2_4 PTR_A_REG,PTR_B_REG
- Calculate_2x2_I \PTR_A_REG,\PTR_B_REG,0,0
- Calculate_2x2_I \PTR_A_REG,\PTR_B_REG,1,0
- Calculate_2x2_I \PTR_A_REG,\PTR_B_REG,2,0
- Calculate_2x2_I \PTR_A_REG,\PTR_B_REG,3,1
-.endm
-
-.macro CALC_2x1 PTR_A_REG,PTR_B_REG
- Calculate_2x1_I \PTR_A_REG,\PTR_B_REG,0,1
-.endm
-
-.macro CALC_2x1_4 PTR_A_REG,PTR_B_REG
- Calculate_2x1_I \PTR_A_REG,\PTR_B_REG,0,0
- Calculate_2x1_I \PTR_A_REG,\PTR_B_REG,1,0
- Calculate_2x1_I \PTR_A_REG,\PTR_B_REG,2,0
- Calculate_2x1_I \PTR_A_REG,\PTR_B_REG,3,1
-.endm
-
-.macro CALC_1x4 PTR_A_REG,PTR_B_REG
- Calculate_4x1_I \PTR_B_REG,\PTR_A_REG,0,1
-.endm
-
-.macro CALC_1x4_4 PTR_A_REG,PTR_B_REG
- Calculate_4x1_I \PTR_B_REG,\PTR_A_REG,0,0
- Calculate_4x1_I \PTR_B_REG,\PTR_A_REG,1,0
- Calculate_4x1_I \PTR_B_REG,\PTR_A_REG,2,0
- Calculate_4x1_I \PTR_B_REG,\PTR_A_REG,3,1
-.endm
-
-.macro CALC_1x2 PTR_A_REG,PTR_B_REG
- Calculate_2x1_I \PTR_B_REG,\PTR_A_REG,0,1
-.endm
-
-.macro CALC_1x2_4 PTR_A_REG,PTR_B_REG
- Calculate_2x1_I \PTR_B_REG,\PTR_A_REG,0,0
- Calculate_2x1_I \PTR_B_REG,\PTR_A_REG,1,0
- Calculate_2x1_I \PTR_B_REG,\PTR_A_REG,2,0
- Calculate_2x1_I \PTR_B_REG,\PTR_A_REG,3,1
-.endm
-
-.macro CALC_1x1 PTR_A_REG,PTR_B_REG
- Calculate_1x1_I \PTR_A_REG,\PTR_B_REG,0,1
-.endm
-
-.macro CALC_1x1_4 PTR_A_REG,PTR_B_REG
- Calculate_1x1_I \PTR_A_REG,\PTR_B_REG,0,0
- Calculate_1x1_I \PTR_A_REG,\PTR_B_REG,1,0
- Calculate_1x1_I \PTR_A_REG,\PTR_B_REG,2,0
- Calculate_1x1_I \PTR_A_REG,\PTR_B_REG,3,1
-.endm
-
-
-/**************************************STORAGE*************************************************/
-
-
-.macro Multiply_8x1 vr1,vr2,vr3,vr4,va1,va2,va3,va4,vb1
- #if defined(TRMMKERNEL)
- vfmdb \vr1,\va1,\vb1
- vfmdb \vr2,\va2,\vb1
- vfmdb \vr3,\va3,\vb1
- vfmdb \vr4,\va4,\vb1
- #else
- vfmadb \vr1,\va1,\vb1,\vr1
- vfmadb \vr2,\va2,\vb1,\vr2
- vfmadb \vr3,\va3,\vb1,\vr3
- vfmadb \vr4,\va4,\vb1,\vr4
- #endif
-.endm
-
-.macro Multiply_4x1 vr1,vr2, va1,va2, vb1
- #if defined(TRMMKERNEL)
- vfmdb \vr1,\va1,\vb1
- vfmdb \vr2,\va2,\vb1
- #else
- vfmadb \vr1,\va1,\vb1,\vr1
- vfmadb \vr2,\va2,\vb1,\vr2
- #endif
-.endm
-
-.macro Multiply_2x1 vr1, va1,vb1
- #if defined(TRMMKERNEL)
- vfmdb \vr1,\va1,\vb1
- #else
- vfmadb \vr1,\va1,\vb1,\vr1
- #endif
-.endm
-
-
-.macro STORE_8x4 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL , LV1 ,LV2
- la \LV1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL)
-#if !defined(TRMMKERNEL)
- vl %v1,0(\CIJ_REG)
- vl %v2,16(\CIJ_REG)
- vl %v3,32(\CIJ_REG)
- vl %v4,48(\CIJ_REG)
-#endif
- Multiply_8x1 %v1,%v2,%v3,%v4, %v16,%v17,%v18,%v19 ,\ALPHA_VECREG
- vst %v1,0(\CIJ_REG)
- vst %v2,16(\CIJ_REG)
- vst %v3,32(\CIJ_REG)
- vst %v4,48(\CIJ_REG)
-
- la \LV2,0(\LV1,\LDC_BYTE_ORIGINAL )
-#if !defined(TRMMKERNEL)
- vl %v16,0(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- vl %v17,16(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- vl %v18,32(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- vl %v19,48(\CIJ_REG,\LDC_BYTE_ORIGINAL)
-#endif
- Multiply_8x1 %v16,%v17,%v18,%v19, %v20,%v21,%v22,%v23 ,\ALPHA_VECREG
- vst %v16,0(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- vst %v17,16(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- vst %v18,32(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- vst %v19,48(\CIJ_REG,\LDC_BYTE_ORIGINAL)
-
- #if !defined(TRMMKERNEL)
- vl %v1,0(\CIJ_REG,\LV1)
- vl %v2,16(\CIJ_REG,\LV1)
- vl %v3,32(\CIJ_REG,\LV1)
- vl %v4,48(\CIJ_REG,\LV1)
-#endif
- Multiply_8x1 %v1,%v2,%v3,%v4, %v24,%v25,%v26,%v27 ,\ALPHA_VECREG
- vst %v1,0(\CIJ_REG,\LV1)
- vst %v2,16(\CIJ_REG,\LV1)
- vst %v3,32(\CIJ_REG,\LV1)
- vst %v4,48(\CIJ_REG,\LV1)
-
-#if !defined(TRMMKERNEL)
- vl %v16,0(\CIJ_REG,\LV2)
- vl %v17,16(\CIJ_REG,\LV2)
- vl %v18,32(\CIJ_REG,\LV2)
- vl %v19,48(\CIJ_REG,\LV2)
-#endif
- Multiply_8x1 %v16,%v17,%v18,%v19, %v28,%v29,%v30,%v31 ,\ALPHA_VECREG
- vst %v16,0(\CIJ_REG,\LV2)
- vst %v17,16(\CIJ_REG,\LV2)
- vst %v18,32(\CIJ_REG,\LV2)
- vst %v19,48(\CIJ_REG,\LV2)
-
- la \CIJ_REG,64(\CIJ_REG)
-
-.endm
-
-.macro STORE_8x2 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL
-#if !defined(TRMMKERNEL)
- vl %v1,0(\CIJ_REG)
- vl %v2,16(\CIJ_REG)
- vl %v3,32(\CIJ_REG)
- vl %v4,48(\CIJ_REG)
-#endif
- Multiply_8x1 %v1,%v2,%v3,%v4, %v16,%v17,%v18,%v19 ,\ALPHA_VECREG
- vst %v1,0(\CIJ_REG)
- vst %v2,16(\CIJ_REG)
- vst %v3,32(\CIJ_REG)
- vst %v4,48(\CIJ_REG)
-
-#if !defined(TRMMKERNEL)
- vl %v16,0(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- vl %v17,16(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- vl %v18,32(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- vl %v19,48(\CIJ_REG,\LDC_BYTE_ORIGINAL)
-#endif
- Multiply_8x1 %v16,%v17,%v18,%v19, %v20,%v21,%v22,%v23 ,\ALPHA_VECREG
- vst %v16,0(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- vst %v17,16(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- vst %v18,32(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- vst %v19,48(\CIJ_REG,\LDC_BYTE_ORIGINAL)
-
- la \CIJ_REG,64(\CIJ_REG)
-
-.endm
-
-.macro STORE_8x1 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL
-
-#if !defined(TRMMKERNEL)
- vl %v1,0(\CIJ_REG)
- vl %v2,16(\CIJ_REG)
- vl %v3,32(\CIJ_REG)
- vl %v4,48(\CIJ_REG)
-#endif
- Multiply_8x1 %v1,%v2,%v3,%v4, %v16,%v17,%v18,%v19 ,\ALPHA_VECREG
- vst %v1,0(\CIJ_REG)
- vst %v2,16(\CIJ_REG)
- vst %v3,32(\CIJ_REG)
- vst %v4,48(\CIJ_REG)
-
- la \CIJ_REG,64(\CIJ_REG)
-.endm
-
-
-.macro STORE_4x4 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL, LV1 ,LV2
- la \LV1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL)
-#if !defined(TRMMKERNEL)
- vl %v1,0(\CIJ_REG)
- vl %v2,16(\CIJ_REG)
-#endif
- Multiply_4x1 %v1,%v2 , %v16,%v17 ,\ALPHA_VECREG
- vst %v1,0(\CIJ_REG)
- vst %v2,16(\CIJ_REG)
-
- la \LV2,0(\LV1,\LDC_BYTE_ORIGINAL )
-#if !defined(TRMMKERNEL)
- vl %v16,0(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- vl %v17,16(\CIJ_REG,\LDC_BYTE_ORIGINAL)
-#endif
- Multiply_4x1 %v16,%v17 , %v20,%v21 ,\ALPHA_VECREG
- vst %v16,0(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- vst %v17,16(\CIJ_REG,\LDC_BYTE_ORIGINAL)
-
- #if !defined(TRMMKERNEL)
- vl %v1,0(\CIJ_REG,\LV1)
- vl %v2,16(\CIJ_REG,\LV1)
-#endif
- Multiply_4x1 %v1,%v2 , %v24,%v25 ,\ALPHA_VECREG
- vst %v1,0(\CIJ_REG,\LV1)
- vst %v2,16(\CIJ_REG,\LV1)
-
-#if !defined(TRMMKERNEL)
- vl %v16,0(\CIJ_REG,\LV2)
- vl %v17,16(\CIJ_REG,\LV2)
-#endif
- Multiply_4x1 %v16,%v17, %v28,%v29 ,\ALPHA_VECREG
- vst %v16,0(\CIJ_REG,\LV2)
- vst %v17,16(\CIJ_REG,\LV2)
-
- la \CIJ_REG,32(\CIJ_REG)
-
-.endm
-
-
-.macro STORE_4x2 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL
-
-#if !defined(TRMMKERNEL)
- vl %v1,0(\CIJ_REG)
- vl %v2,16(\CIJ_REG)
-#endif
- Multiply_4x1 %v1,%v2 , %v16,%v17 ,\ALPHA_VECREG
- vst %v1,0(\CIJ_REG)
- vst %v2,16(\CIJ_REG)
-
-#if !defined(TRMMKERNEL)
- vl %v16,0(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- vl %v17,16(\CIJ_REG,\LDC_BYTE_ORIGINAL)
-#endif
- Multiply_4x1 %v16,%v17 , %v20,%v21 ,\ALPHA_VECREG
- vst %v16,0(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- vst %v17,16(\CIJ_REG,\LDC_BYTE_ORIGINAL)
-
- la \CIJ_REG,32(\CIJ_REG)
-
-.endm
-.macro STORE_4x1 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL
-
-#if !defined(TRMMKERNEL)
- vl %v1,0(\CIJ_REG)
- vl %v2,16(\CIJ_REG)
-#endif
- Multiply_4x1 %v1,%v2 , %v16,%v17 ,\ALPHA_VECREG
- vst %v1,0(\CIJ_REG)
- vst %v2,16(\CIJ_REG)
-
- la \CIJ_REG,32(\CIJ_REG)
-
-.endm
-
-.macro STORE_2x2 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL
-
-#if !defined(TRMMKERNEL)
- vl %v1,0(\CIJ_REG)
-#endif
- Multiply_2x1 %v1,%v16,\ALPHA_VECREG
- vst %v1,0(\CIJ_REG)
-
-#if !defined(TRMMKERNEL)
- vl %v2,0(\CIJ_REG,\LDC_BYTE_ORIGINAL)
-#endif
- Multiply_2x1 %v2,%v20,\ALPHA_VECREG
- vst %v2,0(\CIJ_REG,\LDC_BYTE_ORIGINAL)
-
- la \CIJ_REG,16(\CIJ_REG)
-
-.endm
-
-
-.macro STORE_2x1 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL
-
-#if !defined(TRMMKERNEL)
- vl %v1,0(\CIJ_REG)
-#endif
- Multiply_2x1 %v1,%v16,\ALPHA_VECREG
- vst %v1,0(\CIJ_REG)
-
- la \CIJ_REG,16(\CIJ_REG)
-.endm
-
-
-/*STORE C1X1*/
-.macro STORE_1x1 ALPHA_FLOAT,CIJ_REG,LDC_BYTE_ORIGINAL
-
-#if defined(TRMMKERNEL)
- mdbr %f1,\ALPHA_FLOAT
- std %f1,0(CIJ_LOCAL)
-#else
- ld %f2,0(CIJ_LOCAL)
- madbr %f2,%f1,\ALPHA_FLOAT
- std %f2,0(CIJ_LOCAL)
-#endif
- la \CIJ_REG,8(\CIJ_REG)
-.endm
-
-/*reversed ones*/
-
-.macro STORE_2x4 ALPHA_REG,CIJ_REG , LDC_BYTE_ORIGINAL , LV1 ,LV2
-/**/
- vfmdb %v1,%v16,\ALPHA_REG
- vfmdb %v2,%v17,\ALPHA_REG
- vfmdb %v6,%v20,\ALPHA_REG
- vfmdb %v7,%v21,\ALPHA_REG
- vrepg %v4,%v1,1
- vrepg %v5,%v6,1
- la \LV1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL)
- #if !defined(TRMMKERNEL)
- adb %f1, 0(\CIJ_REG)
- #endif
- std %f1,0(\CIJ_REG)
- #if !defined(TRMMKERNEL)
- adb %f6, 8(\CIJ_REG)
- #endif
- std %f6,8(\CIJ_REG)
- #if !defined(TRMMKERNEL)
- adb %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- #endif
- std %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- #if !defined(TRMMKERNEL)
- adb %f5,8(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- #endif
- std %f5,8(\CIJ_REG,\LDC_BYTE_ORIGINAL)
-
- /*add LDC_BYTE */
- la \LV2,0(\LV1,\LDC_BYTE_ORIGINAL )
- vrepg %v4,%v2,1
- vrepg %v5,%v7,1
- #if !defined(TRMMKERNEL)
- adb %f2,0(\CIJ_REG,\LV1)
- #endif
- std %f2,0(\CIJ_REG,\LV1)
- #if !defined(TRMMKERNEL)
- adb %f7,8(\CIJ_REG,\LV1)
- #endif
- std %f7,8(\CIJ_REG,\LV1)
- #if !defined(TRMMKERNEL)
- adb %f4,0(\CIJ_REG,\LV2)
- #endif
- std %f4,0(\CIJ_REG,\LV2)
- #if !defined(TRMMKERNEL)
- adb %f5,8(\CIJ_REG,\LV2)
- #endif
- std %f5,8(\CIJ_REG,\LV2)
-
- la \CIJ_REG,16(\CIJ_REG)
-
-.endm
-
-.macro STORE_1x4 ALPHA_REG,CIJ_REG , LDC_BYTE_ORIGINAL , LV1 ,LV2
-
- vfmdb %v1,%v16,\ALPHA_REG
- vfmdb %v2,%v17,\ALPHA_REG
- vrepg %v4,%v1,1
- vrepg %v5,%v2,1
- la \LV1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL)
- #if !defined(TRMMKERNEL)
- adb %f1, 0(\CIJ_REG)
- #endif
- std %f1,0(\CIJ_REG)
- #if !defined(TRMMKERNEL)
- adb %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- #endif
- std %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- /*add LDC_BYTE */
- la \LV2,0(\LV1,\LDC_BYTE_ORIGINAL )
- #if !defined(TRMMKERNEL)
- adb %f2,0(\CIJ_REG,\LV1)
- #endif
- std %f2,0(\CIJ_REG,\LV1)
- #if !defined(TRMMKERNEL)
- adb %f5,0(\CIJ_REG,\LV2)
- #endif
- std %f5,0(\CIJ_REG,\LV2)
-
- la \CIJ_REG,8(\CIJ_REG)
-
-.endm
-
- .macro STORE_1x2 ALPHA_REG,CIJ_REG , LDC_BYTE_ORIGINAL
-/**/
- vfmdb %v1,%v16,\ALPHA_REG
- vrepg %v4,%v1,1
- #if !defined(TRMMKERNEL)
- adb %f1, 0(\CIJ_REG)
- #endif
- std %f1,0(\CIJ_REG)
- #if !defined(TRMMKERNEL)
- adb %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL)
- #endif
- std %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL)
-
- la \CIJ_REG,8(\CIJ_REG)
-
-.endm
-
-
-
-
-/****************************TRMM POINTER REFRESH MACROSES*************************/
-
-.macro RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- /* ptrbb = bb;*/
- lgr \PTR_B,\B_VAL /*refresh BPOINT*/
-
- #else
- /* ptrba =ptrba+ off*C_A;
- ptrbb = bb + off*C_B;*/
-.if \C_B==4
- .if \C_A==8
- sllg \PTR_B, \OFF_VAL,5
- la \PTR_A,0(\PTR_A,\PTR_B) /*ptrba+off*4*/
- agr \PTR_A,\PTR_B /*ptrba+off*4**/
- la \PTR_B,0(\B_VAL,\PTR_B)
- .elseif \C_A==4
- sllg \PTR_B, \OFF_VAL,5
- agr \PTR_A,\PTR_B /*ptrba+off*4**/
- la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/
- .elseif \C_A==2
- sllg \PTR_B, \OFF_VAL,4
- la \PTR_A,0(\PTR_A,\PTR_B) /*ptrba+off*2**/
- agr \PTR_B, \PTR_B
- la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/
-
- .elseif \C_A==1
- sllg \PTR_B, \OFF_VAL,3
- agr \PTR_A,\PTR_B /*ptrba+off*4**/
- sllg \PTR_B, \OFF_VAL,5
- la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/
- .endif
-
-.elseif \C_B==2
- .if \C_A==8
- sllg \PTR_B, \OFF_VAL,6
- agr \PTR_A,\PTR_B /*ptrba+off*8**/
- sllg \PTR_B, \OFF_VAL,4
- la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/
- .elseif \C_A==4
- sllg \PTR_B, \OFF_VAL,4
- la \PTR_A,0(\PTR_A,\PTR_B) /*ptrba+off*2**/
- agr \PTR_A,\PTR_B /*ptrba+off*2**/
- la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/
- .elseif \C_A==2
- sllg \PTR_B, \OFF_VAL,4
- agr \PTR_A,\PTR_B /*ptrba+off*2**/
- la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/
- .elseif \C_A==1
- sllg \PTR_B, \OFF_VAL,3
- la \PTR_A,0(\PTR_A,\PTR_B) /*ptrba+off*1**/
- agr \PTR_B,\PTR_B /* off+off**/
- la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/
- .endif
-
-.elseif \C_B==1
- .if \C_A==8
- sllg \PTR_B, \OFF_VAL,6
- agr \PTR_A,\PTR_B /*ptrba+off*8**/
- sllg \PTR_B, \OFF_VAL,3
- la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/
- .elseif \C_A==4
- sllg \PTR_B, \OFF_VAL,5
- agr \PTR_A,\PTR_B /*ptrba+off*4**/
- sllg \PTR_B, \OFF_VAL,3
- la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/
- .elseif \C_A==2
- sllg \PTR_B, \OFF_VAL,3
- la \PTR_A,0(\PTR_A,\PTR_B) /*ptrba+off*1**/
- agr \PTR_A,\PTR_B /*ptrba+off*1**/
- la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/
-
- .elseif \C_A==1
- sllg \PTR_B, \OFF_VAL,3
- agr \PTR_A,\PTR_B /*ptrba+off*1**/
- la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/
- .endif
-.endif
-
-
- #endif
-.endm
-
-/**/
-.macro RefreshTempBk TEMP_VAL,BK_VAL,OFF_VAL,INCR_A,INCR_B
- #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
- /* temp = bk-off;*/
- sgrk \TEMP_VAL,\BK_VAL,\OFF_VAL
-
- #elif defined(LEFT)
- /* temp = off+INCR_A; // number of values in A */
- la \TEMP_VAL,\INCR_A(\OFF_VAL)
- #else
- /* temp = off+INCR_B // number of values in B*/
- la \TEMP_VAL,\INCR_B(\OFF_VAL)
- #endif
-
-.endm
-
-
-.macro RefreshPointersAndOFF TEMP_VAL,BK_VAL,OFF_VAL,PTR_B,PTR_A,C_A,C_B
-
- #if ( defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- /*temp = bk - off;*/
- sgrk \TEMP_VAL,\BK_VAL,\OFF_VAL
- #ifdef LEFT
- /*temp -= 8; // number of values in A*/
- lay \TEMP_VAL,-\C_A(\TEMP_VAL)
- #else
- /*temp -= 4; // number of values in B*/
- lay \TEMP_VAL,-\C_B(\TEMP_VAL)
- #endif
- /*ptrba += temp*C_A;
- ptrbb += temp*C_B;*/
- .if \C_A==8
- sllg \TEMP_VAL, \TEMP_VAL,6
- .elseif \C_A==4
- sllg \TEMP_VAL, \TEMP_VAL,5 /*temp*4*/
- .elseif \C_A==2
- sllg \TEMP_VAL, \TEMP_VAL,4 /*temp*2*/
- .elseif \C_A==1
- sllg \TEMP_VAL, \TEMP_VAL,3 /*temp*1*/
- .endif
- la \PTR_A,0(\PTR_A,\TEMP_VAL) /*ptrba+temp*C_A*/
- /*we do not need to refresh ptrbb. so lets ignore it*/
-
- #endif
-
- #ifdef LEFT
- /*off += 8; // number of values in A*/
- aghi \OFF_VAL,\C_A
- #endif
-.endm
\ No newline at end of file
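
/* For reference, the C-level logic that the three TRMM refresh macros above mirror, */
/* assembled from the comment fragments inside them (names such as ptrba, ptrbb, */
/* off, temp follow those comments and are illustrative): */
/* */
/* RefreshPointers: */
/* #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) */
/*     ptrbb = bb; */
/* #else */
/*     ptrba += off*C_A; ptrbb = bb + off*C_B; */
/* #endif */
/* */
/* RefreshTempBk: */
/* #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) */
/*     temp = bk - off; */
/* #elif defined(LEFT) */
/*     temp = off + C_A;  // number of values in A */
/* #else */
/*     temp = off + C_B;  // number of values in B */
/* #endif */
/* */
/* RefreshPointersAndOFF (after the tile): */
/* #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) */
/*     temp = bk - off; temp -= (LEFT ? C_A : C_B); ptrba += temp*C_A; */
/* #endif */
/* #ifdef LEFT */
/*     off += C_A; */
/* #endif */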