CTRMMKERNEL = ../generic/ztrmmkernel_2x2.c
ZTRMMKERNEL = ../generic/ztrmmkernel_2x2.c
-SGEMMKERNEL = ../generic/gemmkernel_2x2.c
-SGEMMONCOPY = ../generic/gemm_ncopy_2.c
-SGEMMOTCOPY = ../generic/gemm_tcopy_2.c
-SGEMMONCOPYOBJ = sgemm_oncopy.o
-SGEMMOTCOPYOBJ = sgemm_otcopy.o
+SGEMMKERNEL = ../mips/sgemm_kernel_8x8_msa.c
+SGEMMONCOPY = ../mips/sgemm_ncopy_8_msa.c
+SGEMMOTCOPY = ../mips/sgemm_tcopy_8_msa.c
+SGEMMONCOPYOBJ = sgemm_oncopy.o
+SGEMMOTCOPYOBJ = sgemm_otcopy.o
DGEMMKERNEL = ../mips/dgemm_kernel_8x4_msa.c
DGEMMINCOPY = ../mips/dgemm_ncopy_8_msa.c
pa0 += 8;
pb0 += 4;
- for (l = (k - 1); l--;)
+ for (l = ((k - 1) / 2); l--;)
+ {
+ LD_DP4(pa0, 2, src_a0, src_a1, src_a2, src_a3);
+ LD_DP2(pb0, 2, src_b0, src_b1);
+
+ src_b = (v2f64) __msa_ilvr_d((v2i64) src_b0, (v2i64) src_b0);
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+ res2 += src_a2 * src_b;
+ res3 += src_a3 * src_b;
+
+ src_b = (v2f64) __msa_ilvl_d((v2i64) src_b0, (v2i64) src_b0);
+ res4 += src_a0 * src_b;
+ res5 += src_a1 * src_b;
+ res6 += src_a2 * src_b;
+ res7 += src_a3 * src_b;
+
+ src_b = (v2f64) __msa_ilvr_d((v2i64) src_b1, (v2i64) src_b1);
+ res8 += src_a0 * src_b;
+ res9 += src_a1 * src_b;
+ res10 += src_a2 * src_b;
+ res11 += src_a3 * src_b;
+
+ src_b = (v2f64) __msa_ilvl_d((v2i64) src_b1, (v2i64) src_b1);
+ res12 += src_a0 * src_b;
+ res13 += src_a1 * src_b;
+ res14 += src_a2 * src_b;
+ res15 += src_a3 * src_b;
+
+ pa0 += 8;
+ pb0 += 4;
+
+ LD_DP4(pa0, 2, src_a0, src_a1, src_a2, src_a3);
+ LD_DP2(pb0, 2, src_b0, src_b1);
+
+ src_b = (v2f64) __msa_ilvr_d((v2i64) src_b0, (v2i64) src_b0);
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+ res2 += src_a2 * src_b;
+ res3 += src_a3 * src_b;
+
+ src_b = (v2f64) __msa_ilvl_d((v2i64) src_b0, (v2i64) src_b0);
+ res4 += src_a0 * src_b;
+ res5 += src_a1 * src_b;
+ res6 += src_a2 * src_b;
+ res7 += src_a3 * src_b;
+
+ src_b = (v2f64) __msa_ilvr_d((v2i64) src_b1, (v2i64) src_b1);
+ res8 += src_a0 * src_b;
+ res9 += src_a1 * src_b;
+ res10 += src_a2 * src_b;
+ res11 += src_a3 * src_b;
+
+ src_b = (v2f64) __msa_ilvl_d((v2i64) src_b1, (v2i64) src_b1);
+ res12 += src_a0 * src_b;
+ res13 += src_a1 * src_b;
+ res14 += src_a2 * src_b;
+ res15 += src_a3 * src_b;
+
+ pa0 += 8;
+ pb0 += 4;
+ }
+
+ if ((k - 1) & 1)
{
LD_DP4(pa0, 2, src_a0, src_a1, src_a2, src_a3);
LD_DP2(pb0, 2, src_b0, src_b1);
pa0 += 4;
pb0 += 4;
- for (l = (k - 1); l--;)
+ for (l = ((k - 1) / 2); l--;)
+ {
+ LD_DP2(pa0, 2, src_a0, src_a1);
+ LD_DP2(pb0, 2, src_b0, src_b1);
+
+ src_b = (v2f64) __msa_ilvr_d((v2i64) src_b0, (v2i64) src_b0);
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+
+ src_b = (v2f64) __msa_ilvl_d((v2i64) src_b0, (v2i64) src_b0);
+ res2 += src_a0 * src_b;
+ res3 += src_a1 * src_b;
+
+ src_b = (v2f64) __msa_ilvr_d((v2i64) src_b1, (v2i64) src_b1);
+ res4 += src_a0 * src_b;
+ res5 += src_a1 * src_b;
+
+ src_b = (v2f64) __msa_ilvl_d((v2i64) src_b1, (v2i64) src_b1);
+ res6 += src_a0 * src_b;
+ res7 += src_a1 * src_b;
+
+ pa0 += 4;
+ pb0 += 4;
+
+ LD_DP2(pa0, 2, src_a0, src_a1);
+ LD_DP2(pb0, 2, src_b0, src_b1);
+
+ src_b = (v2f64) __msa_ilvr_d((v2i64) src_b0, (v2i64) src_b0);
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+
+ src_b = (v2f64) __msa_ilvl_d((v2i64) src_b0, (v2i64) src_b0);
+ res2 += src_a0 * src_b;
+ res3 += src_a1 * src_b;
+
+ src_b = (v2f64) __msa_ilvr_d((v2i64) src_b1, (v2i64) src_b1);
+ res4 += src_a0 * src_b;
+ res5 += src_a1 * src_b;
+
+ src_b = (v2f64) __msa_ilvl_d((v2i64) src_b1, (v2i64) src_b1);
+ res6 += src_a0 * src_b;
+ res7 += src_a1 * src_b;
+
+ pa0 += 4;
+ pb0 += 4;
+ }
+
+ if ((k - 1) & 1)
{
LD_DP2(pa0, 2, src_a0, src_a1);
LD_DP2(pb0, 2, src_b0, src_b1);
pa0 += 2;
pb0 += 4;
- for (l = (k - 1); l--;)
+ for (l = ((k - 1) / 2); l--;)
+ {
+ src_a0 = LD_DP(pa0);
+ LD_DP2(pb0, 2, src_b0, src_b1);
+
+ src_b = (v2f64) __msa_ilvr_d((v2i64) src_b0, (v2i64) src_b0);
+ res0 += src_a0 * src_b;
+
+ src_b = (v2f64) __msa_ilvl_d((v2i64) src_b0, (v2i64) src_b0);
+ res1 += src_a0 * src_b;
+
+ src_b = (v2f64) __msa_ilvr_d((v2i64) src_b1, (v2i64) src_b1);
+ res2 += src_a0 * src_b;
+
+ src_b = (v2f64) __msa_ilvl_d((v2i64) src_b1, (v2i64) src_b1);
+ res3 += src_a0 * src_b;
+
+ pa0 += 2;
+ pb0 += 4;
+
+ src_a0 = LD_DP(pa0);
+ LD_DP2(pb0, 2, src_b0, src_b1);
+
+ src_b = (v2f64) __msa_ilvr_d((v2i64) src_b0, (v2i64) src_b0);
+ res0 += src_a0 * src_b;
+
+ src_b = (v2f64) __msa_ilvl_d((v2i64) src_b0, (v2i64) src_b0);
+ res1 += src_a0 * src_b;
+
+ src_b = (v2f64) __msa_ilvr_d((v2i64) src_b1, (v2i64) src_b1);
+ res2 += src_a0 * src_b;
+
+ src_b = (v2f64) __msa_ilvl_d((v2i64) src_b1, (v2i64) src_b1);
+ res3 += src_a0 * src_b;
+
+ pa0 += 2;
+ pb0 += 4;
+ }
+
+ if ((k - 1) & 1)
{
src_a0 = LD_DP(pa0);
LD_DP2(pb0, 2, src_b0, src_b1);
pa0 += 1;
pb0 += 4;
- for (l = (k - 1); l--;)
+ for (l = ((k - 1) / 2); l--;)
+ {
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ b1 = pb0[1];
+ tmp1 += a0 * b1;
+
+ b2 = pb0[2];
+ tmp2 += a0 * b2;
+
+ b3 = pb0[3];
+ tmp3 += a0 * b3;
+
+ pa0 += 1;
+ pb0 += 4;
+
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ b1 = pb0[1];
+ tmp1 += a0 * b1;
+
+ b2 = pb0[2];
+ tmp2 += a0 * b2;
+
+ b3 = pb0[3];
+ tmp3 += a0 * b3;
+
+ pa0 += 1;
+ pb0 += 4;
+ }
+
+ if ((k - 1) & 1)
{
a0 = pa0[0];
b0 = pb0[0];
pa0 += 8;
pb0 += 2;
- for (l = (k - 1); l--;)
+ for (l = ((k - 1) / 2); l--;)
+ {
+ LD_DP4(pa0, 2, src_a0, src_a1, src_a2, src_a3);
+ src_b0 = LD_DP(pb0);
+
+ src_b = (v2f64) __msa_ilvr_d((v2i64) src_b0, (v2i64) src_b0);
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+ res2 += src_a2 * src_b;
+ res3 += src_a3 * src_b;
+
+ src_b = (v2f64) __msa_ilvl_d((v2i64) src_b0, (v2i64) src_b0);
+ res4 += src_a0 * src_b;
+ res5 += src_a1 * src_b;
+ res6 += src_a2 * src_b;
+ res7 += src_a3 * src_b;
+
+ pa0 += 8;
+ pb0 += 2;
+
+ LD_DP4(pa0, 2, src_a0, src_a1, src_a2, src_a3);
+ src_b0 = LD_DP(pb0);
+
+ src_b = (v2f64) __msa_ilvr_d((v2i64) src_b0, (v2i64) src_b0);
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+ res2 += src_a2 * src_b;
+ res3 += src_a3 * src_b;
+
+ src_b = (v2f64) __msa_ilvl_d((v2i64) src_b0, (v2i64) src_b0);
+ res4 += src_a0 * src_b;
+ res5 += src_a1 * src_b;
+ res6 += src_a2 * src_b;
+ res7 += src_a3 * src_b;
+
+ pa0 += 8;
+ pb0 += 2;
+ }
+
+ if ((k - 1) & 1)
{
LD_DP4(pa0, 2, src_a0, src_a1, src_a2, src_a3);
src_b0 = LD_DP(pb0);
pa0 += 4;
pb0 += 2;
- for (l = (k - 1); l--;)
+ for (l = ((k - 1) / 2); l--;)
+ {
+ LD_DP2(pa0, 2, src_a0, src_a1);
+ src_b0 = LD_DP(pb0);
+
+ src_b = (v2f64) __msa_ilvr_d((v2i64) src_b0, (v2i64) src_b0);
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+
+ src_b = (v2f64) __msa_ilvl_d((v2i64) src_b0, (v2i64) src_b0);
+ res2 += src_a0 * src_b;
+ res3 += src_a1 * src_b;
+
+ pa0 += 4;
+ pb0 += 2;
+
+ LD_DP2(pa0, 2, src_a0, src_a1);
+ src_b0 = LD_DP(pb0);
+
+ src_b = (v2f64) __msa_ilvr_d((v2i64) src_b0, (v2i64) src_b0);
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+
+ src_b = (v2f64) __msa_ilvl_d((v2i64) src_b0, (v2i64) src_b0);
+ res2 += src_a0 * src_b;
+ res3 += src_a1 * src_b;
+
+ pa0 += 4;
+ pb0 += 2;
+ }
+
+ if ((k - 1) & 1)
{
LD_DP2(pa0, 2, src_a0, src_a1);
src_b0 = LD_DP(pb0);
pa0 += 2;
pb0 += 2;
- for (l = (k - 1); l--;)
+ for (l = ((k - 1) / 2); l--;)
+ {
+ src_a0 = LD_DP(pa0);
+ src_b0 = LD_DP(pb0);
+
+ src_b = (v2f64) __msa_ilvr_d((v2i64) src_b0, (v2i64) src_b0);
+ res0 += src_a0 * src_b;
+
+ src_b = (v2f64) __msa_ilvl_d((v2i64) src_b0, (v2i64) src_b0);
+ res1 += src_a0 * src_b;
+
+ pa0 += 2;
+ pb0 += 2;
+
+ src_a0 = LD_DP(pa0);
+ src_b0 = LD_DP(pb0);
+
+ src_b = (v2f64) __msa_ilvr_d((v2i64) src_b0, (v2i64) src_b0);
+ res0 += src_a0 * src_b;
+
+ src_b = (v2f64) __msa_ilvl_d((v2i64) src_b0, (v2i64) src_b0);
+ res1 += src_a0 * src_b;
+
+ pa0 += 2;
+ pb0 += 2;
+ }
+
+ if ((k - 1) & 1)
{
src_a0 = LD_DP(pa0);
src_b0 = LD_DP(pb0);
pa0 += 1;
pb0 += 2;
- for (l = (k - 1); l--;)
+ for (l = ((k - 1) / 2); l--;)
+ {
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ b1 = pb0[1];
+ tmp1 += a0 * b1;
+
+ pa0 += 1;
+ pb0 += 2;
+
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ b1 = pb0[1];
+ tmp1 += a0 * b1;
+
+ pa0 += 1;
+ pb0 += 2;
+ }
+
+ if ((k - 1) & 1)
{
a0 = pa0[0];
b0 = pb0[0];
pa0 += 8;
pb0 += 1;
- for (l = (k - 1); l--;)
+ for (l = ((k - 1) / 2); l--;)
+ {
+ LD_DP4(pa0, 2, src_a0, src_a1, src_a2, src_a3);
+ src_b[0] = pb0[0];
+ src_b[1] = pb0[0];
+
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+ res2 += src_a2 * src_b;
+ res3 += src_a3 * src_b;
+
+ pa0 += 8;
+ pb0 += 1;
+
+ LD_DP4(pa0, 2, src_a0, src_a1, src_a2, src_a3);
+ src_b[0] = pb0[0];
+ src_b[1] = pb0[0];
+
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+ res2 += src_a2 * src_b;
+ res3 += src_a3 * src_b;
+
+ pa0 += 8;
+ pb0 += 1;
+ }
+
+ if ((k - 1) & 1)
{
LD_DP4(pa0, 2, src_a0, src_a1, src_a2, src_a3);
src_b[0] = pb0[0];
pa0 += 4;
pb0 += 1;
- for (l = (k - 1); l--;)
+ for (l = ((k - 1) / 2); l--;)
+ {
+ LD_DP2(pa0, 2, src_a0, src_a1);
+ src_b[0] = pb0[0];
+ src_b[1] = pb0[0];
+
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+
+ pa0 += 4;
+ pb0 += 1;
+
+ LD_DP2(pa0, 2, src_a0, src_a1);
+ src_b[0] = pb0[0];
+ src_b[1] = pb0[0];
+
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+
+ pa0 += 4;
+ pb0 += 1;
+ }
+
+ if ((k - 1) & 1)
{
LD_DP2(pa0, 2, src_a0, src_a1);
src_b[0] = pb0[0];
pa0 += 2;
pb0 += 1;
- for (l = (k - 1); l--;)
+ for (l = ((k - 1) / 2); l--;)
+ {
+ src_a0 = LD_DP(pa0);
+ src_b[0] = pb0[0];
+ src_b[1] = pb0[0];
+
+ res0 += src_a0 * src_b;
+
+ pa0 += 2;
+ pb0 += 1;
+
+ src_a0 = LD_DP(pa0);
+ src_b[0] = pb0[0];
+ src_b[1] = pb0[0];
+
+ res0 += src_a0 * src_b;
+
+ pa0 += 2;
+ pb0 += 1;
+ }
+
+ if ((k - 1) & 1)
{
src_a0 = LD_DP(pa0);
src_b[0] = pb0[0];
pa0 += 1;
pb0 += 1;
- for (l = (k - 1); l--;)
+ for (l = ((k - 1) / 2); l--;)
+ {
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ pa0 += 1;
+ pb0 += 1;
+
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ pa0 += 1;
+ pb0 += 1;
+ }
+
+ if ((k - 1) & 1)
{
a0 = pa0[0];
b0 = pb0[0];
#include <msa.h>
+/* Load one vector of word (32-bit) elements from 'psrc'; LD_SP reads it as
+   four single precision floats (v4f32). */
+#define LD_W(RTYPE, psrc) *((RTYPE *)(psrc))
+#define LD_SP(...) LD_W(v4f32, __VA_ARGS__)
+
#define LD_D(RTYPE, psrc) *((RTYPE *)(psrc))
#define LD_DP(...) LD_D(v2f64, __VA_ARGS__)
+/* Store one vector of word (32-bit) elements 'in' to 'pdst'; ST_SP writes it
+   as four single precision floats (v4f32). */
+#define ST_W(RTYPE, in, pdst) *((RTYPE *)(pdst)) = (in)
+#define ST_SP(...) ST_W(v4f32, __VA_ARGS__)
+
#define ST_D(RTYPE, in, pdst) *((RTYPE *)(pdst)) = (in)
#define ST_DP(...) ST_D(v2f64, __VA_ARGS__)
+/* Description : Load 2 vectors of single precision floating point elements with stride
+   Arguments   : Inputs  - psrc, stride
+                 Outputs - out0, out1
+                 Return Type - single precision floating point
+   Details     : Loads 4 floats from (psrc) into 'out0' and 4 floats from
+                 (psrc + stride) into 'out1'; 'stride' is counted in float
+                 elements, not bytes.
+*/
+#define LD_SP2(psrc, stride, out0, out1) \
+{ \
+    out0 = LD_SP((psrc)); \
+    out1 = LD_SP((psrc) + stride); \
+}
+
/* Description : Load 2 vectors of double precision floating point elements with stride
Arguments : Inputs - psrc, stride
Outputs - out0, out1
LD_DP2(psrc + 2 * stride, stride, out2, out3) \
}
+/* Description : Store vectors of single precision floating point elements with stride
+   Arguments   : Inputs - in0, in1, pdst, stride
+   Details     : Store 4 single precision floating point elements from 'in0' to (pdst)
+                 Store 4 single precision floating point elements from 'in1' to (pdst + stride)
+                 'stride' is counted in float elements, not bytes.
+*/
+#define ST_SP2(in0, in1, pdst, stride) \
+{ \
+    ST_SP(in0, (pdst)); \
+    ST_SP(in1, (pdst) + stride); \
+}
+
+/* Store 4 vectors: in0..in3 at (pdst), (pdst + stride), ... */
+#define ST_SP4(in0, in1, in2, in3, pdst, stride) \
+{ \
+    ST_SP2(in0, in1, (pdst), stride); \
+    ST_SP2(in2, in3, ((pdst) + 2 * stride), stride); \
+}
+
+/* Store 8 vectors: in0..in7 at consecutive stride offsets from (pdst). */
+#define ST_SP8(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
+{ \
+    ST_SP4(in0, in1, in2, in3, (pdst), stride); \
+    ST_SP4(in4, in5, in6, in7, ((pdst) + 4 * stride), stride); \
+}
+
+
/* Description : Store vectors of double precision floating point elements with stride
Arguments : Inputs - in0, in1, pdst, stride
Details : Store 2 double precision floating point elements from 'in0' to (pdst)
Details : Right half of byte elements from 'in0' and 'in1' are
interleaved and written to 'out0'
*/
+/* Description : Interleave right/left halves of word elements from 'in0' and
+                 'in1'
+   Arguments   : Inputs  - in0, in1
+                 Outputs - out0, out1
+                 Return Type - as per RTYPE
+   Details     : Right-half words of 'in0'/'in1' are interleaved into 'out0';
+                 left-half words are interleaved into 'out1'.
+*/
+#define ILVRL_W2(RTYPE, in0, in1, out0, out1) \
+{ \
+    out0 = (RTYPE) __msa_ilvr_w((v4i32) in0, (v4i32) in1); \
+    out1 = (RTYPE) __msa_ilvl_w((v4i32) in0, (v4i32) in1); \
+}
+#define ILVRL_W2_SW(...) ILVRL_W2(v4i32, __VA_ARGS__)
+
#define ILVRL_D2(RTYPE, in0, in1, out0, out1) \
{ \
out0 = (RTYPE) __msa_ilvr_d((v2i64) in0, (v2i64) in1); \
}
#define ILVRL_D2_DP(...) ILVRL_D2(v2f64, __VA_ARGS__)
+/* Description : Transpose 4x4 block with word elements in vectors
+   Arguments   : Inputs  - in0, in1, in2, in3
+                 Outputs - out0, out1, out2, out3
+                 Return Type - as per RTYPE
+   Details     : Treats in0..in3 as the four rows of a 4x4 word matrix and
+                 produces its columns in out0..out3, built from two passes of
+                 word interleaves followed by doubleword interleaves.
+                 The '_m' suffix on locals marks macro-internal temporaries.
+*/
+#define TRANSPOSE4x4_W(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3) \
+{ \
+    v4i32 s0_m, s1_m, s2_m, s3_m; \
+ \
+    ILVRL_W2_SW(in1, in0, s0_m, s1_m); \
+    ILVRL_W2_SW(in3, in2, s2_m, s3_m); \
+ \
+    out0 = (RTYPE) __msa_ilvr_d((v2i64) s2_m, (v2i64) s0_m); \
+    out1 = (RTYPE) __msa_ilvl_d((v2i64) s2_m, (v2i64) s0_m); \
+    out2 = (RTYPE) __msa_ilvr_d((v2i64) s3_m, (v2i64) s1_m); \
+    out3 = (RTYPE) __msa_ilvl_d((v2i64) s3_m, (v2i64) s1_m); \
+}
+
+#define TRANSPOSE4x4_SP_SP(...) TRANSPOSE4x4_W(v4f32, __VA_ARGS__)
+
#endif /* __MACROS_MSA_H__ */
--- /dev/null
+/*******************************************************************************
+Copyright (c) 2016, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#include "common.h"
+#include "macros_msa.h"
+
+int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, FLOAT *A, FLOAT *B,
+ FLOAT *C, BLASLONG ldc
+#ifdef TRMMKERNEL
+ , BLASLONG offset
+#endif
+ )
+{
+ BLASLONG i, j, l;
+ FLOAT *pc0, *pc1, *pc2, *pc3, *pc4, *pc5, *pc6, *pc7;
+ FLOAT *pa0, *pb0;
+ FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ FLOAT tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
+ FLOAT a0, a1;
+ FLOAT b0, b1, b2, b3, b4, b5, b6, b7;
+ v4f32 v_alpha = {alpha, alpha, alpha, alpha};
+ v4f32 src_a0, src_a1, src_b, src_b0, src_b1;
+ v4f32 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+ v4f32 res0, res1, res2, res3, res4, res5, res6, res7;
+ v4f32 res8, res9, res10, res11, res12, res13, res14, res15;
+
+ for (j = (n / 8); j--;)
+ {
+ pc0 = C;
+ pc1 = pc0 + ldc;
+ pc2 = pc1 + ldc;
+ pc3 = pc2 + ldc;
+ pc4 = pc3 + ldc;
+ pc5 = pc4 + ldc;
+ pc6 = pc5 + ldc;
+ pc7 = pc6 + ldc;
+
+ pa0 = A;
+ for (i = (m / 8); i--;)
+ {
+ pb0 = B;
+
+ LD_SP2(pa0, 4, src_a0, src_a1);
+ LD_SP2(pb0, 4, src_b0, src_b1);
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 = src_a0 * src_b;
+ res1 = src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res2 = src_a0 * src_b;
+ res3 = src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xAA);
+ res4 = src_a0 * src_b;
+ res5 = src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xFF);
+ res6 = src_a0 * src_b;
+ res7 = src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0);
+ res8 = src_a0 * src_b;
+ res9 = src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0x55);
+ res10 = src_a0 * src_b;
+ res11 = src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0xAA);
+ res12 = src_a0 * src_b;
+ res13 = src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0xFF);
+ res14 = src_a0 * src_b;
+ res15 = src_a1 * src_b;
+
+ pa0 += 8;
+ pb0 += 8;
+
+ for (l = ((k - 1) / 2); l--;)
+ {
+ LD_SP2(pa0, 4, src_a0, src_a1);
+ LD_SP2(pb0, 4, src_b0, src_b1);
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res2 += src_a0 * src_b;
+ res3 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xAA);
+ res4 += src_a0 * src_b;
+ res5 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xFF);
+ res6 += src_a0 * src_b;
+ res7 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0);
+ res8 += src_a0 * src_b;
+ res9 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0x55);
+ res10 += src_a0 * src_b;
+ res11 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0xAA);
+ res12 += src_a0 * src_b;
+ res13 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0xFF);
+ res14 += src_a0 * src_b;
+ res15 += src_a1 * src_b;
+
+ pa0 += 8;
+ pb0 += 8;
+
+ LD_SP2(pa0, 4, src_a0, src_a1);
+ LD_SP2(pb0, 4, src_b0, src_b1);
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res2 += src_a0 * src_b;
+ res3 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xAA);
+ res4 += src_a0 * src_b;
+ res5 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xFF);
+ res6 += src_a0 * src_b;
+ res7 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0);
+ res8 += src_a0 * src_b;
+ res9 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0x55);
+ res10 += src_a0 * src_b;
+ res11 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0xAA);
+ res12 += src_a0 * src_b;
+ res13 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0xFF);
+ res14 += src_a0 * src_b;
+ res15 += src_a1 * src_b;
+
+ pa0 += 8;
+ pb0 += 8;
+ }
+
+ if ((k - 1) & 1)
+ {
+ LD_SP2(pa0, 4, src_a0, src_a1);
+ LD_SP2(pb0, 4, src_b0, src_b1);
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res2 += src_a0 * src_b;
+ res3 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xAA);
+ res4 += src_a0 * src_b;
+ res5 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xFF);
+ res6 += src_a0 * src_b;
+ res7 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0);
+ res8 += src_a0 * src_b;
+ res9 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0x55);
+ res10 += src_a0 * src_b;
+ res11 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0xAA);
+ res12 += src_a0 * src_b;
+ res13 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0xFF);
+ res14 += src_a0 * src_b;
+ res15 += src_a1 * src_b;
+
+ pa0 += 8;
+ pb0 += 8;
+ }
+
+ LD_SP2(pc0, 4, dst0, dst1);
+ LD_SP2(pc1, 4, dst2, dst3);
+ LD_SP2(pc2, 4, dst4, dst5);
+ LD_SP2(pc3, 4, dst6, dst7);
+
+ dst0 += res0 * v_alpha;
+ dst1 += res1 * v_alpha;
+ dst2 += res2 * v_alpha;
+ dst3 += res3 * v_alpha;
+ dst4 += res4 * v_alpha;
+ dst5 += res5 * v_alpha;
+ dst6 += res6 * v_alpha;
+ dst7 += res7 * v_alpha;
+
+ ST_SP2(dst0, dst1, pc0, 4);
+ ST_SP2(dst2, dst3, pc1, 4);
+ ST_SP2(dst4, dst5, pc2, 4);
+ ST_SP2(dst6, dst7, pc3, 4);
+
+ LD_SP2(pc4, 4, dst0, dst1);
+ LD_SP2(pc5, 4, dst2, dst3);
+ LD_SP2(pc6, 4, dst4, dst5);
+ LD_SP2(pc7, 4, dst6, dst7);
+
+ dst0 += res8 * v_alpha;
+ dst1 += res9 * v_alpha;
+ dst2 += res10 * v_alpha;
+ dst3 += res11 * v_alpha;
+ dst4 += res12 * v_alpha;
+ dst5 += res13 * v_alpha;
+ dst6 += res14 * v_alpha;
+ dst7 += res15 * v_alpha;
+
+ ST_SP2(dst0, dst1, pc4, 4);
+ ST_SP2(dst2, dst3, pc5, 4);
+ ST_SP2(dst4, dst5, pc6, 4);
+ ST_SP2(dst6, dst7, pc7, 4);
+
+ pc0 += 8;
+ pc1 += 8;
+ pc2 += 8;
+ pc3 += 8;
+ pc4 += 8;
+ pc5 += 8;
+ pc6 += 8;
+ pc7 += 8;
+ }
+
+ for (i = ((m & 4) / 4); i--;)
+ {
+ pb0 = B;
+
+ src_a0 = LD_SP(pa0);
+ LD_SP2(pb0, 4, src_b0, src_b1);
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 = src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res1 = src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xAA);
+ res2 = src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xFF);
+ res3 = src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0);
+ res4 = src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0x55);
+ res5 = src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0xAA);
+ res6 = src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0xFF);
+ res7 = src_a0 * src_b;
+
+ pa0 += 4;
+ pb0 += 8;
+
+ for (l = ((k - 1) / 2); l--;)
+ {
+ src_a0 = LD_SP(pa0);
+ LD_SP2(pb0, 4, src_b0, src_b1);
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res1 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xAA);
+ res2 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xFF);
+ res3 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0);
+ res4 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0x55);
+ res5 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0xAA);
+ res6 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0xFF);
+ res7 += src_a0 * src_b;
+
+ pa0 += 4;
+ pb0 += 8;
+
+ src_a0 = LD_SP(pa0);
+ LD_SP2(pb0, 4, src_b0, src_b1);
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res1 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xAA);
+ res2 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xFF);
+ res3 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0);
+ res4 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0x55);
+ res5 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0xAA);
+ res6 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0xFF);
+ res7 += src_a0 * src_b;
+
+ pa0 += 4;
+ pb0 += 8;
+ }
+
+ if ((k - 1) & 1)
+ {
+ src_a0 = LD_SP(pa0);
+ LD_SP2(pb0, 4, src_b0, src_b1);
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res1 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xAA);
+ res2 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xFF);
+ res3 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0);
+ res4 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0x55);
+ res5 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0xAA);
+ res6 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b1, 0xFF);
+ res7 += src_a0 * src_b;
+
+ pa0 += 4;
+ pb0 += 8;
+ }
+
+ dst0 = LD_SP(pc0);
+ dst1 = LD_SP(pc1);
+ dst2 = LD_SP(pc2);
+ dst3 = LD_SP(pc3);
+
+ dst0 += res0 * v_alpha;
+ dst1 += res1 * v_alpha;
+ dst2 += res2 * v_alpha;
+ dst3 += res3 * v_alpha;
+
+ ST_SP(dst0, pc0);
+ ST_SP(dst1, pc1);
+ ST_SP(dst2, pc2);
+ ST_SP(dst3, pc3);
+
+ dst0 = LD_SP(pc4);
+ dst1 = LD_SP(pc5);
+ dst2 = LD_SP(pc6);
+ dst3 = LD_SP(pc7);
+
+ dst0 += res4 * v_alpha;
+ dst1 += res5 * v_alpha;
+ dst2 += res6 * v_alpha;
+ dst3 += res7 * v_alpha;
+
+ ST_SP(dst0, pc4);
+ ST_SP(dst1, pc5);
+ ST_SP(dst2, pc6);
+ ST_SP(dst3, pc7);
+
+ pc0 += 4;
+ pc1 += 4;
+ pc2 += 4;
+ pc3 += 4;
+ pc4 += 4;
+ pc5 += 4;
+ pc6 += 4;
+ pc7 += 4;
+ }
+
+ for (i = ((m & 2) / 2); i--;)
+ {
+ pb0 = B;
+
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 = a0 * b0;
+
+ a1 = pa0[1];
+ tmp1 = a1 * b0;
+
+ b1 = pb0[1];
+ tmp2 = a0 * b1;
+ tmp3 = a1 * b1;
+
+ b2 = pb0[2];
+ tmp4 = a0 * b2;
+ tmp5 = a1 * b2;
+
+ b3 = pb0[3];
+ tmp6 = a0 * b3;
+ tmp7 = a1 * b3;
+
+ b4 = pb0[4];
+ tmp8 = a0 * b4;
+ tmp9 = a1 * b4;
+
+ b5 = pb0[5];
+ tmp10 = a0 * b5;
+ tmp11 = a1 * b5;
+
+ b6 = pb0[6];
+ tmp12 = a0 * b6;
+ tmp13 = a1 * b6;
+
+ b7 = pb0[7];
+ tmp14 = a0 * b7;
+ tmp15 = a1 * b7;
+
+ pa0 += 2;
+ pb0 += 8;
+
+ for (l = ((k - 1) / 2); l--;)
+ {
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ a1 = pa0[1];
+ tmp1 += a1 * b0;
+
+ b1 = pb0[1];
+ tmp2 += a0 * b1;
+ tmp3 += a1 * b1;
+
+ b2 = pb0[2];
+ tmp4 += a0 * b2;
+ tmp5 += a1 * b2;
+
+ b3 = pb0[3];
+ tmp6 += a0 * b3;
+ tmp7 += a1 * b3;
+
+ b4 = pb0[4];
+ tmp8 += a0 * b4;
+ tmp9 += a1 * b4;
+
+ b5 = pb0[5];
+ tmp10 += a0 * b5;
+ tmp11 += a1 * b5;
+
+ b6 = pb0[6];
+ tmp12 += a0 * b6;
+ tmp13 += a1 * b6;
+
+ b7 = pb0[7];
+ tmp14 += a0 * b7;
+ tmp15 += a1 * b7;
+
+ pa0 += 2;
+ pb0 += 8;
+
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ a1 = pa0[1];
+ tmp1 += a1 * b0;
+
+ b1 = pb0[1];
+ tmp2 += a0 * b1;
+ tmp3 += a1 * b1;
+
+ b2 = pb0[2];
+ tmp4 += a0 * b2;
+ tmp5 += a1 * b2;
+
+ b3 = pb0[3];
+ tmp6 += a0 * b3;
+ tmp7 += a1 * b3;
+
+ b4 = pb0[4];
+ tmp8 += a0 * b4;
+ tmp9 += a1 * b4;
+
+ b5 = pb0[5];
+ tmp10 += a0 * b5;
+ tmp11 += a1 * b5;
+
+ b6 = pb0[6];
+ tmp12 += a0 * b6;
+ tmp13 += a1 * b6;
+
+ b7 = pb0[7];
+ tmp14 += a0 * b7;
+ tmp15 += a1 * b7;
+
+ pa0 += 2;
+ pb0 += 8;
+ }
+
+ if ((k - 1) & 1)
+ {
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ a1 = pa0[1];
+ tmp1 += a1 * b0;
+
+ b1 = pb0[1];
+ tmp2 += a0 * b1;
+ tmp3 += a1 * b1;
+
+ b2 = pb0[2];
+ tmp4 += a0 * b2;
+ tmp5 += a1 * b2;
+
+ b3 = pb0[3];
+ tmp6 += a0 * b3;
+ tmp7 += a1 * b3;
+
+ b4 = pb0[4];
+ tmp8 += a0 * b4;
+ tmp9 += a1 * b4;
+
+ b5 = pb0[5];
+ tmp10 += a0 * b5;
+ tmp11 += a1 * b5;
+
+ b6 = pb0[6];
+ tmp12 += a0 * b6;
+ tmp13 += a1 * b6;
+
+ b7 = pb0[7];
+ tmp14 += a0 * b7;
+ tmp15 += a1 * b7;
+
+ pa0 += 2;
+ pb0 += 8;
+ }
+
+ tmp0 = alpha * tmp0;
+ tmp2 = alpha * tmp2;
+ tmp4 = alpha * tmp4;
+ tmp6 = alpha * tmp6;
+ tmp8 = alpha * tmp8;
+ tmp10 = alpha * tmp10;
+ tmp12 = alpha * tmp12;
+ tmp14 = alpha * tmp14;
+
+ pc0[0] += tmp0;
+ pc1[0] += tmp2;
+ pc2[0] += tmp4;
+ pc3[0] += tmp6;
+ pc4[0] += tmp8;
+ pc5[0] += tmp10;
+ pc6[0] += tmp12;
+ pc7[0] += tmp14;
+
+ tmp1 = alpha * tmp1;
+ tmp3 = alpha * tmp3;
+ tmp5 = alpha * tmp5;
+ tmp7 = alpha * tmp7;
+ tmp9 = alpha * tmp9;
+ tmp11 = alpha * tmp11;
+ tmp13 = alpha * tmp13;
+ tmp15 = alpha * tmp15;
+
+ pc0[1] += tmp1;
+ pc1[1] += tmp3;
+ pc2[1] += tmp5;
+ pc3[1] += tmp7;
+ pc4[1] += tmp9;
+ pc5[1] += tmp11;
+ pc6[1] += tmp13;
+ pc7[1] += tmp15;
+
+ pc0 += 2;
+ pc1 += 2;
+ pc2 += 2;
+ pc3 += 2;
+ pc4 += 2;
+ pc5 += 2;
+ pc6 += 2;
+ pc7 += 2;
+ }
+
+ for (i = (m & 1); i--;)
+ {
+ pb0 = B;
+
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 = a0 * b0;
+
+ b1 = pb0[1];
+ tmp1 = a0 * b1;
+
+ b2 = pb0[2];
+ tmp2 = a0 * b2;
+
+ b3 = pb0[3];
+ tmp3 = a0 * b3;
+
+ b4 = pb0[4];
+ tmp4 = a0 * b4;
+
+ b5 = pb0[5];
+ tmp5 = a0 * b5;
+
+ b6 = pb0[6];
+ tmp6 = a0 * b6;
+
+ b7 = pb0[7];
+ tmp7 = a0 * b7;
+
+ pa0 += 1;
+ pb0 += 8;
+
+ for (l = ((k - 1) / 2); l--;)
+ {
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ b1 = pb0[1];
+ tmp1 += a0 * b1;
+
+ b2 = pb0[2];
+ tmp2 += a0 * b2;
+
+ b3 = pb0[3];
+ tmp3 += a0 * b3;
+
+ b4 = pb0[4];
+ tmp4 += a0 * b4;
+
+ b5 = pb0[5];
+ tmp5 += a0 * b5;
+
+ b6 = pb0[6];
+ tmp6 += a0 * b6;
+
+ b7 = pb0[7];
+ tmp7 += a0 * b7;
+
+ pa0 += 1;
+ pb0 += 8;
+
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ b1 = pb0[1];
+ tmp1 += a0 * b1;
+
+ b2 = pb0[2];
+ tmp2 += a0 * b2;
+
+ b3 = pb0[3];
+ tmp3 += a0 * b3;
+
+ b4 = pb0[4];
+ tmp4 += a0 * b4;
+
+ b5 = pb0[5];
+ tmp5 += a0 * b5;
+
+ b6 = pb0[6];
+ tmp6 += a0 * b6;
+
+ b7 = pb0[7];
+ tmp7 += a0 * b7;
+
+ pa0 += 1;
+ pb0 += 8;
+ }
+
+ if ((k - 1) & 1)
+ {
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ b1 = pb0[1];
+ tmp1 += a0 * b1;
+
+ b2 = pb0[2];
+ tmp2 += a0 * b2;
+
+ b3 = pb0[3];
+ tmp3 += a0 * b3;
+
+ b4 = pb0[4];
+ tmp4 += a0 * b4;
+
+ b5 = pb0[5];
+ tmp5 += a0 * b5;
+
+ b6 = pb0[6];
+ tmp6 += a0 * b6;
+
+ b7 = pb0[7];
+ tmp7 += a0 * b7;
+
+ pa0 += 1;
+ pb0 += 8;
+ }
+
+ tmp0 = alpha * tmp0;
+ tmp1 = alpha * tmp1;
+ tmp2 = alpha * tmp2;
+ tmp3 = alpha * tmp3;
+ tmp4 = alpha * tmp4;
+ tmp5 = alpha * tmp5;
+ tmp6 = alpha * tmp6;
+ tmp7 = alpha * tmp7;
+
+ pc0[0] += tmp0;
+ pc1[0] += tmp1;
+ pc2[0] += tmp2;
+ pc3[0] += tmp3;
+ pc4[0] += tmp4;
+ pc5[0] += tmp5;
+ pc6[0] += tmp6;
+ pc7[0] += tmp7;
+
+ pc0 += 1;
+ pc1 += 1;
+ pc2 += 1;
+ pc3 += 1;
+ pc4 += 1;
+ pc5 += 1;
+ pc6 += 1;
+ pc7 += 1;
+ }
+
+ l = (k << 3);
+ B = B + l;
+ i = (ldc << 3);
+ C = C + i;
+ }
+
+ for (j = ((n & 4) / 4); j--;)
+ {
+ pc0 = C;
+ pc1 = pc0 + ldc;
+ pc2 = pc1 + ldc;
+ pc3 = pc2 + ldc;
+
+ pa0 = A;
+
+ for (i = (m / 8); i--;)
+ {
+ pb0 = B;
+
+ LD_SP2(pa0, 4, src_a0, src_a1);
+ src_b0 = LD_SP(pb0);
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 = src_a0 * src_b;
+ res1 = src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res2 = src_a0 * src_b;
+ res3 = src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xAA);
+ res4 = src_a0 * src_b;
+ res5 = src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xFF);
+ res6 = src_a0 * src_b;
+ res7 = src_a1 * src_b;
+
+ pa0 += 8;
+ pb0 += 4;
+
+ for (l = ((k - 1) / 2); l--;)
+ {
+ LD_SP2(pa0, 4, src_a0, src_a1);
+ src_b0 = LD_SP(pb0);
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res2 += src_a0 * src_b;
+ res3 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xAA);
+ res4 += src_a0 * src_b;
+ res5 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xFF);
+ res6 += src_a0 * src_b;
+ res7 += src_a1 * src_b;
+
+ pa0 += 8;
+ pb0 += 4;
+
+ LD_SP2(pa0, 4, src_a0, src_a1);
+ src_b0 = LD_SP(pb0);
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res2 += src_a0 * src_b;
+ res3 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xAA);
+ res4 += src_a0 * src_b;
+ res5 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xFF);
+ res6 += src_a0 * src_b;
+ res7 += src_a1 * src_b;
+
+ pa0 += 8;
+ pb0 += 4;
+ }
+
+ if ((k - 1) & 1)
+ {
+ LD_SP2(pa0, 4, src_a0, src_a1);
+ src_b0 = LD_SP(pb0);
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res2 += src_a0 * src_b;
+ res3 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xAA);
+ res4 += src_a0 * src_b;
+ res5 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xFF);
+ res6 += src_a0 * src_b;
+ res7 += src_a1 * src_b;
+
+ pa0 += 8;
+ pb0 += 4;
+ }
+
+ LD_SP2(pc0, 4, dst0, dst1);
+ LD_SP2(pc1, 4, dst2, dst3);
+ LD_SP2(pc2, 4, dst4, dst5);
+ LD_SP2(pc3, 4, dst6, dst7);
+
+ dst0 += res0 * v_alpha;
+ dst1 += res1 * v_alpha;
+ dst2 += res2 * v_alpha;
+ dst3 += res3 * v_alpha;
+ dst4 += res4 * v_alpha;
+ dst5 += res5 * v_alpha;
+ dst6 += res6 * v_alpha;
+ dst7 += res7 * v_alpha;
+
+ ST_SP2(dst0, dst1, pc0, 4);
+ ST_SP2(dst2, dst3, pc1, 4);
+ ST_SP2(dst4, dst5, pc2, 4);
+ ST_SP2(dst6, dst7, pc3, 4);
+
+ pc0 += 8;
+ pc1 += 8;
+ pc2 += 8;
+ pc3 += 8;
+ }
+
+ for (i = ((m & 4) / 4); i--;)
+ {
+ pb0 = B;
+
+ src_a0 = LD_SP(pa0);
+ src_b0 = LD_SP(pb0);
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 = src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res1 = src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xAA);
+ res2 = src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xFF);
+ res3 = src_a0 * src_b;
+
+ pa0 += 4;
+ pb0 += 4;
+
+ for (l = ((k - 1) / 2); l--;)
+ {
+ src_a0 = LD_SP(pa0);
+ src_b0 = LD_SP(pb0);
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res1 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xAA);
+ res2 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xFF);
+ res3 += src_a0 * src_b;
+
+ pa0 += 4;
+ pb0 += 4;
+
+ src_a0 = LD_SP(pa0);
+ src_b0 = LD_SP(pb0);
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res1 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xAA);
+ res2 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xFF);
+ res3 += src_a0 * src_b;
+
+ pa0 += 4;
+ pb0 += 4;
+ }
+
+ if ((k - 1) & 1)
+ {
+ src_a0 = LD_SP(pa0);
+ src_b0 = LD_SP(pb0);
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res1 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xAA);
+ res2 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0xFF);
+ res3 += src_a0 * src_b;
+
+ pa0 += 4;
+ pb0 += 4;
+ }
+
+ dst0 = LD_SP(pc0);
+ dst1 = LD_SP(pc1);
+ dst2 = LD_SP(pc2);
+ dst3 = LD_SP(pc3);
+
+ dst0 += res0 * v_alpha;
+ dst1 += res1 * v_alpha;
+ dst2 += res2 * v_alpha;
+ dst3 += res3 * v_alpha;
+
+ ST_SP(dst0, pc0);
+ ST_SP(dst1, pc1);
+ ST_SP(dst2, pc2);
+ ST_SP(dst3, pc3);
+
+ pc0 += 4;
+ pc1 += 4;
+ pc2 += 4;
+ pc3 += 4;
+ }
+
+ for (i = ((m & 2) / 2); i--;)
+ {
+ pb0 = B;
+
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 = a0 * b0;
+
+ a1 = pa0[1];
+ tmp1 = a1 * b0;
+
+ b1 = pb0[1];
+ tmp2 = a0 * b1;
+ tmp3 = a1 * b1;
+
+ b2 = pb0[2];
+ tmp4 = a0 * b2;
+ tmp5 = a1 * b2;
+
+ b3 = pb0[3];
+ tmp6 = a0 * b3;
+ tmp7 = a1 * b3;
+
+ pa0 += 2;
+ pb0 += 4;
+
+ for (l = ((k - 1) / 2); l--;)
+ {
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ a1 = pa0[1];
+ tmp1 += a1 * b0;
+
+ b1 = pb0[1];
+ tmp2 += a0 * b1;
+ tmp3 += a1 * b1;
+
+ b2 = pb0[2];
+ tmp4 += a0 * b2;
+ tmp5 += a1 * b2;
+
+ b3 = pb0[3];
+ tmp6 += a0 * b3;
+ tmp7 += a1 * b3;
+
+ pa0 += 2;
+ pb0 += 4;
+
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ a1 = pa0[1];
+ tmp1 += a1 * b0;
+
+ b1 = pb0[1];
+ tmp2 += a0 * b1;
+ tmp3 += a1 * b1;
+
+ b2 = pb0[2];
+ tmp4 += a0 * b2;
+ tmp5 += a1 * b2;
+
+ b3 = pb0[3];
+ tmp6 += a0 * b3;
+ tmp7 += a1 * b3;
+
+ pa0 += 2;
+ pb0 += 4;
+ }
+
+ if ((k - 1) & 1)
+ {
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ a1 = pa0[1];
+ tmp1 += a1 * b0;
+
+ b1 = pb0[1];
+ tmp2 += a0 * b1;
+ tmp3 += a1 * b1;
+
+ b2 = pb0[2];
+ tmp4 += a0 * b2;
+ tmp5 += a1 * b2;
+
+ b3 = pb0[3];
+ tmp6 += a0 * b3;
+ tmp7 += a1 * b3;
+
+ pa0 += 2;
+ pb0 += 4;
+ }
+
+ tmp0 = alpha * tmp0;
+ tmp2 = alpha * tmp2;
+ tmp4 = alpha * tmp4;
+ tmp6 = alpha * tmp6;
+
+ pc0[0] += tmp0;
+ pc1[0] += tmp2;
+ pc2[0] += tmp4;
+ pc3[0] += tmp6;
+
+ tmp1 = alpha * tmp1;
+ tmp3 = alpha * tmp3;
+ tmp5 = alpha * tmp5;
+ tmp7 = alpha * tmp7;
+
+ pc0[1] += tmp1;
+ pc1[1] += tmp3;
+ pc2[1] += tmp5;
+ pc3[1] += tmp7;
+
+ pc0 += 2;
+ pc1 += 2;
+ pc2 += 2;
+ pc3 += 2;
+ }
+
+ for (i = (m & 1); i--;)
+ {
+ pb0 = B;
+
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 = a0 * b0;
+
+ b1 = pb0[1];
+ tmp1 = a0 * b1;
+
+ b2 = pb0[2];
+ tmp2 = a0 * b2;
+
+ b3 = pb0[3];
+ tmp3 = a0 * b3;
+
+ pa0 += 1;
+ pb0 += 4;
+
+ for (l = ((k - 1) / 2); l--;)
+ {
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ b1 = pb0[1];
+ tmp1 += a0 * b1;
+
+ b2 = pb0[2];
+ tmp2 += a0 * b2;
+
+ b3 = pb0[3];
+ tmp3 += a0 * b3;
+
+ pa0 += 1;
+ pb0 += 4;
+
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ b1 = pb0[1];
+ tmp1 += a0 * b1;
+
+ b2 = pb0[2];
+ tmp2 += a0 * b2;
+
+ b3 = pb0[3];
+ tmp3 += a0 * b3;
+
+ pa0 += 1;
+ pb0 += 4;
+ }
+
+ if ((k - 1) & 1)
+ {
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ b1 = pb0[1];
+ tmp1 += a0 * b1;
+
+ b2 = pb0[2];
+ tmp2 += a0 * b2;
+
+ b3 = pb0[3];
+ tmp3 += a0 * b3;
+
+ pa0 += 1;
+ pb0 += 4;
+ }
+
+ tmp0 = alpha * tmp0;
+ tmp1 = alpha * tmp1;
+ tmp2 = alpha * tmp2;
+ tmp3 = alpha * tmp3;
+
+ pc0[0] += tmp0;
+ pc1[0] += tmp1;
+ pc2[0] += tmp2;
+ pc3[0] += tmp3;
+
+ pc0 += 1;
+ pc1 += 1;
+ pc2 += 1;
+ pc3 += 1;
+ }
+
+ l = (k << 2);
+ B = B + l;
+ i = (ldc << 2);
+ C = C + i;
+ }
+
+ for (j = ((n & 2) / 2); j--;)
+ {
+ pc0 = C;
+ pc1 = pc0 + ldc;
+
+ pa0 = A;
+
+ for (i = (m / 8); i--;)
+ {
+ pb0 = B;
+
+ LD_SP2(pa0, 4, src_a0, src_a1);
+ src_b0[0] = pb0[0];
+ src_b0[1] = pb0[1];
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 = src_a0 * src_b;
+ res1 = src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res2 = src_a0 * src_b;
+ res3 = src_a1 * src_b;
+
+ pa0 += 8;
+ pb0 += 2;
+
+ for (l = ((k - 1) / 2); l--;)
+ {
+ LD_SP2(pa0, 4, src_a0, src_a1);
+ src_b0[0] = pb0[0];
+ src_b0[1] = pb0[1];
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res2 += src_a0 * src_b;
+ res3 += src_a1 * src_b;
+
+ pa0 += 8;
+ pb0 += 2;
+
+ LD_SP2(pa0, 4, src_a0, src_a1);
+ src_b0[0] = pb0[0];
+ src_b0[1] = pb0[1];
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res2 += src_a0 * src_b;
+ res3 += src_a1 * src_b;
+
+ pa0 += 8;
+ pb0 += 2;
+ }
+
+ if ((k - 1) & 1)
+ {
+ LD_SP2(pa0, 4, src_a0, src_a1);
+ src_b0[0] = pb0[0];
+ src_b0[1] = pb0[1];
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res2 += src_a0 * src_b;
+ res3 += src_a1 * src_b;
+
+ pa0 += 8;
+ pb0 += 2;
+ }
+
+ LD_SP2(pc0, 4, dst0, dst1);
+ LD_SP2(pc1, 4, dst2, dst3);
+
+ dst0 += res0 * v_alpha;
+ dst1 += res1 * v_alpha;
+ dst2 += res2 * v_alpha;
+ dst3 += res3 * v_alpha;
+
+ ST_SP2(dst0, dst1, pc0, 4);
+ ST_SP2(dst2, dst3, pc1, 4);
+
+ pc0 += 8;
+ pc1 += 8;
+ }
+
+ for (i = ((m & 4) / 4); i--;)
+ {
+ pb0 = B;
+
+ src_a0 = LD_SP(pa0);
+ src_b0[0] = pb0[0];
+ src_b0[1] = pb0[1];
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 = src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res1 = src_a0 * src_b;
+
+ pa0 += 4;
+ pb0 += 2;
+
+ for (l = ((k - 1) / 2); l--;)
+ {
+ src_a0 = LD_SP(pa0);
+ src_b0[0] = pb0[0];
+ src_b0[1] = pb0[1];
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res1 += src_a0 * src_b;
+
+ pa0 += 4;
+ pb0 += 2;
+
+ src_a0 = LD_SP(pa0);
+ src_b0[0] = pb0[0];
+ src_b0[1] = pb0[1];
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res1 += src_a0 * src_b;
+
+ pa0 += 4;
+ pb0 += 2;
+ }
+
+ if ((k - 1) & 1)
+ {
+ src_a0 = LD_SP(pa0);
+ src_b0[0] = pb0[0];
+ src_b0[1] = pb0[1];
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0x55);
+ res1 += src_a0 * src_b;
+
+ pa0 += 4;
+ pb0 += 2;
+ }
+
+ dst0 = LD_SP(pc0);
+ dst1 = LD_SP(pc1);
+
+ dst0 += res0 * v_alpha;
+ dst1 += res1 * v_alpha;
+
+ ST_SP(dst0, pc0);
+ ST_SP(dst1, pc1);
+
+ pc0 += 4;
+ pc1 += 4;
+ }
+
+ for (i = ((m & 2) / 2); i--;)
+ {
+ pb0 = B;
+
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 = a0 * b0;
+
+ a1 = pa0[1];
+ tmp1 = a1 * b0;
+
+ b1 = pb0[1];
+ tmp2 = a0 * b1;
+ tmp3 = a1 * b1;
+
+ pa0 += 2;
+ pb0 += 2;
+
+ for (l = ((k - 1) / 2); l--;)
+ {
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ a1 = pa0[1];
+ tmp1 += a1 * b0;
+
+ b1 = pb0[1];
+ tmp2 += a0 * b1;
+ tmp3 += a1 * b1;
+
+ pa0 += 2;
+ pb0 += 2;
+
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ a1 = pa0[1];
+ tmp1 += a1 * b0;
+
+ b1 = pb0[1];
+ tmp2 += a0 * b1;
+ tmp3 += a1 * b1;
+
+ pa0 += 2;
+ pb0 += 2;
+ }
+
+ if ((k - 1) & 1)
+ {
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ a1 = pa0[1];
+ tmp1 += a1 * b0;
+
+ b1 = pb0[1];
+ tmp2 += a0 * b1;
+ tmp3 += a1 * b1;
+
+ pa0 += 2;
+ pb0 += 2;
+ }
+
+ tmp0 = alpha * tmp0;
+ tmp2 = alpha * tmp2;
+
+ pc0[0] += tmp0;
+ pc1[0] += tmp2;
+
+ tmp1 = alpha * tmp1;
+ tmp3 = alpha * tmp3;
+
+ pc0[1] += tmp1;
+ pc1[1] += tmp3;
+
+ pc0 += 2;
+ pc1 += 2;
+ }
+
+ for (i = (m & 1); i--;)
+ {
+ pb0 = B;
+
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 = a0 * b0;
+
+ b1 = pb0[1];
+ tmp1 = a0 * b1;
+
+ pa0 += 1;
+ pb0 += 2;
+
+ for (l = ((k - 1) / 2); l--;)
+ {
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ b1 = pb0[1];
+ tmp1 += a0 * b1;
+
+ pa0 += 1;
+ pb0 += 2;
+
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ b1 = pb0[1];
+ tmp1 += a0 * b1;
+
+ pa0 += 1;
+ pb0 += 2;
+ }
+
+ if ((k - 1) & 1)
+ {
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ b1 = pb0[1];
+ tmp1 += a0 * b1;
+
+ pa0 += 1;
+ pb0 += 2;
+ }
+
+ tmp0 = alpha * tmp0;
+ tmp1 = alpha * tmp1;
+
+ pc0[0] += tmp0;
+ pc1[0] += tmp1;
+
+ pc0 += 1;
+ pc1 += 1;
+ }
+
+ l = (k << 1);
+ B = B + l;
+ i = (ldc << 1);
+ C = C + i;
+ }
+
+ for (j = (n & 1); j--;)
+ {
+ pc0 = C;
+ pa0 = A;
+
+ for (i = (m / 8); i--;)
+ {
+ pb0 = B;
+
+ LD_SP2(pa0, 4, src_a0, src_a1);
+ src_b0[0] = pb0[0];
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 = src_a0 * src_b;
+ res1 = src_a1 * src_b;
+
+ pa0 += 8;
+ pb0 += 1;
+
+ for (l = ((k - 1) / 2); l--;)
+ {
+ LD_SP2(pa0, 4, src_a0, src_a1);
+ src_b0[0] = pb0[0];
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+
+ pa0 += 8;
+ pb0 += 1;
+
+ LD_SP2(pa0, 4, src_a0, src_a1);
+ src_b0[0] = pb0[0];
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+
+ pa0 += 8;
+ pb0 += 1;
+ }
+
+ if ((k - 1) & 1)
+ {
+ LD_SP2(pa0, 4, src_a0, src_a1);
+ src_b0[0] = pb0[0];
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+ res1 += src_a1 * src_b;
+
+ pa0 += 8;
+ pb0 += 1;
+ }
+
+ LD_SP2(pc0, 4, dst0, dst1);
+
+ dst0 += res0 * v_alpha;
+ dst1 += res1 * v_alpha;
+
+ ST_SP2(dst0, dst1, pc0, 4);
+
+ pc0 += 8;
+ }
+
+ for (i = ((m & 4) / 4); i--;)
+ {
+ pb0 = B;
+
+ src_a0 = LD_SP(pa0);
+ src_b0[0] = pb0[0];
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 = src_a0 * src_b;
+
+ pa0 += 4;
+ pb0 += 1;
+
+ for (l = ((k - 1) / 2); l--;)
+ {
+ src_a0 = LD_SP(pa0);
+ src_b0[0] = pb0[0];
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+
+ pa0 += 4;
+ pb0 += 1;
+
+ src_a0 = LD_SP(pa0);
+ src_b0[0] = pb0[0];
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+
+ pa0 += 4;
+ pb0 += 1;
+ }
+
+ if ((k - 1) & 1)
+ {
+ src_a0 = LD_SP(pa0);
+ src_b0[0] = pb0[0];
+
+ src_b = (v4f32) __msa_shf_w((v4i32) src_b0, 0);
+ res0 += src_a0 * src_b;
+
+ pa0 += 4;
+ pb0 += 1;
+ }
+
+ dst0 = LD_SP(pc0);
+
+ dst0 += res0 * v_alpha;
+
+ ST_SP(dst0, pc0);
+
+ pc0 += 4;
+ }
+
+ for (i = (m & 2) / 2; i--;)
+ {
+ pb0 = B;
+
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 = a0 * b0;
+
+ a1 = pa0[1];
+ tmp1 = a1 * b0;
+
+ pa0 += 2;
+ pb0 += 1;
+
+ for (l = ((k - 1) / 2); l--;)
+ {
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ a1 = pa0[1];
+ tmp1 += a1 * b0;
+
+ pa0 += 2;
+ pb0 += 1;
+
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ a1 = pa0[1];
+ tmp1 += a1 * b0;
+
+ pa0 += 2;
+ pb0 += 1;
+ }
+
+ if ((k - 1) & 1)
+ {
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ a1 = pa0[1];
+ tmp1 += a1 * b0;
+
+ pa0 += 2;
+ pb0 += 1;
+ }
+
+ tmp0 = alpha * tmp0;
+ pc0[0] += tmp0;
+
+ tmp1 = alpha * tmp1;
+ pc0[1] += tmp1;
+
+ pc0 += 2;
+ }
+
+ for (i = (m & 1); i--;)
+ {
+ pb0 = B;
+
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 = a0 * b0;
+
+ pa0 += 1;
+ pb0 += 1;
+
+ for (l = ((k - 1) / 2); l--;)
+ {
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ pa0 += 1;
+ pb0 += 1;
+
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ pa0 += 1;
+ pb0 += 1;
+ }
+
+ if ((k - 1) & 1)
+ {
+ a0 = pa0[0];
+ b0 = pb0[0];
+ tmp0 += a0 * b0;
+
+ pa0 += 1;
+ pb0 += 1;
+ }
+
+ pc0[0] += alpha * tmp0;
+
+ pc0 += 1;
+ }
+
+ l = (k << 0);
+ B = B + l;
+ i = (ldc << 0);
+ C = C + i;
+ }
+
+ return 0;
+}
--- /dev/null
+/*******************************************************************************
+Copyright (c) 2016, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#include "common.h"
+#include "macros_msa.h"
+
+/* Pack a panel of single-precision data for the SGEMM 8x8 MSA kernel
+   ("ncopy" layout, register-block width 8).
+
+   m    : length of each lda-strided vector (elements copied per vector)
+   n    : number of lda-strided vectors to pack
+   src  : source matrix, column-major panels separated by lda floats
+   lda  : stride (in floats) between consecutive vectors
+   dst  : destination buffer, written contiguously front to back
+
+   Eight vectors are consumed per outer pass; each 8x8 tile is
+   transposed (via four 4x4 transposes) so that element i of all eight
+   vectors lands contiguously in dst -- the interleaved order the GEMM
+   micro-kernel reads.  Tail passes handle 4, 2 and 1 remaining vectors
+   and the m % 8 / m % 3-bit leftovers with scalar interleaving.
+   Always returns 0 (OpenBLAS copy-routine convention). */
+int CNAME(BLASLONG m, BLASLONG n, FLOAT * __restrict src, BLASLONG lda,
+ FLOAT * __restrict dst)
+{
+ BLASLONG i, j;
+ FLOAT *psrc0;
+ FLOAT *psrc1, *psrc2, *psrc3, *psrc4;
+ FLOAT *psrc5, *psrc6, *psrc7, *psrc8;
+ FLOAT *pdst;
+ v4f32 src0, src1, src2, src3, src4, src5, src6, src7;
+ v4f32 src8, src9, src10, src11, src12, src13, src14, src15;
+ v4f32 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+ v4f32 dst8, dst9, dst10, dst11, dst12, dst13, dst14, dst15;
+
+ psrc0 = src;
+ pdst = dst;
+
+ /* Main pass: eight lda-strided vectors at a time. */
+ for (j = (n >> 3); j--;)
+ {
+ psrc1 = psrc0;
+ psrc2 = psrc1 + lda;
+ psrc3 = psrc2 + lda;
+ psrc4 = psrc3 + lda;
+ psrc5 = psrc4 + lda;
+ psrc6 = psrc5 + lda;
+ psrc7 = psrc6 + lda;
+ psrc8 = psrc7 + lda;
+ psrc0 += 8 * lda;
+
+ /* Copy 8 elements from each of the 8 vectors (an 8x8 tile),
+ transpose it, and store 64 floats interleaved. */
+ for (i = (m >> 3); i--;)
+ {
+ LD_SP2(psrc1, 4, src0, src1);
+ LD_SP2(psrc2, 4, src2, src3);
+ LD_SP2(psrc3, 4, src4, src5);
+ LD_SP2(psrc4, 4, src6, src7);
+ LD_SP2(psrc5, 4, src8, src9);
+ LD_SP2(psrc6, 4, src10, src11);
+ LD_SP2(psrc7, 4, src12, src13);
+ LD_SP2(psrc8, 4, src14, src15);
+ psrc1 += 8;
+ psrc2 += 8;
+ psrc3 += 8;
+ psrc4 += 8;
+ psrc5 += 8;
+ psrc6 += 8;
+ psrc7 += 8;
+ psrc8 += 8;
+
+ /* 8x8 transpose built from four 4x4 vector transposes:
+ dst0..dst7 hold columns 0-3, dst8..dst15 columns 4-7. */
+ TRANSPOSE4x4_SP_SP(src0, src2, src4, src6, dst0, dst2, dst4, dst6);
+ TRANSPOSE4x4_SP_SP(src8, src10, src12, src14, dst1, dst3, dst5,
+ dst7);
+ TRANSPOSE4x4_SP_SP(src1, src3, src5, src7, dst8, dst10, dst12,
+ dst14);
+ TRANSPOSE4x4_SP_SP(src9, src11, src13, src15, dst9, dst11, dst13,
+ dst15);
+
+ ST_SP2(dst0, dst1, pdst, 4);
+ ST_SP2(dst2, dst3, pdst + 8, 4);
+ ST_SP2(dst4, dst5, pdst + 16, 4);
+ ST_SP2(dst6, dst7, pdst + 24, 4);
+ ST_SP2(dst8, dst9, pdst + 32, 4);
+ ST_SP2(dst10, dst11, pdst + 40, 4);
+ ST_SP2(dst12, dst13, pdst + 48, 4);
+ ST_SP2(dst14, dst15, pdst + 56, 4);
+ pdst += 64;
+ }
+
+ /* m % 8 leftover: interleave one element from each vector. */
+ for (i = (m & 7); i--;)
+ {
+ *pdst++ = *psrc1++;
+ *pdst++ = *psrc2++;
+ *pdst++ = *psrc3++;
+ *pdst++ = *psrc4++;
+ *pdst++ = *psrc5++;
+ *pdst++ = *psrc6++;
+ *pdst++ = *psrc7++;
+ *pdst++ = *psrc8++;
+ }
+ }
+
+ /* Tail pass: four remaining vectors, 4x4 tile transpose. */
+ if (n & 4)
+ {
+ psrc1 = psrc0;
+ psrc2 = psrc1 + lda;
+ psrc3 = psrc2 + lda;
+ psrc4 = psrc3 + lda;
+ psrc0 += 4 * lda;
+
+ for (i = (m >> 2); i--;)
+ {
+ src0 = LD_SP(psrc1);
+ src1 = LD_SP(psrc2);
+ src2 = LD_SP(psrc3);
+ src3 = LD_SP(psrc4);
+ psrc1 += 4;
+ psrc2 += 4;
+ psrc3 += 4;
+ psrc4 += 4;
+
+ TRANSPOSE4x4_SP_SP(src0, src1, src2, src3, dst0, dst1, dst2, dst3);
+
+ ST_SP2(dst0, dst1, pdst, 4);
+ ST_SP2(dst2, dst3, pdst + 8, 4);
+ pdst += 16;
+ }
+
+ /* m % 4 leftover, scalar interleave of the 4 vectors. */
+ for (i = (m & 3); i--;)
+ {
+ *pdst++ = *psrc1++;
+ *pdst++ = *psrc2++;
+ *pdst++ = *psrc3++;
+ *pdst++ = *psrc4++;
+ }
+ }
+
+ /* Tail pass: two remaining vectors, scalar interleave only. */
+ if (n & 2)
+ {
+ psrc1 = psrc0;
+ psrc2 = psrc1 + lda;
+ psrc0 += 2 * lda;
+
+ for (i = (m >> 1); i--;)
+ {
+ *pdst++ = *psrc1++;
+ *pdst++ = *psrc2++;
+ *pdst++ = *psrc1++;
+ *pdst++ = *psrc2++;
+ }
+
+ if (m & 1)
+ {
+ *pdst++ = *psrc1++;
+ *pdst++ = *psrc2++;
+ }
+ }
+
+ /* Tail pass: a single remaining vector, straight copy. */
+ if (n & 1)
+ {
+ psrc1 = psrc0;
+
+ for (i = m; i--;)
+ {
+ *pdst++ = *psrc1++;
+ }
+ }
+
+ return 0;
+}
--- /dev/null
+/*******************************************************************************
+Copyright (c) 2016, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#include "common.h"
+#include "macros_msa.h"
+
+/* Pack a panel of single-precision data for the SGEMM 8x8 MSA kernel
+   ("tcopy" layout, register-block width 8).
+
+   m    : number of lda-strided vectors (rows) to pack
+   n    : length of each vector (elements copied per row)
+   src  : source matrix, rows separated by lda floats
+   lda  : stride (in floats) between consecutive rows
+   dst  : destination buffer
+
+   Unlike the ncopy routine, no transpose is performed: each row is
+   copied in contiguous 8-element chunks.  The n % 8 column tails are
+   NOT appended in-line; they are gathered into fixed regions near the
+   end of dst (pdst2 at m*(n&~7) for the 4-wide tail, pdst3 at
+   m*(n&~3) for the 2-wide tail, pdst4 at m*(n&~1) for the last
+   column), so the micro-kernel can address each tail block directly.
+   Within the main region, consecutive 8-column chunks of the same row
+   group are m*8 floats apart (pdst1 += m * 8).  Always returns 0
+   (OpenBLAS copy-routine convention). */
+int CNAME(BLASLONG m, BLASLONG n, FLOAT * __restrict src, BLASLONG lda,
+ FLOAT * __restrict dst)
+{
+ BLASLONG i, j;
+ FLOAT *psrc0;
+ FLOAT *psrc1, *psrc2, *psrc3, *psrc4;
+ FLOAT *psrc5, *psrc6, *psrc7, *psrc8;
+ FLOAT *pdst0, *pdst1, *pdst2, *pdst3, *pdst4;
+ v4f32 src0, src1, src2, src3, src4, src5, src6, src7;
+ v4f32 src8, src9, src10, src11, src12, src13, src14, src15;
+
+ psrc0 = src;
+ pdst0 = dst;
+
+ /* Fixed starting offsets of the 4-/2-/1-wide column-tail regions. */
+ pdst2 = dst + m * (n & ~7);
+ pdst3 = dst + m * (n & ~3);
+ pdst4 = dst + m * (n & ~1);
+
+ /* Main pass: eight rows at a time. */
+ for (j = (m >> 3); j--;)
+ {
+ psrc1 = psrc0;
+ psrc2 = psrc1 + lda;
+ psrc3 = psrc2 + lda;
+ psrc4 = psrc3 + lda;
+ psrc5 = psrc4 + lda;
+ psrc6 = psrc5 + lda;
+ psrc7 = psrc6 + lda;
+ psrc8 = psrc7 + lda;
+ psrc0 += 8 * lda;
+
+ /* Each 8-row group owns a 64-float slot per 8-column chunk. */
+ pdst1 = pdst0;
+ pdst0 += 64;
+
+ for (i = (n >> 3); i--;)
+ {
+ LD_SP2(psrc1, 4, src0, src1);
+ LD_SP2(psrc2, 4, src2, src3);
+ LD_SP2(psrc3, 4, src4, src5);
+ LD_SP2(psrc4, 4, src6, src7);
+ LD_SP2(psrc5, 4, src8, src9);
+ LD_SP2(psrc6, 4, src10, src11);
+ LD_SP2(psrc7, 4, src12, src13);
+ LD_SP2(psrc8, 4, src14, src15);
+ psrc1 += 8;
+ psrc2 += 8;
+ psrc3 += 8;
+ psrc4 += 8;
+ psrc5 += 8;
+ psrc6 += 8;
+ psrc7 += 8;
+ psrc8 += 8;
+
+ ST_SP8(src0, src1, src2, src3, src4, src5, src6, src7, pdst1, 4);
+ ST_SP8(src8, src9, src10, src11, src12, src13, src14, src15,
+ pdst1 + 32, 4);
+ /* Advance to this row group's slot in the next column chunk. */
+ pdst1 += m * 8;
+ }
+
+ /* 4-wide column tail goes to the pdst2 region. */
+ if (n & 4)
+ {
+ src0 = LD_SP(psrc1);
+ src1 = LD_SP(psrc2);
+ src2 = LD_SP(psrc3);
+ src3 = LD_SP(psrc4);
+ src4 = LD_SP(psrc5);
+ src5 = LD_SP(psrc6);
+ src6 = LD_SP(psrc7);
+ src7 = LD_SP(psrc8);
+ psrc1 += 4;
+ psrc2 += 4;
+ psrc3 += 4;
+ psrc4 += 4;
+ psrc5 += 4;
+ psrc6 += 4;
+ psrc7 += 4;
+ psrc8 += 4;
+
+ ST_SP8(src0, src1, src2, src3, src4, src5, src6, src7, pdst2, 4);
+ pdst2 += 32;
+ }
+
+ /* 2-wide column tail goes to the pdst3 region. */
+ if (n & 2)
+ {
+ *pdst3++ = *psrc1++;
+ *pdst3++ = *psrc1++;
+ *pdst3++ = *psrc2++;
+ *pdst3++ = *psrc2++;
+ *pdst3++ = *psrc3++;
+ *pdst3++ = *psrc3++;
+ *pdst3++ = *psrc4++;
+ *pdst3++ = *psrc4++;
+ *pdst3++ = *psrc5++;
+ *pdst3++ = *psrc5++;
+ *pdst3++ = *psrc6++;
+ *pdst3++ = *psrc6++;
+ *pdst3++ = *psrc7++;
+ *pdst3++ = *psrc7++;
+ *pdst3++ = *psrc8++;
+ *pdst3++ = *psrc8++;
+ }
+
+ /* Final single column goes to the pdst4 region. */
+ if (n & 1)
+ {
+ *pdst4++ = *psrc1++;
+ *pdst4++ = *psrc2++;
+ *pdst4++ = *psrc3++;
+ *pdst4++ = *psrc4++;
+ *pdst4++ = *psrc5++;
+ *pdst4++ = *psrc6++;
+ *pdst4++ = *psrc7++;
+ *pdst4++ = *psrc8++;
+ }
+ }
+
+ /* Tail pass: four remaining rows (same layout, 32-float slots). */
+ if (m & 4)
+ {
+ psrc1 = psrc0;
+ psrc2 = psrc1 + lda;
+ psrc3 = psrc2 + lda;
+ psrc4 = psrc3 + lda;
+ psrc0 += 4 * lda;
+
+ pdst1 = pdst0;
+ pdst0 += 32;
+
+ for (i = (n >> 3); i--;)
+ {
+ LD_SP2(psrc1, 4, src0, src1);
+ LD_SP2(psrc2, 4, src2, src3);
+ LD_SP2(psrc3, 4, src4, src5);
+ LD_SP2(psrc4, 4, src6, src7);
+ psrc1 += 8;
+ psrc2 += 8;
+ psrc3 += 8;
+ psrc4 += 8;
+
+ ST_SP8(src0, src1, src2, src3, src4, src5, src6, src7, pdst1, 4);
+ pdst1 += 8 * m;
+ }
+
+ if (n & 4)
+ {
+ src0 = LD_SP(psrc1);
+ src1 = LD_SP(psrc2);
+ src2 = LD_SP(psrc3);
+ src3 = LD_SP(psrc4);
+ psrc1 += 4;
+ psrc2 += 4;
+ psrc3 += 4;
+ psrc4 += 4;
+
+ ST_SP4(src0, src1, src2, src3, pdst2, 4);
+ pdst2 += 16;
+ }
+
+ if (n & 2)
+ {
+ *pdst3++ = *psrc1++;
+ *pdst3++ = *psrc1++;
+ *pdst3++ = *psrc2++;
+ *pdst3++ = *psrc2++;
+ *pdst3++ = *psrc3++;
+ *pdst3++ = *psrc3++;
+ *pdst3++ = *psrc4++;
+ *pdst3++ = *psrc4++;
+ }
+
+ if (n & 1)
+ {
+ *pdst4++ = *psrc1++;
+ *pdst4++ = *psrc2++;
+ *pdst4++ = *psrc3++;
+ *pdst4++ = *psrc4++;
+ }
+ }
+
+ /* Tail pass: two remaining rows (16-float slots). */
+ if (m & 2)
+ {
+ psrc1 = psrc0;
+ psrc2 = psrc1 + lda;
+ psrc0 += 2 * lda;
+
+ pdst1 = pdst0;
+ pdst0 += 16;
+
+ for (i = (n >> 3); i--;)
+ {
+ LD_SP2(psrc1, 4, src0, src1);
+ LD_SP2(psrc2, 4, src2, src3);
+ psrc1 += 8;
+ psrc2 += 8;
+
+ ST_SP4(src0, src1, src2, src3, pdst1, 4);
+ pdst1 += 8 * m;
+ }
+
+ if (n & 4)
+ {
+ src0 = LD_SP(psrc1);
+ src1 = LD_SP(psrc2);
+ psrc1 += 4;
+ psrc2 += 4;
+
+ ST_SP2(src0, src1, pdst2, 4);
+ pdst2 += 8;
+ }
+
+ if (n & 2)
+ {
+ *pdst3++ = *psrc1++;
+ *pdst3++ = *psrc1++;
+ *pdst3++ = *psrc2++;
+ *pdst3++ = *psrc2++;
+ }
+
+ if (n & 1)
+ {
+ *pdst4++ = *psrc1++;
+ *pdst4++ = *psrc2++;
+ }
+ }
+
+ /* Tail pass: a single remaining row (8-float slots). */
+ if (m & 1)
+ {
+ psrc1 = psrc0;
+ psrc0 += lda;
+
+ pdst1 = pdst0;
+ pdst0 += 8;
+
+ for (i = (n >> 3); i--;)
+ {
+ LD_SP2(psrc1, 4, src0, src1);
+ psrc1 += 8;
+
+ ST_SP2(src0, src1, pdst1, 4);
+ pdst1 += 8 * m;
+ }
+
+ if (n & 4)
+ {
+ src0 = LD_SP(psrc1);
+ psrc1 += 4;
+
+ ST_SP(src0, pdst2);
+ pdst2 += 4;
+ }
+
+ if (n & 2)
+ {
+ *pdst3++ = *psrc1++;
+ *pdst3++ = *psrc1++;
+ }
+
+ if (n & 1)
+ {
+ *pdst4++ = *psrc1++;
+ }
+ }
+
+ return 0;
+}
#define GEMM_DEFAULT_OFFSET_B 0
#define GEMM_DEFAULT_ALIGN 0x03fffUL
-#define SGEMM_DEFAULT_UNROLL_M 2
-#define SGEMM_DEFAULT_UNROLL_N 2
+#define SGEMM_DEFAULT_UNROLL_M 8
+#define SGEMM_DEFAULT_UNROLL_N 8
#define DGEMM_DEFAULT_UNROLL_M 8
#define DGEMM_DEFAULT_UNROLL_N 4
#define GEMM_DEFAULT_OFFSET_B 0
#define GEMM_DEFAULT_ALIGN 0x03fffUL
-#define SGEMM_DEFAULT_UNROLL_M 2
-#define SGEMM_DEFAULT_UNROLL_N 2
+#define SGEMM_DEFAULT_UNROLL_M 8
+#define SGEMM_DEFAULT_UNROLL_N 8
#define DGEMM_DEFAULT_UNROLL_M 8
#define DGEMM_DEFAULT_UNROLL_N 4