Optimizations of software prefetching
author    wjc404 <52632443+wjc404@users.noreply.github.com>
          Tue, 5 Nov 2019 05:36:56 +0000 (13:36 +0800)
committer GitHub <noreply@github.com>
          Tue, 5 Nov 2019 05:36:56 +0000 (13:36 +0800)
kernel/x86_64/dgemm_kernel_4x8_skylakex_2.c

index a958a1a..72878ac 100644
     "decq %2;cmpq $1,%2;jnb "#nn"01b;"\\r
     #nn"00:\n\t"\r
 \r
+/* %10: pointer for prefetching C elements before storage; %4 = ldc (in bytes); %11: pointer for prefetching the next packed B block */\r
 #define INNER_KERNELm8(nn) \\r
-    "cmpq $8,%2;jb "#nn"001f;"\\r
+    "movq %3,%10;cmpq $16,%2;jb "#nn"001f;"\\r
     #nn"008:\n\t"\\r
     INNER_KERNEL_k1m8n##nn "addq $32,%0;"\\r
     INNER_KERNEL_k1m8n##nn "addq $32,%0;"\\r
+    "prefetcht1 (%10); prefetcht1 63(%10); addq %4,%10;"\\r
     INNER_KERNEL_k1m8n##nn "addq $32,%0;"\\r
     INNER_KERNEL_k1m8n##nn "addq $32,%0;"\\r
-    INNER_KERNEL_k1m8n##nn "addq $32,%0;"\\r
-    INNER_KERNEL_k1m8n##nn "addq $32,%0;"\\r
-    INNER_KERNEL_k1m8n##nn "addq $32,%0;"\\r
-    INNER_KERNEL_k1m8n##nn "addq $32,%0;"\\r
-    "subq $8,%2;cmpq $8,%2;jnb "#nn"008b;"\\r
+    "prefetcht1 (%11); addq $16,%11;"\\r
+    "subq $4,%2;cmpq $16,%2;jnb "#nn"008b;"\\r
+    "movq %3,%10;"\\r
     #nn"001:\n\t"\\r
     "cmpq $1,%2;jb "#nn"000f;"\\r
+    "prefetcht0 (%10); prefetcht0 63(%10); prefetcht0 (%10,%4,1); prefetcht0 63(%10,%4,1); leaq (%10,%4,2),%10;"\\r
     INNER_KERNEL_k1m8n##nn "addq $32,%0;"\\r
     "decq %2;jmp "#nn"001b;"\\r
     ""#nn"000:\n\t"\r
 \r
 #define INNER_STORE_m1n8(c1,disp) \\r
     "kxnorw %%k1,%%k1,%%k1;"\\r
-    "vgatherqpd "#disp"(%3,%%zmm6,1), %%zmm7 %{%%k1%};"\\r
+    "vgatherqpd "#disp"(%10,%%zmm6,1), %%zmm7 %{%%k1%};"\\r
     "vfmadd132pd %%zmm3,%%zmm7,"#c1";"\\r
     "kxnorw %%k1,%%k1,%%k1;"\\r
-    "vscatterqpd "#c1", "#disp"(%3,%%zmm6,1) %{%%k1%};"\r
+    "vscatterqpd "#c1", "#disp"(%10,%%zmm6,1) %{%%k1%};"\r
 \r
 #define INNER_SAVE_m1n8 \\r
+    "movq %3,%10;"\\r
     INNER_SETINDEX\\r
     INNER_STORE_m1n8(%%zmm8,0)\r
 \r
 #define INNER_SAVE_m1n16 \\r
     INNER_SAVE_m1n8\\r
-    "leaq (%3,%4,8),%3;"\\r
+    "leaq (%10,%4,8),%10;"\\r
     INNER_STORE_m1n8(%%zmm9,0)\r
 \r
 #define INNER_SAVE_m1n24 \\r
     INNER_SAVE_m1n16\\r
-    "leaq (%3,%4,8),%3;"\\r
+    "leaq (%10,%4,8),%10;"\\r
     INNER_STORE_m1n8(%%zmm10,0)\r
 \r
 #define INNER_SAVE_m2n8 \\r
+    "movq %3,%10;"\\r
     INNER_SETINDEX\\r
     INNER_STORE_m1n8(%%zmm8,0)\\r
     INNER_STORE_m1n8(%%zmm9,8)\r
 \r
 #define INNER_SAVE_m2n16 \\r
+    "movq %3,%10;"\\r
     INNER_SETINDEX\\r
     INNER_STORE_m1n8(%%zmm8,0)\\r
     INNER_STORE_m1n8(%%zmm10,8)\\r
-    "leaq (%3,%4,8),%3;"\\r
+    "leaq (%10,%4,8),%10;"\\r
     INNER_STORE_m1n8(%%zmm9,0)\\r
     INNER_STORE_m1n8(%%zmm11,8)\r
+\r
 #define INNER_SAVE_m2n24 \\r
+    "movq %3,%10;"\\r
     INNER_SETINDEX\\r
     INNER_STORE_m1n8(%%zmm8,0)\\r
     INNER_STORE_m1n8(%%zmm11,8)\\r
-    "leaq (%3,%4,8),%3;"\\r
+    "leaq (%10,%4,8),%10;"\\r
     INNER_STORE_m1n8(%%zmm9,0)\\r
     INNER_STORE_m1n8(%%zmm12,8)\\r
-    "leaq (%3,%4,8),%3;"\\r
+    "leaq (%10,%4,8),%10;"\\r
     INNER_STORE_m1n8(%%zmm10,0)\\r
     INNER_STORE_m1n8(%%zmm13,8)\r
-#define INNER_PREF_8x8 \\r
-    "prefetcht0 (%3); prefetcht0 56(%3); prefetcht0 (%3,%4,1); prefetcht0 56(%3,%4,1); prefetcht0 (%3,%4,2); prefetcht0 56(%3,%4,2);"\\r
-    "prefetcht0 (%3,%4,4); prefetcht0 56(%3,%4,4); leaq (%3,%4,2),%3;"\\r
-    "prefetcht0 (%3,%4,1); prefetcht0 56(%3,%4,1); prefetcht0 (%3,%4,4); prefetcht0 56(%3,%4,4); leaq (%3,%4,1),%3;"\\r
-    "prefetcht0 (%3,%4,2); prefetcht0 56(%3,%4,2); prefetcht0 (%3,%4,4); prefetcht0 56(%3,%4,4);"\\r
-    "subq %4,%3; subq %4,%3; subq %4,%3;"\r
+\r
 #define INNER_TRANS_4x8(c1,c2,c3,c4) \\r
     "vunpcklpd "#c2","#c1",%%zmm4;vunpckhpd "#c2","#c1",%%zmm5;vunpcklpd "#c4","#c3",%%zmm6;vunpckhpd "#c4","#c3",%%zmm7;"\\r
     "vblendmpd %%zmm6,%%zmm4,"#c1"%{%6%};vblendmpd %%zmm7,%%zmm5,"#c3"%{%6%};"\\r
     "vblendmpd %%zmm4,"#c1",%%zmm4%{%6%};vblendmpd %%zmm5,"#c3","#c2"%{%6%};"\\r
     "vblendmpd "#c1",%%zmm6,%%zmm6%{%6%};vblendmpd "#c3",%%zmm7,"#c4"%{%6%};"\\r
     "vmovapd %%zmm4,"#c1"; vmovapd %%zmm6,"#c3";"\r
+\r
 #define INNER_TRANS_8x8(c1,c2,c3,c4,c5,c6,c7,c8) \\r
     INNER_TRANS_4x8(c1,c2,c3,c4)\\r
     INNER_TRANS_4x8(c5,c6,c7,c8)\\r
     "vblendmpd "#c3",%%zmm6,"#c3"%{%5%};vblendmpd  %%zmm6,"#c7","#c7"%{%5%};"\\r
     "vblendmpd "#c8","#c4",%%zmm7%{%5%};vshuff64x2 $0x4e,%%zmm7,%%zmm7,%%zmm7;"\\r
     "vblendmpd "#c4",%%zmm7,"#c4"%{%5%};vblendmpd  %%zmm7,"#c8","#c8"%{%5%};"\r
+\r
 //%7 for k01(input) only when m=4\r
 #define INNER_STORE_4x8(c1,c2,c3,c4) \\r
-    "vmovupd (%3),%%zmm4%{%5%};vmovupd -32(%3,%4,4),%%zmm4%{%7%};vfmadd132pd %%zmm3,%%zmm4,"#c1";"\\r
-    "vmovupd "#c1",(%3)%{%5%}; vmovupd "#c1",-32(%3,%4,4)%{%7%}; leaq (%3,%4,1),%3;"\\r
-    "vmovupd (%3),%%zmm5%{%5%};vmovupd -32(%3,%4,4),%%zmm5%{%7%};vfmadd132pd %%zmm3,%%zmm5,"#c2";"\\r
-    "vmovupd "#c2",(%3)%{%5%}; vmovupd "#c2",-32(%3,%4,4)%{%7%}; leaq (%3,%4,1),%3;"\\r
-    "vmovupd (%3),%%zmm6%{%5%};vmovupd -32(%3,%4,4),%%zmm6%{%7%};vfmadd132pd %%zmm3,%%zmm6,"#c3";"\\r
-    "vmovupd "#c3",(%3)%{%5%}; vmovupd "#c3",-32(%3,%4,4)%{%7%}; leaq (%3,%4,1),%3;"\\r
-    "vmovupd (%3),%%zmm7%{%5%};vmovupd -32(%3,%4,4),%%zmm7%{%7%};vfmadd132pd %%zmm3,%%zmm7,"#c4";"\\r
-    "vmovupd "#c4",(%3)%{%5%}; vmovupd "#c4",-32(%3,%4,4)%{%7%}; leaq (%3,%4,1),%3;"\\r
-    "leaq (%3,%4,4),%3;"\r
+    "vmovupd (%10),%%zmm4%{%5%};vmovupd -32(%10,%4,4),%%zmm4%{%7%};vfmadd132pd %%zmm3,%%zmm4,"#c1";"\\r
+    "vmovupd "#c1",(%10)%{%5%}; vmovupd "#c1",-32(%10,%4,4)%{%7%}; leaq (%10,%4,1),%10;"\\r
+    "vmovupd (%10),%%zmm5%{%5%};vmovupd -32(%10,%4,4),%%zmm5%{%7%};vfmadd132pd %%zmm3,%%zmm5,"#c2";"\\r
+    "vmovupd "#c2",(%10)%{%5%}; vmovupd "#c2",-32(%10,%4,4)%{%7%}; leaq (%10,%4,1),%10;"\\r
+    "vmovupd (%10),%%zmm6%{%5%};vmovupd -32(%10,%4,4),%%zmm6%{%7%};vfmadd132pd %%zmm3,%%zmm6,"#c3";"\\r
+    "vmovupd "#c3",(%10)%{%5%}; vmovupd "#c3",-32(%10,%4,4)%{%7%}; leaq (%10,%4,1),%10;"\\r
+    "vmovupd (%10),%%zmm7%{%5%};vmovupd -32(%10,%4,4),%%zmm7%{%7%};vfmadd132pd %%zmm3,%%zmm7,"#c4";"\\r
+    "vmovupd "#c4",(%10)%{%5%}; vmovupd "#c4",-32(%10,%4,4)%{%7%}; leaq (%10,%4,1),%10;"\\r
+    "leaq (%10,%4,4),%10;"\r
+\r
 #define INNER_STORE_8x8(c1,c2,c3,c4,c5,c6,c7,c8) \\r
-    "prefetcht1 120(%3); prefetcht1 120(%3,%4,1);"\\r
-    "vfmadd213pd (%3),%%zmm3,"#c1"; vmovupd "#c1",(%3); vfmadd213pd (%3,%4,1),%%zmm3,"#c2"; vmovupd "#c2",(%3,%4,1); leaq (%3,%4,2),%3;"\\r
-    "prefetcht1 120(%3); prefetcht1 120(%3,%4,1);"\\r
-    "vfmadd213pd (%3),%%zmm3,"#c3"; vmovupd "#c3",(%3); vfmadd213pd (%3,%4,1),%%zmm3,"#c4"; vmovupd "#c4",(%3,%4,1); leaq (%3,%4,2),%3;"\\r
-    "prefetcht1 120(%3); prefetcht1 120(%3,%4,1);"\\r
-    "vfmadd213pd (%3),%%zmm3,"#c5"; vmovupd "#c5",(%3); vfmadd213pd (%3,%4,1),%%zmm3,"#c6"; vmovupd "#c6",(%3,%4,1); leaq (%3,%4,2),%3;"\\r
-    "prefetcht1 120(%3); prefetcht1 120(%3,%4,1);"\\r
-    "vfmadd213pd (%3),%%zmm3,"#c7"; vmovupd "#c7",(%3); vfmadd213pd (%3,%4,1),%%zmm3,"#c8"; vmovupd "#c8",(%3,%4,1); leaq (%3,%4,2),%3;"\r
+    "vfmadd213pd (%10),%%zmm3,"#c1"; vmovupd "#c1",(%10); vfmadd213pd (%10,%4,1),%%zmm3,"#c2"; vmovupd "#c2",(%10,%4,1); leaq (%10,%4,2),%10;"\\r
+    "vfmadd213pd (%10),%%zmm3,"#c3"; vmovupd "#c3",(%10); vfmadd213pd (%10,%4,1),%%zmm3,"#c4"; vmovupd "#c4",(%10,%4,1); leaq (%10,%4,2),%10;"\\r
+    "vfmadd213pd (%10),%%zmm3,"#c5"; vmovupd "#c5",(%10); vfmadd213pd (%10,%4,1),%%zmm3,"#c6"; vmovupd "#c6",(%10,%4,1); leaq (%10,%4,2),%10;"\\r
+    "vfmadd213pd (%10),%%zmm3,"#c7"; vmovupd "#c7",(%10); vfmadd213pd (%10,%4,1),%%zmm3,"#c8"; vmovupd "#c8",(%10,%4,1); leaq (%10,%4,2),%10;"\r
+\r
 #define INNER_SAVE_m4n8 \\r
+    "movq %3,%10;"\\r
     INNER_TRANS_4x8(%%zmm8,%%zmm9,%%zmm10,%%zmm11)\\r
     INNER_STORE_4x8(%%zmm8,%%zmm9,%%zmm10,%%zmm11)\r
+\r
 #define INNER_SAVE_m4n16 \\r
+    "movq %3,%10;"\\r
     INNER_TRANS_4x8(%%zmm8,%%zmm10,%%zmm12,%%zmm14)\\r
     INNER_STORE_4x8(%%zmm8,%%zmm10,%%zmm12,%%zmm14)\\r
     INNER_TRANS_4x8(%%zmm9,%%zmm11,%%zmm13,%%zmm15)\\r
     INNER_STORE_4x8(%%zmm9,%%zmm11,%%zmm13,%%zmm15)\r
+\r
 #define INNER_SAVE_m4n24 \\r
+    "movq %3,%10;"\\r
     INNER_TRANS_4x8(%%zmm8,%%zmm11,%%zmm14,%%zmm17)\\r
     INNER_STORE_4x8(%%zmm8,%%zmm11,%%zmm14,%%zmm17)\\r
     INNER_TRANS_4x8(%%zmm9,%%zmm12,%%zmm15,%%zmm18)\\r
     INNER_STORE_4x8(%%zmm9,%%zmm12,%%zmm15,%%zmm18)\\r
     INNER_TRANS_4x8(%%zmm10,%%zmm13,%%zmm16,%%zmm19)\\r
     INNER_STORE_4x8(%%zmm10,%%zmm13,%%zmm16,%%zmm19)\r
+\r
 #define INNER_SAVE_m8n8 \\r
-    INNER_PREF_8x8\\r
+    "movq %3,%10;"\\r
     INNER_TRANS_8x8(%%zmm8,%%zmm9,%%zmm10,%%zmm11,%%zmm12,%%zmm13,%%zmm14,%%zmm15)\\r
     INNER_STORE_8x8(%%zmm8,%%zmm9,%%zmm10,%%zmm11,%%zmm12,%%zmm13,%%zmm14,%%zmm15)\r
+\r
 #define INNER_SAVE_m8n16 \\r
-    INNER_PREF_8x8\\r
+    "movq %3,%10;"\\r
     INNER_TRANS_8x8(%%zmm8,%%zmm10,%%zmm12,%%zmm14,%%zmm16,%%zmm18,%%zmm20,%%zmm22)\\r
     INNER_STORE_8x8(%%zmm8,%%zmm10,%%zmm12,%%zmm14,%%zmm16,%%zmm18,%%zmm20,%%zmm22)\\r
-    INNER_PREF_8x8\\r
     INNER_TRANS_8x8(%%zmm9,%%zmm11,%%zmm13,%%zmm15,%%zmm17,%%zmm19,%%zmm21,%%zmm23)\\r
     INNER_STORE_8x8(%%zmm9,%%zmm11,%%zmm13,%%zmm15,%%zmm17,%%zmm19,%%zmm21,%%zmm23)\r
+\r
 #define INNER_SAVE_m8n24 \\r
-    INNER_PREF_8x8\\r
+    "movq %3,%10;"\\r
     INNER_TRANS_8x8(%%zmm8,%%zmm11,%%zmm14,%%zmm17,%%zmm20,%%zmm23,%%zmm26,%%zmm29)\\r
     INNER_STORE_8x8(%%zmm8,%%zmm11,%%zmm14,%%zmm17,%%zmm20,%%zmm23,%%zmm26,%%zmm29)\\r
-    INNER_PREF_8x8\\r
     INNER_TRANS_8x8(%%zmm9,%%zmm12,%%zmm15,%%zmm18,%%zmm21,%%zmm24,%%zmm27,%%zmm30)\\r
     INNER_STORE_8x8(%%zmm9,%%zmm12,%%zmm15,%%zmm18,%%zmm21,%%zmm24,%%zmm27,%%zmm30)\\r
-    INNER_PREF_8x8\\r
     INNER_TRANS_8x8(%%zmm10,%%zmm13,%%zmm16,%%zmm19,%%zmm22,%%zmm25,%%zmm28,%%zmm31)\\r
     INNER_STORE_8x8(%%zmm10,%%zmm13,%%zmm16,%%zmm19,%%zmm22,%%zmm25,%%zmm28,%%zmm31)\r
 \r
 #define COMPUTE_n8 {\\r
+    b_pref = packed_b_pointer + 8 * K;\\r
     __asm__ __volatile__(\\r
     "vbroadcastsd (%9),%%zmm3;"\\r
     "movq %8,%%r14;movq %2,%%r13;movq %2,%%r12;shlq $5,%%r12;"\\r
     INNER_KERNELm8(8)\\r
     INNER_SAVE_m8n8\\r
     "movq %%r13,%2; subq %%r12,%1; subq %%r12,%1; addq %%r12,%0;"\\r
-    "shlq $3,%4;subq %4,%3;shrq $3,%4;addq $64,%3;"\\r
+    "addq $64,%3;"\\r
     "subq $8,%8; cmpq $8,%8; jnb 42221b;"\\r
     "42222:\n\t"\\r
     "cmpq $4,%8; jb 42223f;"\\r
     INNER_KERNELm4(8)\\r
     INNER_SAVE_m4n8\\r
     "movq %%r13,%2; subq %%r12,%1; subq %%r12,%1;"\\r
-    "shlq $3,%4;subq %4,%3;shrq $3,%4;addq $32,%3;"\\r
+    "addq $32,%3;"\\r
     "subq $4,%8;"\\r
     "42223:\n\t"\\r
     "cmpq $2,%8; jb 42224f;"\\r
     "42225:\n\t"\\r
     "movq %%r14,%8;shlq $3,%8;subq %8,%3;shrq $3,%8;"\\r
     "shlq $3,%4;addq %4,%3;shrq $3,%4;"\\r
-    :"+r"(a_block_pointer),"+r"(packed_b_pointer),"+r"(K),"+r"(c_pointer),"+r"(ldc_in_bytes),"+Yk"(k02),"+Yk"(k03),"+Yk"(k01),"+r"(M),"+r"(alpha)\\r
+    :"+r"(a_block_pointer),"+r"(packed_b_pointer),"+r"(K),"+r"(c_pointer),"+r"(ldc_in_bytes),"+Yk"(k02),"+Yk"(k03),"+Yk"(k01),\\r
+    "+r"(M),"+r"(alpha),"+r"(c_store),"+r"(b_pref)\\r
     ::"zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","cc","memory","k1","r12","r13","r14");\\r
     a_block_pointer -= M * K;\\r
 }\r
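
In each COMPUTE_n* block, b_pref is set 8*K, 16*K or 24*K doubles past packed_b_pointer, i.e. at the start of the next packed B panel for that n-width, so the prefetcht1 (%11) in the k-loop streams the next panel toward L2 while the current one is still being consumed. A minimal sketch of that distance, with next_b_panel as an illustrative helper name:

    /* Sketch only: the prefetch pointer (asm operand %11) starts one packed
       panel ahead; a packed B panel of width n holds n*K doubles. */
    static inline const double *next_b_panel(const double *packed_b_pointer,
                                             long K, int n_width)
    {
        return packed_b_pointer + (long)n_width * K;
    }

    /* matching the initializations in the three compute blocks:        */
    /*   b_pref = packed_b_pointer +  8 * K;    COMPUTE_n8               */
    /*   b_pref = packed_b_pointer + 16 * K;    COMPUTE_n16              */
    /*   b_pref = packed_b_pointer + 24 * K;    COMPUTE_n24              */
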
 #define COMPUTE_n16 {\\r
+    b_pref = packed_b_pointer + 16 * K;\\r
     __asm__ __volatile__(\\r
     "vbroadcastsd (%9),%%zmm3;"\\r
     "movq %8,%%r14;movq %2,%%r13;movq %2,%%r12;shlq $5,%%r12;"\\r
     INNER_KERNELm8(16)\\r
     INNER_SAVE_m8n16\\r
     "movq %%r13,%2; subq %%r12,%1; subq %%r12,%1; addq %%r12,%0;"\\r
-    "shlq $4,%4;subq %4,%3;shrq $4,%4;addq $64,%3;"\\r
+    "addq $64,%3;"\\r
     "subq $8,%8; cmpq $8,%8; jnb 32221b;"\\r
     "32222:\n\t"\\r
     "cmpq $4,%8; jb 32223f;"\\r
     INNER_KERNELm4(16)\\r
     INNER_SAVE_m4n16\\r
     "movq %%r13,%2; subq %%r12,%1; subq %%r12,%1;"\\r
-    "shlq $4,%4;subq %4,%3;shrq $4,%4;addq $32,%3;"\\r
+    "addq $32,%3;"\\r
     "subq $4,%8;"\\r
     "32223:\n\t"\\r
     "cmpq $2,%8; jb 32224f;"\\r
     INNER_KERNELm2(16)\\r
     INNER_SAVE_m2n16\\r
     "movq %%r13,%2; subq %%r12,%1; subq %%r12,%1;"\\r
-    "shlq $3,%4;subq %4,%3;shrq $3,%4;addq $16,%3;"\\r
+    "addq $16,%3;"\\r
     "subq $2,%8;"\\r
     "32224:\n\t"\\r
     "cmpq $1,%8; jb 32225f;"\\r
     INNER_KERNELm1(16)\\r
     INNER_SAVE_m1n16\\r
     "movq %%r13,%2; subq %%r12,%1; subq %%r12,%1;"\\r
-    "shlq $3,%4;subq %4,%3;shrq $3,%4;addq $8,%3;"\\r
+    "addq $8,%3;"\\r
     "32225:\n\t"\\r
     "movq %%r14,%8;shlq $3,%8;subq %8,%3;shrq $3,%8;"\\r
     "shlq $4,%4;addq %4,%3;shrq $4,%4;"\\r
     "leaq (%1,%%r12,4),%1;"\\r
-    :"+r"(a_block_pointer),"+r"(packed_b_pointer),"+r"(K),"+r"(c_pointer),"+r"(ldc_in_bytes),"+Yk"(k02),"+Yk"(k03),"+Yk"(k01),"+r"(M),"+r"(alpha)\\r
+    :"+r"(a_block_pointer),"+r"(packed_b_pointer),"+r"(K),"+r"(c_pointer),"+r"(ldc_in_bytes),"+Yk"(k02),"+Yk"(k03),"+Yk"(k01),\\r
+    "+r"(M),"+r"(alpha),"+r"(c_store),"+r"(b_pref)\\r
     ::"zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17",\\r
     "zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","cc","memory","k1","r12","r13","r14");\\r
     a_block_pointer -= M * K;\\r
 }\r
 #define COMPUTE_n24 {\\r
+    b_pref = packed_b_pointer + 24 * K;\\r
     __asm__ __volatile__(\\r
     "vbroadcastsd (%9),%%zmm3;"\\r
     "movq %8,%%r14;movq %2,%%r13;movq %2,%%r12;shlq $5,%%r12;"\\r
     INNER_KERNELm8(24)\\r
     INNER_SAVE_m8n24\\r
     "movq %%r13,%2; subq %%r12,%1; subq %%r12,%1; addq %%r12,%0;"\\r
-    "shlq $3,%4;subq %4,%3;shlq $1,%4;subq %4,%3;shrq $4,%4;addq $64,%3;"\\r
+    "addq $64,%3;"\\r
     "subq $8,%8; cmpq $8,%8; jnb 22221b;"\\r
     "22222:\n\t"\\r
     "cmpq $4,%8; jb 22223f;"\\r
     INNER_KERNELm4(24)\\r
     INNER_SAVE_m4n24\\r
     "movq %%r13,%2; subq %%r12,%1; subq %%r12,%1;"\\r
-    "shlq $3,%4;subq %4,%3;shlq $1,%4;subq %4,%3;shrq $4,%4;addq $32,%3;"\\r
+    "addq $32,%3;"\\r
     "subq $4,%8;"\\r
     "22223:\n\t"\\r
     "cmpq $2,%8; jb 22224f;"\\r
     INNER_KERNELm2(24)\\r
     INNER_SAVE_m2n24\\r
     "movq %%r13,%2; subq %%r12,%1; subq %%r12,%1;"\\r
-    "shlq $4,%4;subq %4,%3;shrq $4,%4;addq $16,%3;"\\r
+    "addq $16,%3;"\\r
     "subq $2,%8;"\\r
     "22224:\n\t"\\r
     "cmpq $1,%8; jb 22225f;"\\r
     INNER_KERNELm1(24)\\r
     INNER_SAVE_m1n24\\r
     "movq %%r13,%2; subq %%r12,%1; subq %%r12,%1;"\\r
-    "shlq $4,%4;subq %4,%3;shrq $4,%4;addq $8,%3;"\\r
+    "addq $8,%3;"\\r
     "22225:\n\t"\\r
     "movq %%r14,%8;shlq $3,%8;subq %8,%3;shrq $3,%8;"\\r
     "shlq $3,%4;addq %4,%3;shlq $1,%4;addq %4,%3;shrq $4,%4;"\\r
     "leaq (%1,%%r12,4),%1; leaq (%1,%%r12,2),%1;"\\r
-    :"+r"(a_block_pointer),"+r"(packed_b_pointer),"+r"(K),"+r"(c_pointer),"+r"(ldc_in_bytes),"+Yk"(k02),"+Yk"(k03),"+Yk"(k01),"+r"(M),"+r"(alpha)\\r
+    :"+r"(a_block_pointer),"+r"(packed_b_pointer),"+r"(K),"+r"(c_pointer),"+r"(ldc_in_bytes),"+Yk"(k02),"+Yk"(k03),"+Yk"(k01),\\r
+    "+r"(M),"+r"(alpha),"+r"(c_store),"+r"(b_pref)\\r
     ::"zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19",\\r
     "zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31","cc","memory","k1","r12","r13","r14");\\r
     a_block_pointer -= M * K;\\r
@@ -415,8 +427,8 @@ static void KERNEL_MAIN(double *packed_a, double *packed_b, BLASLONG m, BLASLONG
     if(k==0 || m==0 || ndiv8==0) return;\r
     int64_t ldc_in_bytes = (int64_t)LDC * sizeof(double);\r
     int64_t K = (int64_t)k; int64_t M = (int64_t)m;\r
-    double *a_block_pointer;\r
-    double *c_pointer = c;\r
+    double *a_block_pointer,*b_pref;\r
+    double *c_pointer = c,*c_store = c;\r
     __mmask16 k01 = 0x00f0,k02 = 0x000f,k03 = 0x0033;\r
     BLASLONG ndiv8_count;\r
     double *packed_b_pointer = packed_b;\r
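
The last hunk adds the two locals backing the new asm operands: c_store is bound as %10 and b_pref as %11. Because every INNER_SAVE_* now copies %3 into %10 and advances only the copy while storing, the old shift-ldc/rewind sequences in the COMPUTE_n* blocks reduce to a plain addq of the m-step. A rough C-level restatement of that change follows; save_block_sketch and n are illustrative names, not kernel identifiers.

    /* Sketch only: the save path walks a scratch copy of the C pointer,
       leaving c_pointer itself fixed at the top of the current block. */
    static void save_block_sketch(double **c_pointer, long ldc_in_bytes, int n)
    {
        double *c_store = *c_pointer;                 /* movq %3,%10 */
        for (int j = 0; j < n; ++j) {
            /* ... load / vfmadd213pd / store one 8-double column at c_store ... */
            c_store = (double *)((char *)c_store + ldc_in_bytes);
        }
        *c_pointer += 8;   /* addq $64,%3 : step 8 doubles to the next m-block, no rewind */
    }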