#define pCRow0 x12
#define pCRow1 x13
#define pCRow2 x14
-#define pA x15
-#define ppC x16
-#define ppCRow0 x17
-#define ppCRow1 x18
-#define ppCRow2 x19
-#define ppA x20
+#define pCRow3 x15
+#define pA x16
+#define ppC x17
+#define ppCRow0 x18
+#define ppCRow1 x19
+#define ppCRow2 x20
+#define ppCRow3 x21
+#define ppA x22
+#define alpha x23
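+
+// The kernel now loads B values into d8-d15, which clobbers the old
+// alpha0-3 copies (v10/v11/v14/v15), so alpha is parked in GPR x23
+// and restored into d10 (alpha0) at the top of each SAVE macro.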
#define alpha0 d10
#define alphaV0 v10.d[0]
-#define alpha1 d11
-#define alphaV1 v11.d[0]
-#define alpha2 d14
-#define alphaV2 v14.d[0]
-#define alpha3 d15
-#define alphaV3 v15.d[0]
+
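+// Prefetch distances in bytes: A and B panels are fetched 1024 bytes
+// ahead into L1 (PLDL1KEEP); C rows 128 bytes ahead into L2 (PLDL2KEEP).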
+#define A_PRE_SIZE 1024
+#define B_PRE_SIZE 1024
+#define C_PRE_SIZE 128
// 00 origM
// 01 origN
// 12 pCRow0
// 13 pCRow1
// 14 pCRow2
-// 15 pA
-// 16 ppC
-// 17 ppCRow0
-// 18 must save ppCRow1
-// 19 must save ppCRow2
-// 20 must save ppA
-// 21 must save
-// 22 must save
-// 23 must save
+// 15 pCRow3
+// 16 pA
+// 17 ppC
+// 18 must save ppCRow0
+// 19 must save ppCRow1
+// 20 must save ppCRow2
+// 21 must save ppCRow3
+// 22 must save ppA
+// 23 must save alpha
// 24 must save
// 25 must save
// 26 must save
//v08 must save pB00, pB01
//v09 must save pB02, pB03
//v10 must save ALPHA0
-//v11 must save ALPHA1
+//v11 must save
//v12 must save pB10, pB11
//v13 must save pB12, pB13
-//v14 must save ALPHA2
-//v15 must save ALPHA3
+//v14 must save
+//v15 must save
//v16 must save C00, C01
//v17 must save C02, C03
//v18 ppC00, ppC01
.endm
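+
+// KERNEL8x4_I: software-pipeline prologue. The four B values of the
+// first K step go into d8-d11 (one scalar per register, so every
+// multiply indexes element [0]); v0-v3 hold the eight A values. The
+// next step's B (d12-d15) and A (v4-v7) are pre-loaded for KERNEL8x4_M2.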
.macro KERNEL8x4_I
- ld1 {v8.2d, v9.2d}, [pB]
- add pB, pB, #32
- ld1 {v0.2d, v1.2d}, [pA]
+ ldp d8, d9, [pB]
+ add pB, pB, #16
+ ldp d10, d11, [pB]
+ add pB, pB, #16
+
+ ldp q0, q1, [pA]
add pA, pA, #32
fmul v16.2d, v0.2d, v8.2d[0]
- fmul v29.2d, v1.2d, v9.2d[1]
+ fmul v29.2d, v1.2d, v11.2d[0]
- ld1 {v2.2d, v3.2d}, [ppA]
+ ldp q2, q3, [ppA]
add ppA, ppA, #32
- fmul v20.2d, v0.2d, v8.2d[1]
- fmul v25.2d, v1.2d, v9.2d[0]
+ fmul v20.2d, v0.2d, v9.2d[0]
+ fmul v25.2d, v1.2d, v10.2d[0]
+
+ prfm PLDL1KEEP, [pA, #A_PRE_SIZE]
fmul v18.2d, v2.2d, v8.2d[0]
- fmul v31.2d, v3.2d, v9.2d[1]
- fmul v22.2d, v2.2d, v8.2d[1]
- fmul v27.2d, v3.2d, v9.2d[0]
+ fmul v31.2d, v3.2d, v11.2d[0]
- ld1 {v12.2d, v13.2d}, [pB] // for next round
- add pB, pB, #32
+ prfm PLDL1KEEP, [ppA, #A_PRE_SIZE]
+
+ fmul v22.2d, v2.2d, v9.2d[0]
+ fmul v27.2d, v3.2d, v10.2d[0]
+
+ ldp d12, d13, [pB]
+ add pB, pB, #16
- fmul v24.2d, v0.2d, v9.2d[0]
- fmul v21.2d, v1.2d, v8.2d[1]
+ fmul v24.2d, v0.2d, v10.2d[0]
+ fmul v21.2d, v1.2d, v9.2d[0]
- ld1 {v4.2d, v5.2d} , [pA] // for next round
+ ldp q4, q5, [pA] // for next round
add pA, pA, #32
- fmul v26.2d, v2.2d, v9.2d[0]
- fmul v23.2d, v3.2d, v8.2d[1]
+ fmul v26.2d, v2.2d, v10.2d[0]
+ fmul v23.2d, v3.2d, v9.2d[0]
- ld1 {v6.2d, v7.2d} , [ppA] // for next round
+ ldp q6, q7, [ppA] // for next round
add ppA, ppA, #32
- fmul v28.2d, v0.2d, v9.2d[1]
+ fmul v28.2d, v0.2d, v11.2d[0]
fmul v17.2d, v1.2d, v8.2d[0]
- fmul v30.2d, v2.2d, v9.2d[1]
+
+ ldp d14, d15, [pB]
+ add pB, pB, #16
+
+ fmul v30.2d, v2.2d, v11.2d[0]
fmul v19.2d, v3.2d, v8.2d[0]
.endm
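+
+// KERNEL8x4_M1/M2 ping-pong: M2 computes on v4-v7/d12-d15 loaded by the
+// previous step while fetching v0-v3/d8-d11 for M1; M1 does the reverse,
+// overlapping the loads with the fmlas.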
.macro KERNEL8x4_M2
fmla v16.2d, v4.2d, v12.2d[0]
- fmla v29.2d, v5.2d, v13.2d[1]
+ fmla v29.2d, v5.2d, v15.2d[0]
- ld1 {v8.2d, v9.2d}, [pB]
- add pB, pB, #32
+ ldp d8, d9, [pB]
+ add pB, pB, #16
fmla v18.2d, v6.2d, v12.2d[0]
- fmla v31.2d, v7.2d, v13.2d[1]
- fmla v20.2d, v4.2d, v12.2d[1]
- fmla v25.2d, v5.2d, v13.2d[0]
+ fmla v31.2d, v7.2d, v15.2d[0]
- prfm PLDL1KEEP, [pB, #512]
+ ldp d10, d11, [pB]
+ add pB, pB, #16
- fmla v22.2d, v6.2d, v12.2d[1]
- fmla v27.2d, v7.2d, v13.2d[0]
- fmla v24.2d, v4.2d, v13.2d[0]
- fmla v21.2d, v5.2d, v12.2d[1]
+ fmla v20.2d, v4.2d, v13.2d[0]
+ fmla v25.2d, v5.2d, v14.2d[0]
- ld1 {v0.2d, v1.2d}, [pA]
+ prfm PLDL1KEEP, [pB, #B_PRE_SIZE]
+
+ fmla v22.2d, v6.2d, v13.2d[0]
+ fmla v27.2d, v7.2d, v14.2d[0]
+ fmla v24.2d, v4.2d, v14.2d[0]
+ fmla v21.2d, v5.2d, v13.2d[0]
+
+ ldp q0, q1, [pA]
add pA, pA, #32
- fmla v26.2d, v6.2d, v13.2d[0]
- fmla v23.2d, v7.2d, v12.2d[1]
- fmla v28.2d, v4.2d, v13.2d[1]
+ fmla v26.2d, v6.2d, v14.2d[0]
+ fmla v23.2d, v7.2d, v13.2d[0]
+ fmla v28.2d, v4.2d, v15.2d[0]
fmla v17.2d, v5.2d, v12.2d[0]
- ld1 {v2.2d, v3.2d}, [ppA]
+ ldp q2, q3, [ppA]
add ppA, ppA, #32
- fmla v30.2d, v6.2d, v13.2d[1]
+ fmla v30.2d, v6.2d, v15.2d[0]
fmla v19.2d, v7.2d, v12.2d[0]
.endm
.macro KERNEL8x4_M1
fmla v16.2d, v0.2d, v8.2d[0]
- fmla v29.2d, v1.2d, v9.2d[1]
+ fmla v29.2d, v1.2d, v11.2d[0]
- ld1 {v12.2d, v13.2d}, [pB] // for next round
- add pB, pB, #32
+ ldp d12, d13, [pB]
+ add pB, pB, #16
fmla v18.2d, v2.2d, v8.2d[0]
- fmla v31.2d, v3.2d, v9.2d[1]
- fmla v20.2d, v0.2d, v8.2d[1]
- fmla v25.2d, v1.2d, v9.2d[0]
+ fmla v31.2d, v3.2d, v11.2d[0]
- prfm PLDL1KEEP, [pA, #512]
+ ldp d14, d15, [pB]
+ add pB, pB, #16
- fmla v22.2d, v2.2d, v8.2d[1]
- fmla v27.2d, v3.2d, v9.2d[0]
+ fmla v20.2d, v0.2d, v9.2d[0]
+ fmla v25.2d, v1.2d, v10.2d[0]
- prfm PLDL1KEEP, [ppA, #512]
+ prfm PLDL1KEEP, [pA, #A_PRE_SIZE]
- fmla v24.2d, v0.2d, v9.2d[0]
- fmla v21.2d, v1.2d, v8.2d[1]
+ fmla v22.2d, v2.2d, v9.2d[0]
+ fmla v27.2d, v3.2d, v10.2d[0]
+
+ prfm PLDL1KEEP, [ppA, #A_PRE_SIZE]
+
+ fmla v24.2d, v0.2d, v10.2d[0]
+ fmla v21.2d, v1.2d, v9.2d[0]
- ld1 {v4.2d, v5.2d} , [pA] // for next round
+ ldp q4, q5, [pA]
add pA, pA, #32
- fmla v26.2d, v2.2d, v9.2d[0]
- fmla v23.2d, v3.2d, v8.2d[1]
- fmla v28.2d, v0.2d, v9.2d[1]
+ fmla v26.2d, v2.2d, v10.2d[0]
+ fmla v23.2d, v3.2d, v9.2d[0]
+
+ fmla v28.2d, v0.2d, v11.2d[0]
fmla v17.2d, v1.2d, v8.2d[0]
- ld1 {v6.2d, v7.2d} , [ppA] // for next round
+ ldp q6, q7, [ppA]
add ppA, ppA, #32
- fmla v30.2d, v2.2d, v9.2d[1]
+ fmla v30.2d, v2.2d, v11.2d[0]
fmla v19.2d, v3.2d, v8.2d[0]
.endm
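+
+// KERNEL8x4_E: pipeline epilogue; consumes v4-v7/d12-d15 without
+// issuing any further loads.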
.macro KERNEL8x4_E
fmla v16.2d, v4.2d, v12.2d[0]
- fmla v25.2d, v5.2d, v13.2d[0]
+ fmla v25.2d, v5.2d, v14.2d[0]
fmla v18.2d, v6.2d, v12.2d[0]
- fmla v27.2d, v7.2d, v13.2d[0]
+ fmla v27.2d, v7.2d, v14.2d[0]
- fmla v20.2d, v4.2d, v12.2d[1]
- fmla v29.2d, v5.2d, v13.2d[1]
- fmla v22.2d, v6.2d, v12.2d[1]
- fmla v31.2d, v7.2d, v13.2d[1]
+ fmla v20.2d, v4.2d, v13.2d[0]
+ fmla v29.2d, v5.2d, v15.2d[0]
+ fmla v22.2d, v6.2d, v13.2d[0]
+ fmla v31.2d, v7.2d, v15.2d[0]
- fmla v24.2d, v4.2d, v13.2d[0]
+ fmla v24.2d, v4.2d, v14.2d[0]
fmla v17.2d, v5.2d, v12.2d[0]
- fmla v26.2d, v6.2d, v13.2d[0]
+ fmla v26.2d, v6.2d, v14.2d[0]
fmla v19.2d, v7.2d, v12.2d[0]
- fmla v28.2d, v4.2d, v13.2d[1]
- fmla v21.2d, v5.2d, v12.2d[1]
- fmla v30.2d, v6.2d, v13.2d[1]
- fmla v23.2d, v7.2d, v12.2d[1]
+ fmla v28.2d, v4.2d, v15.2d[0]
+ fmla v21.2d, v5.2d, v13.2d[0]
+ fmla v30.2d, v6.2d, v15.2d[0]
+ fmla v23.2d, v7.2d, v13.2d[0]
.endm
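+
+// KERNEL8x4_SUB: self-contained single K step, used for the K & 3 tail loop.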
.macro KERNEL8x4_SUB
- ld1 {v8.2d, v9.2d}, [pB]
- add pB, pB, #32
- ld1 {v0.2d, v1.2d}, [pA]
+ ldp d8, d9, [pB]
+ add pB, pB, #16
+ ldp d10, d11, [pB]
+ add pB, pB, #16
+ ldp q0, q1, [pA]
add pA, pA, #32
fmla v16.2d, v0.2d, v8.2d[0]
- fmla v29.2d, v1.2d, v9.2d[1]
- fmla v20.2d, v0.2d, v8.2d[1]
- fmla v25.2d, v1.2d, v9.2d[0]
+ fmla v29.2d, v1.2d, v11.2d[0]
+ fmla v20.2d, v0.2d, v9.2d[0]
+ fmla v25.2d, v1.2d, v10.2d[0]
- ld1 {v2.2d, v3.2d}, [ppA]
+ ldp q2, q3, [ppA]
add ppA, ppA, #32
- fmla v24.2d, v0.2d, v9.2d[0]
- fmla v21.2d, v1.2d, v8.2d[1]
- fmla v28.2d, v0.2d, v9.2d[1]
+ fmla v24.2d, v0.2d, v10.2d[0]
+ fmla v21.2d, v1.2d, v9.2d[0]
+ fmla v28.2d, v0.2d, v11.2d[0]
fmla v17.2d, v1.2d, v8.2d[0]
fmla v18.2d, v2.2d, v8.2d[0]
- fmla v31.2d, v3.2d, v9.2d[1]
- fmla v22.2d, v2.2d, v8.2d[1]
- fmla v27.2d, v3.2d, v9.2d[0]
+ fmla v31.2d, v3.2d, v11.2d[0]
+ fmla v22.2d, v2.2d, v9.2d[0]
+ fmla v27.2d, v3.2d, v10.2d[0]
- fmla v26.2d, v2.2d, v9.2d[0]
- fmla v23.2d, v3.2d, v8.2d[1]
- fmla v30.2d, v2.2d, v9.2d[1]
+ fmla v26.2d, v2.2d, v10.2d[0]
+ fmla v23.2d, v3.2d, v9.2d[0]
+ fmla v30.2d, v2.2d, v11.2d[0]
fmla v19.2d, v3.2d, v8.2d[0]
.endm
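+
+// SAVE8x4: write back the 8x4 tile of C, scaling by the single alphaV0.
+// pCRow0-3 are set up once per panel in dgemm_kernel_L4_BEGIN and only
+// advanced here; ppCRow* point 32 bytes in, at the half computed from ppA.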
.macro SAVE8x4
+ fmov alpha0, alpha
+
+ prfm PLDL2KEEP, [pCRow0, #C_PRE_SIZE]
add ppCRow0, pCRow0, #32
- ld1 {v0.2d, v1.2d}, [pCRow0]
+ ldp q0, q1, [pCRow0]
fmla v0.2d, v16.2d, alphaV0
- fmla v1.2d, v17.2d, alphaV1
- st1 {v0.2d, v1.2d}, [pCRow0]
+ fmla v1.2d, v17.2d, alphaV0
+ stp q0, q1, [pCRow0]
- ld1 {v2.2d, v3.2d}, [ppCRow0]
- fmla v2.2d, v18.2d, alphaV2
- fmla v3.2d, v19.2d, alphaV3
- st1 {v2.2d, v3.2d}, [ppCRow0]
+ add pCRow0, pCRow0, #64
- add pCRow1, pCRow0, LDC
- add ppCRow1, ppCRow0, LDC
+ ldp q2, q3, [ppCRow0]
+ fmla v2.2d, v18.2d, alphaV0
+ fmla v3.2d, v19.2d, alphaV0
+ stp q2, q3, [ppCRow0]
- ld1 {v4.2d, v5.2d}, [pCRow1]
+ prfm PLDL2KEEP, [pCRow1, #C_PRE_SIZE]
+ add ppCRow1, pCRow1, #32
+
+ ldp q4, q5, [pCRow1]
fmla v4.2d, v20.2d, alphaV0
- fmla v5.2d, v21.2d, alphaV1
- st1 {v4.2d, v5.2d}, [pCRow1]
+ fmla v5.2d, v21.2d, alphaV0
+ stp q4, q5, [pCRow1]
- ld1 {v6.2d, v7.2d}, [ppCRow1]
- fmla v6.2d, v22.2d, alphaV2
- fmla v7.2d, v23.2d, alphaV3
- st1 {v6.2d, v7.2d}, [ppCRow1]
+ add pCRow1, pCRow1, #64
- add pCRow2, pCRow1, LDC
- add ppCRow2, ppCRow1, LDC
+ ldp q6, q7, [ppCRow1]
+ fmla v6.2d, v22.2d, alphaV0
+ fmla v7.2d, v23.2d, alphaV0
+ stp q6, q7, [ppCRow1]
+
+ prfm PLDL2KEEP, [pCRow2, #C_PRE_SIZE]
+ add ppCRow2, pCRow2, #32
- ld1 {v0.2d, v1.2d}, [pCRow2]
+ ldp q0, q1, [pCRow2]
fmla v0.2d, v24.2d, alphaV0
- fmla v1.2d, v25.2d, alphaV1
- st1 {v0.2d, v1.2d}, [pCRow2]
+ fmla v1.2d, v25.2d, alphaV0
+ stp q0, q1, [pCRow2]
- ld1 {v2.2d, v3.2d}, [ppCRow2]
- fmla v2.2d, v26.2d, alphaV2
- fmla v3.2d, v27.2d, alphaV3
- st1 {v2.2d, v3.2d}, [ppCRow2]
+ add pCRow2, pCRow2, #64
- add pCRow1, pCRow2, LDC
- add ppCRow1, ppCRow2, LDC
+ ldp q2, q3, [ppCRow2]
+ fmla v2.2d, v26.2d, alphaV0
+ fmla v3.2d, v27.2d, alphaV0
+ stp q2, q3, [ppCRow2]
+
+ prfm PLDL2KEEP, [pCRow3, #C_PRE_SIZE]
+ add ppCRow3, pCRow3, #32
- ld1 {v4.2d, v5.2d}, [pCRow1]
+ ldp q4, q5, [pCRow3]
fmla v4.2d, v28.2d, alphaV0
- fmla v5.2d, v29.2d, alphaV1
- st1 {v4.2d, v5.2d}, [pCRow1]
+ fmla v5.2d, v29.2d, alphaV0
+ stp q4, q5, [pCRow3]
- ld1 {v6.2d, v7.2d}, [ppCRow1]
- fmla v6.2d, v30.2d, alphaV2
- fmla v7.2d, v31.2d, alphaV3
- st1 {v6.2d, v7.2d}, [ppCRow1]
+ add pCRow3, pCRow3, #64
- add pCRow0, pCRow0, #64
+ ldp q6, q7, [ppCRow3]
+ fmla v6.2d, v30.2d, alphaV0
+ fmla v7.2d, v31.2d, alphaV0
+ stp q6, q7, [ppCRow3]
.endm
/******************************************************************************/
.endm
.macro SAVE4x4
+ fmov alpha0, alpha
+
ld1 {v8.2d, v9.2d}, [pCRow0]
fmla v8.2d, v16.2d, alphaV0
- fmla v9.2d, v17.2d, alphaV1
+ fmla v9.2d, v17.2d, alphaV0
st1 {v8.2d, v9.2d}, [pCRow0]
add pCRow1, pCRow0, LDC
ld1 {v12.2d, v13.2d}, [pCRow1]
- fmla v12.2d, v20.2d, alphaV2
- fmla v13.2d, v21.2d, alphaV3
+ fmla v12.2d, v20.2d, alphaV0
+ fmla v13.2d, v21.2d, alphaV0
st1 {v12.2d, v13.2d}, [pCRow1]
add pCRow2, pCRow1, LDC
ld1 {v8.2d, v9.2d}, [pCRow2]
fmla v8.2d, v24.2d, alphaV0
- fmla v9.2d, v25.2d, alphaV1
+ fmla v9.2d, v25.2d, alphaV0
st1 {v8.2d, v9.2d}, [pCRow2]
add pCRow1, pCRow2, LDC
ld1 {v12.2d, v13.2d}, [pCRow1]
- fmla v12.2d, v28.2d, alphaV2
- fmla v13.2d, v29.2d, alphaV3
+ fmla v12.2d, v28.2d, alphaV0
+ fmla v13.2d, v29.2d, alphaV0
st1 {v12.2d, v13.2d}, [pCRow1]
add pCRow0, pCRow0, #32
.endm
.macro SAVE2x4
+ fmov alpha0, alpha
+
ld1 {v8.2d}, [pCRow0]
fmla v8.2d, v16.2d, alphaV0
st1 {v8.2d}, [pCRow0]
add pCRow1, pCRow0, LDC
ld1 {v12.2d}, [pCRow1]
- fmla v12.2d, v20.2d, alphaV1
+ fmla v12.2d, v20.2d, alphaV0
st1 {v12.2d}, [pCRow1]
add pCRow2, pCRow1, LDC
ld1 {v8.2d}, [pCRow2]
- fmla v8.2d, v24.2d, alphaV2
+ fmla v8.2d, v24.2d, alphaV0
st1 {v8.2d}, [pCRow2]
add pCRow1, pCRow2, LDC
ld1 {v12.2d}, [pCRow1]
- fmla v12.2d, v28.2d, alphaV3
+ fmla v12.2d, v28.2d, alphaV0
st1 {v12.2d}, [pCRow1]
add pCRow0, pCRow0, #16
.endm
.macro SAVE1x4
+ fmov alpha0, alpha
+
add pCRow1, pCRow0, LDC
ld1 {v8.d}[0], [pCRow0]
ld1 {v12.d}[0], [pCRow2]
ld1 {v12.d}[1], [pCRow1]
- fmla v12.2d, v20.2d, alphaV1
+ fmla v12.2d, v20.2d, alphaV0
st1 {v12.d}[0], [pCRow2]
st1 {v12.d}[1], [pCRow1]
.endm
.macro SAVE4x2
+ fmov alpha0, alpha
+
ld1 {v8.2d, v9.2d}, [pCRow0]
fmla v8.2d, v16.2d, alphaV0
- fmla v9.2d, v17.2d, alphaV1
+ fmla v9.2d, v17.2d, alphaV0
st1 {v8.2d, v9.2d}, [pCRow0]
add pCRow1, pCRow0, LDC
ld1 {v12.2d, v13.2d}, [pCRow1]
- fmla v12.2d, v20.2d, alphaV2
- fmla v13.2d, v21.2d, alphaV3
+ fmla v12.2d, v20.2d, alphaV0
+ fmla v13.2d, v21.2d, alphaV0
st1 {v12.2d, v13.2d}, [pCRow1]
add pCRow0, pCRow0, #32
.endm
.macro SAVE2x2
+ fmov alpha0, alpha
+
ld1 {v8.2d}, [pCRow0]
fmla v8.2d, v16.2d, alphaV0
st1 {v8.2d}, [pCRow0]
add pCRow1 , pCRow0, LDC
ld1 {v12.2d}, [pCRow1]
- fmla v12.2d, v20.2d, alphaV1
+ fmla v12.2d, v20.2d, alphaV0
st1 {v12.2d}, [pCRow1]
add pCRow0, pCRow0, #16
.endm
.macro SAVE1x2
+ fmov alpha0, alpha
+
add pCRow1 , pCRow0, LDC
ld1 {v8.d}[0], [pCRow0]
.endm
.macro SAVE4x1
+ fmov alpha0, alpha
+
ld1 {v8.2d, v9.2d}, [pCRow0]
fmla v8.2d, v16.2d, alphaV0
- fmla v9.2d, v17.2d, alphaV1
+ fmla v9.2d, v17.2d, alphaV0
st1 {v8.2d, v9.2d}, [pCRow0]
add pCRow0, pCRow0, #32
.endm
.macro SAVE2x1
+ fmov alpha0, alpha
+
ld1 {v8.2d}, [pCRow0]
fmla v8.2d, v16.2d, alphaV0
st1 {v8.2d}, [pCRow0]
.endm
.macro SAVE1x1
+ fmov alpha0, alpha
+
ldr d8, [pCRow0]
fmadd d8, d16, alpha0, d8
str d8, [pCRow0]
stp x26, x27, [sp, #(9 * 16)]
str x28, [sp, #(10 * 16)]
- fmov alpha0, d0
- fmov alpha1, d0
- fmov alpha2, d0
- fmov alpha3, d0
+ fmov alpha, d0
+ prfm PLDL1KEEP, [origPA]
+ prfm PLDL1KEEP, [origPB]
lsl LDC, LDC, #3 // ldc = ldc * 8
ble dgemm_kernel_L2_BEGIN
dgemm_kernel_L4_BEGIN:
- mov pCRow0, pC // pCRow0 = C
- add pC, pC, LDC, lsl #2
+ mov pCRow0, pC
+ add pCRow1, pCRow0, LDC
+ add pCRow2, pCRow1, LDC
+ add pCRow3, pCRow2, LDC
+ add pC, pCRow3, LDC
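+	// derive all four C row pointers once per panel; SAVE8x4 just
+	// advances them instead of chaining LDC adds per row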
lsl temp, origK, #5 // k * 4 * 8
mov pA, origPA // pA = start of A array
add ppA, temp, pA
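+	// ppA runs through the second 4-row half of the packed A panel,
+	// temp = K*4*8 bytes past pA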
+ prfm PLDL1KEEP, [ppA]
//------------------------------------------------------------------------------
cmp counterI, #0
ble dgemm_kernel_L4_M4_BEGIN
+ .align 5
dgemm_kernel_L4_M8_20:
mov pB, origPB
- asr counterL , origK, #1 // L = K / 2
- cmp counterL , #2 // is there at least 4 to do?
+ asr counterL , origK, #2 // L = K / 4
+	cmp	counterL , #2			// is there at least 8 to do?
blt dgemm_kernel_L4_M8_32
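+
+	// K unrolled by 4: I,M2 start the pipeline, M1/M2 pairs keep it
+	// full, and M1/E drain it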
- KERNEL8x4_I // do one in the K
- KERNEL8x4_M2 // do another in the K
+ KERNEL8x4_I
+ KERNEL8x4_M2
+ KERNEL8x4_M1
+ KERNEL8x4_M2
subs counterL, counterL, #2 // subtract 2
ble dgemm_kernel_L4_M8_22a
- .align 5
+ .align 5
dgemm_kernel_L4_M8_22:
-
+ KERNEL8x4_M1
+ KERNEL8x4_M2
KERNEL8x4_M1
KERNEL8x4_M2
subs counterL, counterL, #1
bgt dgemm_kernel_L4_M8_22
-
+ .align 5
dgemm_kernel_L4_M8_22a:
KERNEL8x4_M1
+ KERNEL8x4_M2
+ KERNEL8x4_M1
KERNEL8x4_E
b dgemm_kernel_L4_M8_44
+ .align 5
dgemm_kernel_L4_M8_32:
tst counterL, #1
ble dgemm_kernel_L4_M8_40
KERNEL8x4_I
-
+ KERNEL8x4_M2
+ KERNEL8x4_M1
KERNEL8x4_E
b dgemm_kernel_L4_M8_44
dgemm_kernel_L4_M8_44:
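+	// tail: one KERNEL8x4_SUB per leftover K step (K & 3 of them)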
- ands counterL , origK, #1
+ ands counterL , origK, #3
ble dgemm_kernel_L4_M8_100
+ .align 5
dgemm_kernel_L4_M8_46:
KERNEL8x4_SUB
+ subs counterL, counterL, #1
+ bne dgemm_kernel_L4_M8_46
+
dgemm_kernel_L4_M8_100:
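+	// prefetch the next panel of A (temp = K*32 bytes past pA and ppA)
+	// and the start of B before the C writeback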
+ lsl temp, origK, #5
+ prfm PLDL1KEEP, [pA, temp]
+ prfm PLDL1KEEP, [ppA, temp]
+ prfm PLDL1KEEP, [origPB]
SAVE8x4
subs counterI, counterI, #1
bne dgemm_kernel_L4_M8_20
-
dgemm_kernel_L4_M4_BEGIN:
mov counterI, origM
tst counterI , #7