# NOTE(review): unified-diff fragment of an OpenBLAS-style KERNEL make
# fragment; '+' lines are additions, '-' lines removals.
CROTKERNEL = rot_vfp.S
ZROTKERNEL = rot_vfp.S
# The DOT kernels become unconditional defaults: the *_vfp.S sources now
# handle the soft-float ABI themselves (selected via __ARM_PCS_VFP), so
# the assignments no longer need to be gated on ARM_ABI=hard ...
+SDOTKERNEL = sdot_vfp.S
+DDOTKERNEL = ddot_vfp.S
+CDOTKERNEL = cdot_vfp.S
+ZDOTKERNEL = zdot_vfp.S
+
SGEMMKERNEL = ../generic/gemmkernel_4x2.c
ifneq ($(SGEMM_UNROLL_M), $(SGEMM_UNROLL_N))
SGEMMINCOPY = sgemm_ncopy_4_vfp.S
ifeq ($(ARM_ABI),hard)
# ... which is why the identical assignments are removed from this
# hard-float-only branch.
-SDOTKERNEL = sdot_vfp.S
-DDOTKERNEL = ddot_vfp.S
-CDOTKERNEL = cdot_vfp.S
-ZDOTKERNEL = zdot_vfp.S
-
SNRM2KERNEL = nrm2_vfp.S
DNRM2KERNEL = nrm2_vfp.S
CNRM2KERNEL = nrm2_vfp.S
// NOTE(review): diff fragment — argument register/stack map for the cdot
// kernel.  N/X/INC_X always arrive in r0-r2; where Y and INC_Y live
// depends on the float-argument ABI selected below.
#define N r0
#define X r1
#define INC_X r2
-#define OLD_Y r3
-
/******************************************************
* [fp, #-128] - [fp, #-64] is reserved
* registers
*******************************************************/
-#define OLD_INC_Y [fp, #4 ]
// __ARM_PCS_VFP is predefined by the compiler only for the hard-float
// (VFP) calling convention; this replaces the hand-maintained
// ARM_SOFTFP_ABI test removed elsewhere in this patch.
+#if !defined(__ARM_PCS_VFP)
// Soft-float: the complex float result is returned through a hidden
// pointer passed in r0, shifting every argument one slot right, so Y
// and INC_Y arrive on the stack ([fp, #0] / [fp, #4] after the new
// prologue sets fp = caller's sp).
+#define OLD_RETURN_ADDR r0
+#define OLD_N r1
+#define OLD_X r2
+#define OLD_INC_X r3
+#define OLD_Y [fp, #0 ]
+#define OLD_INC_Y [fp, #4 ]
// r8 is callee-saved and preserved by the prologue's push {r4 - r9, fp},
// so the hidden result pointer survives the whole kernel.
+#define RETURN_ADDR r8
+#else
// Hard-float: Y is the 4th core-register argument; INC_Y is the first
// stack argument ([fp, #0] with the new fp offset).
+#define OLD_Y r3
+#define OLD_INC_Y [fp, #0 ]
+#endif
#define I r5
#define Y r6
.align 5
// Prologue: save the callee-saved core registers (7 regs = 28 bytes).
push {r4 - r9, fp}
// With 28 bytes pushed, 'add fp, sp, #28' makes fp point at the
// caller's sp, so incoming stack arguments sit at [fp, #0], [fp, #4].
// The old #24 offset placed them at [fp, #4]; the epilogue's matching
// 'sub sp, fp, #<n>' must use the same constant.
- add fp, sp, #24
+ add fp, sp, #28
sub sp, sp, #STACKSIZE // reserve stack
sub r4, fp, #128
// Seed the second accumulator pair from s0 — presumably s0 was zeroed
// just above this excerpt; TODO confirm against the full cdot_vfp.S.
vmov s2, s0
vmov s3, s0
+#if !defined(__ARM_PCS_VFP)
// Soft-float: the hidden result pointer shifted the arguments, so copy
// them into the kernel's working registers.  RETURN_ADDR (r8) is read
// from r0 first, before N overwrites r0.
+ mov RETURN_ADDR, OLD_RETURN_ADDR
+ mov N, OLD_N
+ mov X, OLD_X
+ mov INC_X, OLD_INC_X
+ ldr Y, OLD_Y
+ ldr INC_Y, OLD_INC_Y
+#else
mov Y, OLD_Y
ldr INC_Y, OLD_INC_Y
+#endif
// Nothing to do for n <= 0.
cmp N, #0
ble cdot_kernel_L999
// NOTE(review): in this excerpt the label directly follows the branch
// to it — the main dot-product loop between them was elided from the
// chunk, not from the patch.
cdot_kernel_L999:
-
sub r3, fp, #128
vldm r3, { s8 - s15} // restore floating point registers
// Combine the partial sums into the complex result (s0 = re, s1 = im).
// The #endif below closes a conditional not visible here — presumably
// the conjugate-variant selection; TODO confirm in the full file.
vadd.f32 s0 , s0, s2
vsub.f32 s1 , s1, s3
#endif
+#if !defined(__ARM_PCS_VFP)
// Soft-float: store the complex float result through the hidden return
// pointer captured in RETURN_ADDR (r8) at entry.
+ vstm RETURN_ADDR, {s0 - s1}
+#endif
// Unwind to the register-save area; the constant mirrors the
// prologue's 'add fp, sp, #28' (was #24).
- sub sp, fp, #24
+ sub sp, fp, #28
pop {r4 - r9, fp}
bx lr
vldm r3, { d8 - d15} // restore floating point registers
vadd.f64 d0 , d0, d1 // set return value
+#if !defined(__ARM_PCS_VFP)
// Soft-float: a double is returned in the core register pair r0:r1.
+ vmov r0, r1, d0
+#endif
// NOTE(review): this (ddot) epilogue keeps the #24 frame constant —
// presumably its prologue still uses 'add fp, sp, #24' because ddot
// needs no extra stack-argument slot; verify against the full hunk.
sub sp, fp, #24
pop {r4 - r9, fp}
bx lr
vldm r3, { s8 - s15} // restore floating point registers
// DSDOT accumulates in double precision and returns a double; plain
// SDOT returns a float.  This hunk also swaps the hand-maintained
// ARM_SOFTFP_ABI macro for the compiler-predefined __ARM_PCS_VFP test
// and hoists the ABI check out of the DSDOT/SDOT split.
#if defined(DSDOT)
-
vadd.f64 d0 , d0, d1 // set return value
-
-#ifdef ARM_SOFTFP_ABI
- vmov r0, r1, d0
+#else
+ vadd.f32 s0 , s0, s1 // set return value
#endif
+#if !defined(__ARM_PCS_VFP)
+#if defined(DSDOT)
// Soft-float return: double in the core pair r0:r1 ...
+ vmov r0, r1, d0
#else
-
- vadd.f32 s0 , s0, s1 // set return value
-#ifdef ARM_SOFTFP_ABI
// ... or single-precision float in r0.
vmov r0, s0
#endif
#endif
+
sub sp, fp, #24
pop {r4 - r9, fp}
bx lr
// NOTE(review): diff fragment — argument register/stack map for the zdot
// kernel; structurally identical to the cdot mapping in this patch.
#define N r0
#define X r1
#define INC_X r2
-#define OLD_Y r3
-
/******************************************************
* [fp, #-128] - [fp, #-64] is reserved
* registers
*******************************************************/
-#define OLD_INC_Y [fp, #4 ]
// __ARM_PCS_VFP is predefined only under the hard-float (VFP) calling
// convention.
+#if !defined(__ARM_PCS_VFP)
// Soft-float: the complex double result is returned through a hidden
// pointer passed in r0, shifting the remaining arguments one slot
// right; Y and INC_Y therefore arrive on the stack.
+#define OLD_RETURN_ADDR r0
+#define OLD_N r1
+#define OLD_X r2
+#define OLD_INC_X r3
+#define OLD_Y [fp, #0 ]
+#define OLD_INC_Y [fp, #4 ]
// r8 is callee-saved (preserved by push {r4 - r9, fp}).
+#define RETURN_ADDR r8
+#else
// Hard-float: Y in r3, INC_Y as the first stack argument.
+#define OLD_Y r3
+#define OLD_INC_Y [fp, #0 ]
+#endif
#define I r5
#define Y r6
.align 5
// Prologue: save callee-saved core registers (7 regs = 28 bytes).
push {r4 - r9, fp}
// fp = caller's sp with the new #28 offset, so stack arguments sit at
// [fp, #0] / [fp, #4]; the epilogue must subtract the same constant.
- add fp, sp, #24
+ add fp, sp, #28
sub sp, sp, #STACKSIZE // reserve stack
sub r4, fp, #128
// Widen s0 into the double-precision accumulator pair d2/d3 —
// presumably s0 holds 0.0f from initialisation just above this
// excerpt; TODO confirm against the full zdot_vfp.S.
vcvt.f64.f32 d2, s0
vcvt.f64.f32 d3, s0
+#if !defined(__ARM_PCS_VFP)
// Soft-float: capture the hidden result pointer (r0) before N
// overwrites it, then un-shift the remaining arguments.
+ mov RETURN_ADDR, OLD_RETURN_ADDR
+ mov N, OLD_N
+ mov X, OLD_X
+ mov INC_X, OLD_INC_X
+ ldr Y, OLD_Y
+ ldr INC_Y, OLD_INC_Y
+#else
mov Y, OLD_Y
ldr INC_Y, OLD_INC_Y
-
+#endif
// Nothing to do for n <= 0.
cmp N, #0
ble zdot_kernel_L999
// Combine partial sums into the complex double result (d0 = re,
// d1 = im); the #endif closes a conditional outside this excerpt —
// presumably the conjugate-variant selection; TODO confirm.
vadd.f64 d0 , d0, d2
vsub.f64 d1 , d1, d3
#endif
+#if !defined(__ARM_PCS_VFP)
// Soft-float: store the complex double result through the hidden
// return pointer held in RETURN_ADDR (r8) since entry.
+ vstm RETURN_ADDR, {d0 - d1}
+#endif
// Frame constant updated #24 -> #28 to mirror the prologue's
// 'add fp, sp, #28'.
- sub sp, fp, #24
+ sub sp, fp, #28
pop {r4 - r9, fp}
bx lr