* Remove misnamed `PPC64_HAS_VMX` in preference of directly checking `defined(__VSX__)`.
libunwind was using "VMX" to mean "VSX". "VMX" is just another name for Altivec, while "VSX" is the vector-scalar extensions first used in POWER7. Exposing a "PPC64_HAS_VMX" define was misleading and incorrect.
* Add `defined(__ALTIVEC__)` guards around vector register operations to fix non-Altivec CPUs such as the e5500.
When compiling for certain Book-E processors such as the e5500, we want to skip vector save/restore, as the Altivec registers are illegal on non-Altivec implementations.
* Add `!defined(__NO_FPRS__)` guards around traditional floating-point save/restore.
When compiling for powerpcspe, we cannot access floating point registers, as there aren't any. (The SPE on e500v2 is a 64-bit extension of the GPRs, and it doesn't have the normal floating-point registers at all.)
This fixes building for powerpcspe, although no actual handling for SPE save/restore is written yet.
Reviewed By: MaskRay, #libunwind, compnerd
Differential Revision: https://reviews.llvm.org/D91906
}
inline bool Registers_ppc64::validVectorRegister(int regNum) const {
-#ifdef PPC64_HAS_VMX
+#if defined(__VSX__)
if (regNum >= UNW_PPC64_VS0 && regNum <= UNW_PPC64_VS31)
return true;
if (regNum >= UNW_PPC64_VS32 && regNum <= UNW_PPC64_VS63)
return true;
-#else
+#elif defined(__ALTIVEC__)
if (regNum >= UNW_PPC64_V0 && regNum <= UNW_PPC64_V31)
return true;
#endif
PPC64_LR(30)
PPC64_LR(31)
-#ifdef PPC64_HAS_VMX
+#if defined(__VSX__)
// restore VS registers
// (note that this also restores floating point registers and V registers,
PPC64_LF(30)
PPC64_LF(31)
+#if defined(__ALTIVEC__)
// restore vector registers if any are in use
ld %r5, PPC64_OFFS_VRSAVE(%r3) // test VRsave
cmpwi %r5, 0
PPC64_CLV_UNALIGNEDh(31)
#endif
+#endif
Lnovec:
ld %r0, PPC64_OFFS_CR(%r3)
lwz %r30,128(%r3)
lwz %r31,132(%r3)
+#ifndef __NO_FPRS__
// restore float registers
lfd %f0, 160(%r3)
lfd %f1, 168(%r3)
lfd %f29,392(%r3)
lfd %f30,400(%r3)
lfd %f31,408(%r3)
+#endif
+#if defined(__ALTIVEC__)
// restore vector registers if any are in use
lwz %r5, 156(%r3) // test VRsave
cmpwi %r5, 0
LOAD_VECTOR_UNALIGNEDh(29)
LOAD_VECTOR_UNALIGNEDh(30)
LOAD_VECTOR_UNALIGNEDh(31)
+#endif
Lnovec:
lwz %r0, 136(%r3) // __cr
mfvrsave %r0
std %r0, PPC64_OFFS_VRSAVE(%r3)
-#ifdef PPC64_HAS_VMX
+#if defined(__VSX__)
// save VS registers
// (note that this also saves floating point registers and V registers,
// because part of VS is mapped to these registers)
PPC64_STF(30)
PPC64_STF(31)
+#if defined(__ALTIVEC__)
// save vector registers
// Use 16-bytes below the stack pointer as an
PPC64_STV_UNALIGNED(31)
#endif
+#endif
li %r3, 0 // return UNW_ESUCCESS
blr
mfctr %r0
stw %r0, 148(%r3)
+#if !defined(__NO_FPRS__)
// save float registers
stfd %f0, 160(%r3)
stfd %f1, 168(%r3)
stfd %f29,392(%r3)
stfd %f30,400(%r3)
stfd %f31,408(%r3)
+#endif
-
+#if defined(__ALTIVEC__)
// save vector registers
subi %r4, %r1, 16
SAVE_VECTOR_UNALIGNED(%v29, 424+0x1D0)
SAVE_VECTOR_UNALIGNED(%v30, 424+0x1E0)
SAVE_VECTOR_UNALIGNED(%v31, 424+0x1F0)
+#endif
li %r3, 0 // return UNW_ESUCCESS
blr
#define PPC64_OFFS_VRSAVE 304
#define PPC64_OFFS_FP 312
#define PPC64_OFFS_V 824
-#ifdef _ARCH_PWR8
-#define PPC64_HAS_VMX
-#endif
#elif defined(__APPLE__) && defined(__aarch64__)
#define SEPARATOR %%
#else
#endif
#endif
-#if defined(__powerpc64__) && defined(_ARCH_PWR8)
-#define PPC64_HAS_VMX
-#endif
-
#ifndef _LIBUNWIND_REMEMBER_HEAP_ALLOC
#if defined(_LIBUNWIND_REMEMBER_STACK_ALLOC) || defined(__APPLE__) || \
defined(__linux__) || defined(__ANDROID__) || defined(__MINGW32__) || \