summaryrefslogtreecommitdiff
path: root/src/UnwindRegistersSave.S
diff options
context:
space:
mode:
Diffstat (limited to 'src/UnwindRegistersSave.S')
-rw-r--r--  src/UnwindRegistersSave.S  |  373
1 files changed, 361 insertions, 12 deletions
diff --git a/src/UnwindRegistersSave.S b/src/UnwindRegistersSave.S
index 4583f50..c7c4959 100644
--- a/src/UnwindRegistersSave.S
+++ b/src/UnwindRegistersSave.S
@@ -1,9 +1,8 @@
//===------------------------ UnwindRegistersSave.S -----------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is dual licensed under the MIT and the University of Illinois Open
-// Source Licenses. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -116,7 +115,7 @@ DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
xorl %eax, %eax # return UNW_ESUCCESS
ret
-#elif defined(__mips__) && defined(_ABIO32) && defined(__mips_soft_float)
+#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32
#
# extern int unw_getcontext(unw_context_t* thread_state)
@@ -167,12 +166,65 @@ DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
sw $8, (4 * 33)($4)
mflo $8
sw $8, (4 * 34)($4)
+#ifdef __mips_hard_float
+#if __mips_fpr != 64
+ sdc1 $f0, (4 * 36 + 8 * 0)($4)
+ sdc1 $f2, (4 * 36 + 8 * 2)($4)
+ sdc1 $f4, (4 * 36 + 8 * 4)($4)
+ sdc1 $f6, (4 * 36 + 8 * 6)($4)
+ sdc1 $f8, (4 * 36 + 8 * 8)($4)
+ sdc1 $f10, (4 * 36 + 8 * 10)($4)
+ sdc1 $f12, (4 * 36 + 8 * 12)($4)
+ sdc1 $f14, (4 * 36 + 8 * 14)($4)
+ sdc1 $f16, (4 * 36 + 8 * 16)($4)
+ sdc1 $f18, (4 * 36 + 8 * 18)($4)
+ sdc1 $f20, (4 * 36 + 8 * 20)($4)
+ sdc1 $f22, (4 * 36 + 8 * 22)($4)
+ sdc1 $f24, (4 * 36 + 8 * 24)($4)
+ sdc1 $f26, (4 * 36 + 8 * 26)($4)
+ sdc1 $f28, (4 * 36 + 8 * 28)($4)
+ sdc1 $f30, (4 * 36 + 8 * 30)($4)
+#else
+ sdc1 $f0, (4 * 36 + 8 * 0)($4)
+ sdc1 $f1, (4 * 36 + 8 * 1)($4)
+ sdc1 $f2, (4 * 36 + 8 * 2)($4)
+ sdc1 $f3, (4 * 36 + 8 * 3)($4)
+ sdc1 $f4, (4 * 36 + 8 * 4)($4)
+ sdc1 $f5, (4 * 36 + 8 * 5)($4)
+ sdc1 $f6, (4 * 36 + 8 * 6)($4)
+ sdc1 $f7, (4 * 36 + 8 * 7)($4)
+ sdc1 $f8, (4 * 36 + 8 * 8)($4)
+ sdc1 $f9, (4 * 36 + 8 * 9)($4)
+ sdc1 $f10, (4 * 36 + 8 * 10)($4)
+ sdc1 $f11, (4 * 36 + 8 * 11)($4)
+ sdc1 $f12, (4 * 36 + 8 * 12)($4)
+ sdc1 $f13, (4 * 36 + 8 * 13)($4)
+ sdc1 $f14, (4 * 36 + 8 * 14)($4)
+ sdc1 $f15, (4 * 36 + 8 * 15)($4)
+ sdc1 $f16, (4 * 36 + 8 * 16)($4)
+ sdc1 $f17, (4 * 36 + 8 * 17)($4)
+ sdc1 $f18, (4 * 36 + 8 * 18)($4)
+ sdc1 $f19, (4 * 36 + 8 * 19)($4)
+ sdc1 $f20, (4 * 36 + 8 * 20)($4)
+ sdc1 $f21, (4 * 36 + 8 * 21)($4)
+ sdc1 $f22, (4 * 36 + 8 * 22)($4)
+ sdc1 $f23, (4 * 36 + 8 * 23)($4)
+ sdc1 $f24, (4 * 36 + 8 * 24)($4)
+ sdc1 $f25, (4 * 36 + 8 * 25)($4)
+ sdc1 $f26, (4 * 36 + 8 * 26)($4)
+ sdc1 $f27, (4 * 36 + 8 * 27)($4)
+ sdc1 $f28, (4 * 36 + 8 * 28)($4)
+ sdc1 $f29, (4 * 36 + 8 * 29)($4)
+ sdc1 $f30, (4 * 36 + 8 * 30)($4)
+ sdc1 $f31, (4 * 36 + 8 * 31)($4)
+#endif
+#endif
jr $31
# return UNW_ESUCCESS
or $2, $0, $0
.set pop
-#elif defined(__mips__) && defined(_ABI64) && defined(__mips_soft_float)
+#elif defined(__mips64)
#
# extern int unw_getcontext(unw_context_t* thread_state)
@@ -223,6 +275,40 @@ DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
sd $8, (8 * 33)($4)
mflo $8
sd $8, (8 * 34)($4)
+#ifdef __mips_hard_float
+ sdc1 $f0, (8 * 35)($4)
+ sdc1 $f1, (8 * 36)($4)
+ sdc1 $f2, (8 * 37)($4)
+ sdc1 $f3, (8 * 38)($4)
+ sdc1 $f4, (8 * 39)($4)
+ sdc1 $f5, (8 * 40)($4)
+ sdc1 $f6, (8 * 41)($4)
+ sdc1 $f7, (8 * 42)($4)
+ sdc1 $f8, (8 * 43)($4)
+ sdc1 $f9, (8 * 44)($4)
+ sdc1 $f10, (8 * 45)($4)
+ sdc1 $f11, (8 * 46)($4)
+ sdc1 $f12, (8 * 47)($4)
+ sdc1 $f13, (8 * 48)($4)
+ sdc1 $f14, (8 * 49)($4)
+ sdc1 $f15, (8 * 50)($4)
+ sdc1 $f16, (8 * 51)($4)
+ sdc1 $f17, (8 * 52)($4)
+ sdc1 $f18, (8 * 53)($4)
+ sdc1 $f19, (8 * 54)($4)
+ sdc1 $f20, (8 * 55)($4)
+ sdc1 $f21, (8 * 56)($4)
+ sdc1 $f22, (8 * 57)($4)
+ sdc1 $f23, (8 * 58)($4)
+ sdc1 $f24, (8 * 59)($4)
+ sdc1 $f25, (8 * 60)($4)
+ sdc1 $f26, (8 * 61)($4)
+ sdc1 $f27, (8 * 62)($4)
+ sdc1 $f28, (8 * 63)($4)
+ sdc1 $f29, (8 * 64)($4)
+ sdc1 $f30, (8 * 65)($4)
+ sdc1 $f31, (8 * 66)($4)
+#endif
jr $31
# return UNW_ESUCCESS
or $2, $0, $0
@@ -237,6 +323,237 @@ DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
teq $0, $0
+#elif defined(__powerpc64__)
+
+//
+// extern int unw_getcontext(unw_context_t* thread_state)
+//
+// On entry:
+// thread_state pointer is in r3
+//
+DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
+
+// store register (GPR)
+#define PPC64_STR(n) \
+ std %r##n, (8 * (n + 2))(%r3)
+
+ // save GPRs
+ PPC64_STR(0)
+ mflr %r0
+ std %r0, PPC64_OFFS_SRR0(%r3) // store lr as ssr0
+ PPC64_STR(1)
+ PPC64_STR(2)
+ PPC64_STR(3)
+ PPC64_STR(4)
+ PPC64_STR(5)
+ PPC64_STR(6)
+ PPC64_STR(7)
+ PPC64_STR(8)
+ PPC64_STR(9)
+ PPC64_STR(10)
+ PPC64_STR(11)
+ PPC64_STR(12)
+ PPC64_STR(13)
+ PPC64_STR(14)
+ PPC64_STR(15)
+ PPC64_STR(16)
+ PPC64_STR(17)
+ PPC64_STR(18)
+ PPC64_STR(19)
+ PPC64_STR(20)
+ PPC64_STR(21)
+ PPC64_STR(22)
+ PPC64_STR(23)
+ PPC64_STR(24)
+ PPC64_STR(25)
+ PPC64_STR(26)
+ PPC64_STR(27)
+ PPC64_STR(28)
+ PPC64_STR(29)
+ PPC64_STR(30)
+ PPC64_STR(31)
+
+ mfcr %r0
+ std %r0, PPC64_OFFS_CR(%r3)
+ mfxer %r0
+ std %r0, PPC64_OFFS_XER(%r3)
+ mflr %r0
+ std %r0, PPC64_OFFS_LR(%r3)
+ mfctr %r0
+ std %r0, PPC64_OFFS_CTR(%r3)
+ mfvrsave %r0
+ std %r0, PPC64_OFFS_VRSAVE(%r3)
+
+#ifdef PPC64_HAS_VMX
+ // save VS registers
+ // (note that this also saves floating point registers and V registers,
+ // because part of VS is mapped to these registers)
+
+ addi %r4, %r3, PPC64_OFFS_FP
+
+// store VS register
+#define PPC64_STVS(n) \
+ stxvd2x %vs##n, 0, %r4 ;\
+ addi %r4, %r4, 16
+
+ PPC64_STVS(0)
+ PPC64_STVS(1)
+ PPC64_STVS(2)
+ PPC64_STVS(3)
+ PPC64_STVS(4)
+ PPC64_STVS(5)
+ PPC64_STVS(6)
+ PPC64_STVS(7)
+ PPC64_STVS(8)
+ PPC64_STVS(9)
+ PPC64_STVS(10)
+ PPC64_STVS(11)
+ PPC64_STVS(12)
+ PPC64_STVS(13)
+ PPC64_STVS(14)
+ PPC64_STVS(15)
+ PPC64_STVS(16)
+ PPC64_STVS(17)
+ PPC64_STVS(18)
+ PPC64_STVS(19)
+ PPC64_STVS(20)
+ PPC64_STVS(21)
+ PPC64_STVS(22)
+ PPC64_STVS(23)
+ PPC64_STVS(24)
+ PPC64_STVS(25)
+ PPC64_STVS(26)
+ PPC64_STVS(27)
+ PPC64_STVS(28)
+ PPC64_STVS(29)
+ PPC64_STVS(30)
+ PPC64_STVS(31)
+ PPC64_STVS(32)
+ PPC64_STVS(33)
+ PPC64_STVS(34)
+ PPC64_STVS(35)
+ PPC64_STVS(36)
+ PPC64_STVS(37)
+ PPC64_STVS(38)
+ PPC64_STVS(39)
+ PPC64_STVS(40)
+ PPC64_STVS(41)
+ PPC64_STVS(42)
+ PPC64_STVS(43)
+ PPC64_STVS(44)
+ PPC64_STVS(45)
+ PPC64_STVS(46)
+ PPC64_STVS(47)
+ PPC64_STVS(48)
+ PPC64_STVS(49)
+ PPC64_STVS(50)
+ PPC64_STVS(51)
+ PPC64_STVS(52)
+ PPC64_STVS(53)
+ PPC64_STVS(54)
+ PPC64_STVS(55)
+ PPC64_STVS(56)
+ PPC64_STVS(57)
+ PPC64_STVS(58)
+ PPC64_STVS(59)
+ PPC64_STVS(60)
+ PPC64_STVS(61)
+ PPC64_STVS(62)
+ PPC64_STVS(63)
+
+#else
+
+// store FP register
+#define PPC64_STF(n) \
+ stfd %f##n, (PPC64_OFFS_FP + n * 16)(%r3)
+
+ // save float registers
+ PPC64_STF(0)
+ PPC64_STF(1)
+ PPC64_STF(2)
+ PPC64_STF(3)
+ PPC64_STF(4)
+ PPC64_STF(5)
+ PPC64_STF(6)
+ PPC64_STF(7)
+ PPC64_STF(8)
+ PPC64_STF(9)
+ PPC64_STF(10)
+ PPC64_STF(11)
+ PPC64_STF(12)
+ PPC64_STF(13)
+ PPC64_STF(14)
+ PPC64_STF(15)
+ PPC64_STF(16)
+ PPC64_STF(17)
+ PPC64_STF(18)
+ PPC64_STF(19)
+ PPC64_STF(20)
+ PPC64_STF(21)
+ PPC64_STF(22)
+ PPC64_STF(23)
+ PPC64_STF(24)
+ PPC64_STF(25)
+ PPC64_STF(26)
+ PPC64_STF(27)
+ PPC64_STF(28)
+ PPC64_STF(29)
+ PPC64_STF(30)
+ PPC64_STF(31)
+
+ // save vector registers
+
+ // Use 16-bytes below the stack pointer as an
+ // aligned buffer to save each vector register.
+ // Note that the stack pointer is always 16-byte aligned.
+ subi %r4, %r1, 16
+
+#define PPC64_STV_UNALIGNED(n) \
+ stvx %v##n, 0, %r4 ;\
+ ld %r5, 0(%r4) ;\
+ std %r5, (PPC64_OFFS_V + n * 16)(%r3) ;\
+ ld %r5, 8(%r4) ;\
+ std %r5, (PPC64_OFFS_V + n * 16 + 8)(%r3)
+
+ PPC64_STV_UNALIGNED(0)
+ PPC64_STV_UNALIGNED(1)
+ PPC64_STV_UNALIGNED(2)
+ PPC64_STV_UNALIGNED(3)
+ PPC64_STV_UNALIGNED(4)
+ PPC64_STV_UNALIGNED(5)
+ PPC64_STV_UNALIGNED(6)
+ PPC64_STV_UNALIGNED(7)
+ PPC64_STV_UNALIGNED(8)
+ PPC64_STV_UNALIGNED(9)
+ PPC64_STV_UNALIGNED(10)
+ PPC64_STV_UNALIGNED(11)
+ PPC64_STV_UNALIGNED(12)
+ PPC64_STV_UNALIGNED(13)
+ PPC64_STV_UNALIGNED(14)
+ PPC64_STV_UNALIGNED(15)
+ PPC64_STV_UNALIGNED(16)
+ PPC64_STV_UNALIGNED(17)
+ PPC64_STV_UNALIGNED(18)
+ PPC64_STV_UNALIGNED(19)
+ PPC64_STV_UNALIGNED(20)
+ PPC64_STV_UNALIGNED(21)
+ PPC64_STV_UNALIGNED(22)
+ PPC64_STV_UNALIGNED(23)
+ PPC64_STV_UNALIGNED(24)
+ PPC64_STV_UNALIGNED(25)
+ PPC64_STV_UNALIGNED(26)
+ PPC64_STV_UNALIGNED(27)
+ PPC64_STV_UNALIGNED(28)
+ PPC64_STV_UNALIGNED(29)
+ PPC64_STV_UNALIGNED(30)
+ PPC64_STV_UNALIGNED(31)
+
+#endif
+
+ li %r3, 0 // return UNW_ESUCCESS
+ blr
+
+
#elif defined(__ppc__)
;
@@ -490,7 +807,7 @@ DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
#if defined(__ELF__)
.fpu vfpv3-d16
#endif
-DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMDEPy)
+DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMDEPv)
vstmia r0, {d0-d15}
JMP(lr)
@@ -504,7 +821,7 @@ DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMD
#if defined(__ELF__)
.fpu vfpv3-d16
#endif
-DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMXEPy)
+DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMXEPv)
vstmia r0, {d0-d15} @ fstmiax is deprecated in ARMv7+ and now behaves like vstmia
JMP(lr)
@@ -518,7 +835,7 @@ DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMX
#if defined(__ELF__)
.fpu vfpv3
#endif
-DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm9saveVFPv3EPy)
+DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm9saveVFPv3EPv)
@ VFP and iwMMX instructions are only available when compiling with the flags
@ that enable them. We do not want to do that in the library (because we do not
@ want the compiler to generate instructions that access those) but this is
@@ -541,7 +858,7 @@ DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm9saveVFPv3EPy)
#if defined(__ELF__)
.arch armv5te
#endif
-DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm9saveiWMMXEPy)
+DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm9saveiWMMXEPv)
stcl p1, cr0, [r0], #8 @ wstrd wR0, [r0], #8
stcl p1, cr1, [r0], #8 @ wstrd wR1, [r0], #8
stcl p1, cr2, [r0], #8 @ wstrd wR2, [r0], #8
@@ -620,9 +937,41 @@ DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
l.sw 116(r3), r29
l.sw 120(r3), r30
l.sw 124(r3), r31
-#endif
+ # store ra to pc
+ l.sw 128(r3), r9
+ # zero epcr
+ l.sw 132(r3), r0
+
+#elif defined(__sparc__)
+#
+# extern int unw_getcontext(unw_context_t* thread_state)
+#
+# On entry:
+# thread_state pointer is in o0
+#
+DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
+ ta 3
+ add %o7, 8, %o7
+ std %g0, [%o0 + 0]
+ std %g2, [%o0 + 8]
+ std %g4, [%o0 + 16]
+ std %g6, [%o0 + 24]
+ std %o0, [%o0 + 32]
+ std %o2, [%o0 + 40]
+ std %o4, [%o0 + 48]
+ std %o6, [%o0 + 56]
+ std %l0, [%o0 + 64]
+ std %l2, [%o0 + 72]
+ std %l4, [%o0 + 80]
+ std %l6, [%o0 + 88]
+ std %i0, [%o0 + 96]
+ std %i2, [%o0 + 104]
+ std %i4, [%o0 + 112]
+ std %i6, [%o0 + 120]
+ jmp %o7
+ clr %o0 // return UNW_ESUCCESS
+#endif
#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */
NO_EXEC_STACK_DIRECTIVE
-