powerpc/setjmp: Add 64-bit support
Use 64-bit store/load instructions to save/restore the general-purpose registers.
parent 5230eb7f8c
commit d9dc88048a
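For context, these routines back the standard setjmp/longjmp interface; a minimal caller (plain ISO C, independent of this commit) exercises the save/restore path like so:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf env;

static void fail(void)
{
	/* Jumps back to the setjmp() call site; the assembly routine
	   restores the saved registers from the jump buffer. */
	longjmp(env, 1);
}

int main(void)
{
	if (setjmp(env) == 0)
		fail();
	else
		puts("returned via longjmp");
	return 0;
}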
@@ -173,11 +173,19 @@ _BEGIN_STD_C
 #endif
 
 #ifdef __PPC__
+#ifdef __powerpc64__
+#ifdef __ALTIVEC__
+#define _JBLEN 70
+#else
+#define _JBLEN 43
+#endif
+#else
 #ifdef __ALTIVEC__
 #define _JBLEN 64
 #else
 #define _JBLEN 32
 #endif
+#endif
 #define _JBTYPE double
 #endif
 
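The new _JBLEN values size the jump buffer in units of _JBTYPE (an 8-byte double on PowerPC). As a rough cross-check (assuming newlib's usual typedef _JBTYPE jmp_buf[_JBLEN];, which is not shown in this diff), the resulting buffer sizes work out as follows:

/* Sketch only: compile-time check of the buffer sizes implied by the
   _JBLEN values, assuming jmp_buf is _JBTYPE[_JBLEN] and that double
   is 8 bytes, as on PowerPC.  Requires C11 for static_assert. */
#include <assert.h>

static_assert(70 * sizeof(double) == 560, "ppc64 with Altivec");
static_assert(43 * sizeof(double) == 344, "ppc64 without Altivec");
static_assert(64 * sizeof(double) == 512, "ppc32 with Altivec");
static_assert(32 * sizeof(double) == 256, "ppc32 without Altivec");

int main(void)
{
	return 0;
}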
@@ -1,6 +1,7 @@
 /* This is a simple version of setjmp and longjmp for the PowerPC.
    Ian Lance Taylor, Cygnus Support, 9 Feb 1994.
-   Modified by Jeff Johnston, Red Hat Inc. 2 Oct 2001.  */
+   Modified by Jeff Johnston, Red Hat Inc. 2 Oct 2001.
+   Modified by Sebastian Huber, embedded brains GmbH. 22 Sep 2022.  */
 
 #include "ppc-asm.h"
 
@@ -40,6 +41,31 @@ FUNC_START(setjmp)
 	   stored.  Note that we are not adding 168 because the next
 	   store instruction uses an offset of 4.  */
 	addi 3,3,164
+#elif __powerpc64__
+	/* In the first store, add 16 to r3 so that the subsequent floating
+	   point stores are aligned on an 8 byte boundary and the Altivec
+	   stores are aligned on a 16 byte boundary.  */
+	stdu 1,16(3)	# offset 16
+	stdu 2,8(3)	# offset 24
+	stdu 13,8(3)	# offset 32
+	stdu 14,8(3)	# offset 40
+	stdu 15,8(3)	# offset 48
+	stdu 16,8(3)	# offset 56
+	stdu 17,8(3)	# offset 64
+	stdu 18,8(3)	# offset 72
+	stdu 19,8(3)	# offset 80
+	stdu 20,8(3)	# offset 88
+	stdu 21,8(3)	# offset 96
+	stdu 22,8(3)	# offset 104
+	stdu 23,8(3)	# offset 112
+	stdu 24,8(3)	# offset 120
+	stdu 25,8(3)	# offset 128
+	stdu 26,8(3)	# offset 136
+	stdu 27,8(3)	# offset 144
+	stdu 28,8(3)	# offset 152
+	stdu 29,8(3)	# offset 160
+	stdu 30,8(3)	# offset 168
+	stdu 31,8(3)	# offset 176
 #else
 	stw 1,0(3)	# offset 0
 	stwu 2,4(3)	# offset 4
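Each stdu (store doubleword with update) above writes the register to r3 plus the displacement and then advances r3 by that displacement, which is why only the first store uses 16 and the rest use 8. A small C model of that effective-address walk (illustrative only; the register list and displacements are taken from the instructions above) reproduces the annotated offsets:

#include <stdio.h>

/* Models "stdu rS,D(r3)": store rS at r3 + D, then set r3 = r3 + D. */
int main(void)
{
	static const int gprs[21] = { 1, 2, 13, 14, 15, 16, 17, 18, 19, 20,
	                              21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 };
	int r3 = 0;	/* r3 points at the jump buffer on entry */

	for (int i = 0; i < 21; i++) {
		int disp = (i == 0) ? 16 : 8;	/* first store also pads for alignment */
		r3 += disp;			/* the "update" part of stdu */
		printf("stdu %d,%d(3)\t# offset %d\n", gprs[i], disp, r3);
	}
	/* Prints offsets 16, 24, ..., 176, matching the comments above. */
	return 0;
}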
@@ -64,10 +90,15 @@ FUNC_START(setjmp)
 	stwu 31,4(3)	# offset 80
 #endif
 
-	/* From this point on until the end of this function, add 84
-	   to the offset shown if __SPE__.  This difference comes from
-	   the fact that we save 21 64-bit registers instead of 21
-	   32-bit registers above.  */
+	/* If __SPE__, then add 84 to the offset shown from this point on until
+	   the end of this function.  This difference comes from the fact that
+	   we save 21 64-bit registers instead of 21 32-bit registers above.
+
+	   If __powerpc64__, then add 96 to the offset shown from this point on until
+	   the end of this function.  This difference comes from the fact that
+	   we save 21 64-bit registers instead of 21 32-bit registers above and
+	   we take alignment requirements of floating point and Altivec stores
+	   into account.  */
 	mflr 4
 	stwu 4,4(3)	# offset 84
 	mfcr 4
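The 84- and 96-byte deltas in the new comment follow directly from the store layouts above: the 21 saved registers grow from 4 to 8 bytes each (21 * 4 = 84), and the 64-bit path also starts at offset 16 instead of 0, so its last GPR lands at offset 176 rather than 80, a difference of 96. A throwaway check of that arithmetic, using only the offsets annotated in the diff:

#include <assert.h>

int main(void)
{
	/* __SPE__ path: 21 registers stored as 64-bit instead of 32-bit. */
	assert(21 * (8 - 4) == 84);

	/* __powerpc64__ path: first store at offset 16, then 20 stores of
	   8 bytes (last offset 176), versus first store at offset 0 and 20
	   stores of 4 bytes (last offset 80) in the 32-bit path. */
	assert(16 + 20 * 8 == 176);
	assert(0 + 20 * 4 == 80);
	assert(176 - 80 == 96);
	return 0;
}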
@@ -188,6 +219,31 @@ FUNC_START(longjmp)
 	   loaded.  Note that we are not adding 168 because the next
 	   load instruction uses an offset of 4.  */
 	addi 3,3,164
+#elif __powerpc64__
+	/* In the first load, add 16 to r3 so that the subsequent floating
+	   point loads are aligned on an 8 byte boundary and the Altivec
+	   loads are aligned on a 16 byte boundary.  */
+	ldu 1,16(3)	# offset 16
+	ldu 2,8(3)	# offset 24
+	ldu 13,8(3)	# offset 32
+	ldu 14,8(3)	# offset 40
+	ldu 15,8(3)	# offset 48
+	ldu 16,8(3)	# offset 56
+	ldu 17,8(3)	# offset 64
+	ldu 18,8(3)	# offset 72
+	ldu 19,8(3)	# offset 80
+	ldu 20,8(3)	# offset 88
+	ldu 21,8(3)	# offset 96
+	ldu 22,8(3)	# offset 104
+	ldu 23,8(3)	# offset 112
+	ldu 24,8(3)	# offset 120
+	ldu 25,8(3)	# offset 128
+	ldu 26,8(3)	# offset 136
+	ldu 27,8(3)	# offset 144
+	ldu 28,8(3)	# offset 152
+	ldu 29,8(3)	# offset 160
+	ldu 30,8(3)	# offset 168
+	ldu 31,8(3)	# offset 176
 #else
 	lwz 1,0(3)	# offset 0
 	lwzu 2,4(3)	# offset 4
@@ -211,10 +267,15 @@ FUNC_START(longjmp)
 	lwzu 30,4(3)	# offset 76
 	lwzu 31,4(3)	# offset 80
 #endif
-	/* From this point on until the end of this function, add 84
-	   to the offset shown if __SPE__.  This difference comes from
-	   the fact that we restore 21 64-bit registers instead of 21
-	   32-bit registers above.  */
+	/* If __SPE__, then add 84 to the offset shown from this point on until
+	   the end of this function.  This difference comes from the fact that
+	   we restore 21 64-bit registers instead of 21 32-bit registers above.
+
+	   If __powerpc64__, then add 96 to the offset shown from this point on until
+	   the end of this function.  This difference comes from the fact that
+	   we restore 21 64-bit registers instead of 21 32-bit registers above and
+	   we take alignment requirements of floating point and Altivec loads
+	   into account.  */
 	lwzu 5,4(3)	# offset 84
 	mtlr 5
 	lwzu 5,4(3)	# offset 88