ARC: Use new definitions for optional ARC CPU features

GCC for ARC has been updated to provide consistent naming of preprocessor
definitions for different optional architecture features:

    * __ARC_BARREL_SHIFTER__ instead of __Xbarrel_shifter for
      -mbarrel-shifter
    * __ARC_LL64__ instead of __LL64__ for -mll64
    * __ARCEM__ instead of __EM__ for -mcpu=arcem
    * __ARCHS__ instead of __HS__ for -mcpu=archs
    * etc. (other renamed definitions are not used in newlib)
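
The macros a given toolchain actually predefines can be listed with GCC's
preprocessor dump, which is a quick way to check both naming schemes (the
arc-elf32- target prefix below is only an example):

    echo | arc-elf32-gcc -dM -E - | grep ARC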

This patch updates the assembly routines for ARC to use the new definitions
instead of the deprecated ones. To ensure compatibility with older
compilers, the new definitions are also provided in asm.h when needed,
derived from the deprecated preprocessor definitions.
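
As a minimal sketch of the resulting pattern (illustrative, not taken
verbatim from any file below), a routine can test only the new macro and
still build with an older GCC, because asm.h falls back to the legacy one:

    #include "asm.h"

    /* asm.h provides, for example:
         #if defined (__Xbarrel_shifter) && !defined (__ARC_BARREL_SHIFTER__)
         #define __ARC_BARREL_SHIFTER__ 1
         #endif
       so the single test below covers old and new compilers alike.  */
    #if defined (__ARC_BARREL_SHIFTER__)
    	; barrel-shifter-optimized code path
    #else
    	; generic byte-wise code path
    #endif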

*** newlib/ChangeLog ***
2015-12-15  Anton Kolesov  <Anton.Kolesov@synopsys.com>

	* libc/machine/arc/asm.h: Define new GCC definition for old compiler.
	* libc/machine/arc/memcmp-bs-norm.S: Use new GCC defines to detect
	  processor features.
	* libc/machine/arc/memcmp.S: Likewise.
	* libc/machine/arc/memcpy-archs.S: Likewise.
	* libc/machine/arc/memcpy-bs.S: Likewise.
	* libc/machine/arc/memcpy.S: Likewise.
	* libc/machine/arc/memset-archs.S: Likewise.
	* libc/machine/arc/memset-bs.S: Likewise.
	* libc/machine/arc/memset.S: Likewise.
	* libc/machine/arc/setjmp.S: Likewise.
	* libc/machine/arc/strchr-bs-norm.S: Likewise.
	* libc/machine/arc/strchr-bs.S: Likewise.
	* libc/machine/arc/strchr.S: Likewise.
	* libc/machine/arc/strcmp-archs.S: Likewise.
	* libc/machine/arc/strcmp.S: Likewise.
	* libc/machine/arc/strcpy-bs-arc600.S: Likewise.
	* libc/machine/arc/strcpy-bs.S: Likewise.
	* libc/machine/arc/strcpy.S: Likewise.
	* libc/machine/arc/strlen-bs-norm.S: Likewise.
	* libc/machine/arc/strlen-bs.S: Likewise.
	* libc/machine/arc/strlen.S: Likewise.
	* libc/machine/arc/strncpy-bs.S: Likewise.
	* libc/machine/arc/strncpy.S: Likewise.

Signed-off-by: Anton Kolesov <Anton.Kolesov@synopsys.com>

newlib/ChangeLog

@@ -1,3 +1,30 @@
+2015-12-17  Anton Kolesov  <Anton.Kolesov@synopsys.com>
+
+	* libc/machine/arc/asm.h: Define new GCC definition for old compiler.
+	* libc/machine/arc/memcmp-bs-norm.S: Use new GCC defines to detect
+	  processor features.
+	* libc/machine/arc/memcmp.S: Likewise.
+	* libc/machine/arc/memcpy-archs.S: Likewise.
+	* libc/machine/arc/memcpy-bs.S: Likewise.
+	* libc/machine/arc/memcpy.S: Likewise.
+	* libc/machine/arc/memset-archs.S: Likewise.
+	* libc/machine/arc/memset-bs.S: Likewise.
+	* libc/machine/arc/memset.S: Likewise.
+	* libc/machine/arc/setjmp.S: Likewise.
+	* libc/machine/arc/strchr-bs-norm.S: Likewise.
+	* libc/machine/arc/strchr-bs.S: Likewise.
+	* libc/machine/arc/strchr.S: Likewise.
+	* libc/machine/arc/strcmp-archs.S: Likewise.
+	* libc/machine/arc/strcmp.S: Likewise.
+	* libc/machine/arc/strcpy-bs-arc600.S: Likewise.
+	* libc/machine/arc/strcpy-bs.S: Likewise.
+	* libc/machine/arc/strcpy.S: Likewise.
+	* libc/machine/arc/strlen-bs-norm.S: Likewise.
+	* libc/machine/arc/strlen-bs.S: Likewise.
+	* libc/machine/arc/strlen.S: Likewise.
+	* libc/machine/arc/strncpy-bs.S: Likewise.
+	* libc/machine/arc/strncpy.S: Likewise.
+
 2015-12-17  Corinna Vinschen  <corinna@vinschen.de>
 
 	* libc/include/sys/types.h: Remove including <sys/select.h>.

newlib/libc/machine/arc/asm.h

@@ -61,4 +61,22 @@
 #define bcc_s bhs_s
 
+/* Compatibility with older ARC GCC, that doesn't provide some of the
+   preprocessor defines used by newlib for ARC.  */
+#if defined (__Xbarrel_shifter) && !defined (__ARC_BARREL_SHIFTER__)
+#define __ARC_BARREL_SHIFTER__ 1
+#endif
+
+#if defined (__EM__) && !defined (__ARCEM__)
+#define __ARCEM__ 1
+#endif
+
+#if defined (__HS__) && !defined (__ARCHS__)
+#define __ARCHS__ 1
+#endif
+
+#if defined (__LL64__) && !defined (__ARC_LL64__)
+#define __ARC_LL64__ 1
+#endif
+
 #endif /* ARC_NEWLIB_ASM_H */

newlib/libc/machine/arc/memcmp-bs-norm.S

@@ -35,7 +35,9 @@
 #include "asm.h"
 
-#if !defined (__ARC601__) && defined (__ARC_NORM__) && defined (__Xbarrel_shifter)
+#if !defined (__ARC601__) && defined (__ARC_NORM__) \
+    && defined (__ARC_BARREL_SHIFTER__)
 
 #ifdef __LITTLE_ENDIAN__
 #define WORD2	r2
 #define SHIFT	r3
@@ -47,7 +49,7 @@
 ENTRY (memcmp)
 	or	r12,r0,r1
 	asl_s	r12,r12,30
-#if defined (__ARC700__) || defined (__EM__) || defined (__HS__)
+#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
 	sub_l	r3,r2,1
 	brls	r2,r12,.Lbytewise
 #else
@@ -57,7 +59,7 @@ ENTRY (memcmp)
 	ld	r4,[r0,0]
 	ld	r5,[r1,0]
 	lsr.f	lp_count,r3,3
-#ifdef __EM__
+#ifdef __ARCEM__
 	/* A branch can't be the last instruction in a zero overhead loop.
 	   So we move the branch to the start of the loop, duplicate it
 	   after the end, and set up r12 so that the branch isn't taken
@@ -74,12 +76,12 @@ ENTRY (memcmp)
 	brne	r4,r5,.Leven
 	ld.a	r4,[r0,8]
 	ld.a	r5,[r1,8]
-#ifdef __EM__
+#ifdef __ARCEM__
 .Loop_end:
 	brne	WORD2,r12,.Lodd
 #else
 	brne	WORD2,r12,.Lodd
-#ifdef __HS__
+#ifdef __ARCHS__
 	nop
 #endif
 .Loop_end:
@@ -90,7 +92,7 @@ ENTRY (memcmp)
 	ld	r4,[r0,4]
 	ld	r5,[r1,4]
 #ifdef __LITTLE_ENDIAN__
-#if defined (__ARC700__) || defined (__EM__) || defined (__HS__)
+#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
 	nop_s
 	; one more load latency cycle
 .Last_cmp:
@@ -167,14 +169,14 @@ ENTRY (memcmp)
 	bset.cs	r0,r0,31
 .Lodd:
 	cmp_s	WORD2,r12
-#if defined (__ARC700__) || defined (__EM__) || defined (__HS__)
+#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
 	mov_s	r0,1
 	j_s.d	[blink]
 	bset.cs	r0,r0,31
-#else /* !__ARC700__ */
+#else
 	j_s.d	[blink]
 	rrc	r0,2
-#endif /* !__ARC700__ */
+#endif /* __ARC700__ || __ARCEM__ || __ARCHS__ */
 #endif /* ENDIAN */
 	.balign	4
 .Lbytewise:
@@ -182,7 +184,7 @@ ENTRY (memcmp)
 	ldb	r4,[r0,0]
 	ldb	r5,[r1,0]
 	lsr.f	lp_count,r3
-#ifdef __EM__
+#ifdef __ARCEM__
 	mov	r12,r3
 	lpne	.Lbyte_end
 	brne	r3,r12,.Lbyte_odd
@@ -194,12 +196,12 @@ ENTRY (memcmp)
 	brne	r4,r5,.Lbyte_even
 	ldb.a	r4,[r0,2]
 	ldb.a	r5,[r1,2]
-#ifdef __EM__
+#ifdef __ARCEM__
 .Lbyte_end:
 	brne	r3,r12,.Lbyte_odd
 #else
 	brne	r3,r12,.Lbyte_odd
-#ifdef __HS__
+#ifdef __ARCHS__
 	nop
 #endif
 .Lbyte_end:
@@ -218,6 +220,6 @@ ENTRY (memcmp)
 	j_s.d	[blink]
 	mov_l	r0,0
 ENDFUNC (memcmp)
-#endif /* !__ARC601__ && __ARC_NORM__ && __Xbarrel_shifter */
+#endif /* !__ARC601__ && __ARC_NORM__ && __ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/memcmp.S

@@ -35,7 +35,8 @@
 #include "asm.h"
 
-#if defined (__ARC601__) || !defined (__ARC_NORM__) || !defined (__Xbarrel_shifter)
+#if defined (__ARC601__) || !defined (__ARC_NORM__) \
+    || !defined (__ARC_BARREL_SHIFTER__)
 
 /* Addresses are unsigned, and at 0 is the vector table, so it's OK to assume
    that we can subtract 8 from a source end address without underflow.  */
@@ -148,6 +149,6 @@ ENTRY (memcmp)
 	j_s.d	[blink]
 	mov_s	r0,0
 ENDFUNC (memcmp)
-#endif /* __ARC601__ || !__ARC_NORM__ || !__Xbarrel_shifter */
+#endif /* __ARC601__ || !__ARC_NORM__ || !__ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/memcpy-archs.S

@@ -35,7 +35,7 @@
 #include "asm.h"
 
-#if defined (__HS__)
+#if defined (__ARCHS__)
 
 #ifdef __LITTLE_ENDIAN__
 # define SHIFT_1(RX,RY,IMM)	asl	RX, RY, IMM	; <<
@@ -53,7 +53,7 @@
 # define EXTRACT_2(RX,RY,IMM)	lsr	RX, RY, 0x08
 #endif
 
-#ifdef __LL64__
+#ifdef __ARC_LL64__
 # define PREFETCH_READ(RX)	prefetch  [RX, 56]
 # define PREFETCH_WRITE(RX)	prefetchw [RX, 64]
 # define LOADX(DST,RX)		ldd.ab	DST, [RX, 8]
@@ -263,6 +263,6 @@ ENTRY (memcpy)
 	j	[blink]
 ENDFUNC (memcpy)
-#endif /* __HS__ */
+#endif /* __ARCHS__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/memcpy-bs.S

@@ -35,7 +35,9 @@
 #include "asm.h"
 
-#if !defined (__ARC601__) && !defined (__HS__) && defined (__Xbarrel_shifter)
+#if !defined (__ARC601__) && !defined (__ARCHS__) \
+    && defined (__ARC_BARREL_SHIFTER__)
 
 /* Mostly optimized for ARC700, but not bad for ARC600 either.  */
 /* This memcpy implementation does not support objects of 1GB or larger -
    the check for alignment does not work then.  */
@@ -98,6 +100,6 @@ ENTRY (memcpy)
 	j_s.d	[blink]
 	stb	r12,[r5,0]
 ENDFUNC (memcpy)
-#endif /* !__ARC601__ && !__HS__ && __Xbarrel_shifter */
+#endif /* !__ARC601__ && !__ARCHS__ && __ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/memcpy.S

@@ -35,7 +35,9 @@
 #include "asm.h"
 
-#if defined (__ARC601__) || (!defined (__Xbarrel_shifter) && !defined (__HS__))
+#if defined (__ARC601__) || \
+    (!defined (__ARC_BARREL_SHIFTER__) && !defined (__ARCHS__))
 
 /* Adapted from memcpy-bs.S.  */
 /* We assume that most sources and destinations are aligned, and
    that also lengths are mostly a multiple of four, although to a lesser
@@ -104,6 +106,6 @@ ENTRY (memcpy)
 	j_s.d	[blink]
 	stb	r12,[r5,0]
 ENDFUNC (memcpy)
-#endif /* __ARC601__ || (!__Xbarrel_shifter && !__HS__) */
+#endif /* __ARC601__ || (!__ARC_BARREL_SHIFTER__ && !__ARCHS__) */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/memset-archs.S

@@ -35,7 +35,7 @@
 #include "asm.h"
 
-#ifdef __HS__
+#ifdef __ARCHS__
 
 #ifdef USE_PREFETCH
 #define PREWRITE(A,B)	prefetchw [(A),(B)]
@@ -81,7 +81,7 @@ ENTRY (memset)
 	lpnz	@.Lset64bytes
 	; LOOP START
 	PREWRITE (r3, 64)	;Prefetch the next write location
-#ifdef __LL64__
+#ifdef __ARC_LL64__
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
@@ -114,7 +114,7 @@ ENTRY (memset)
 	lpnz	.Lset32bytes
 	; LOOP START
 	prefetchw [r3, 32]	;Prefetch the next write location
-#ifdef __LL64__
+#ifdef __ARC_LL64__
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
@@ -141,6 +141,6 @@ ENTRY (memset)
 	j	[blink]
 ENDFUNC (memset)
-#endif /* __HS__ */
+#endif /* __ARCHS__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/memset-bs.S

@@ -42,7 +42,7 @@
    better would be to avoid a second entry point into function. ARC HS always
    has barrel-shifter, so this implementation will be always used for this
    purpose.  */
-#if !defined (__ARC601__) && defined (__Xbarrel_shifter)
+#if !defined (__ARC601__) && defined (__ARC_BARREL_SHIFTER__)
 
 /* To deal with alignment/loop issues, SMALL must be at least 2.  */
 #define SMALL	7
@@ -57,12 +57,12 @@
    cases, because the copying of a string presumably leaves start address
    and length alignment for the zeroing randomly distributed.  */
 
-#ifdef __HS__
+#ifdef __ARCHS__
 ENTRY (__dummy_memset)
 #else
 ENTRY (memset)
 #endif
-#if !defined (__ARC700__) && !defined (__EM__)
+#if !defined (__ARC700__) && !defined (__ARCEM__)
 #undef SMALL
 #define SMALL	8 /* Even faster if aligned.  */
 	brls.d	r2,SMALL,.Ltiny
@@ -74,7 +74,7 @@ ENTRY (memset)
 	asl	r12,r1,8
 	beq.d	.Laligned
 	or_s	r1,r1,r12
-#if defined (__ARC700__) || defined (__EM__)
+#if defined (__ARC700__) || defined (__ARCEM__)
 	brls	r2,SMALL,.Ltiny
 #endif
 .Lnot_tiny:
@@ -90,7 +90,7 @@ ENTRY (memset)
 	stw.ab	r1,[r3,2]
 	bclr_s	r3,r3,1
 .Laligned:	; This code address should be aligned for speed.
-#if defined (__ARC700__) || defined (__EM__)
+#if defined (__ARC700__) || defined (__ARCEM__)
 	asl	r12,r1,16
 	lsr.f	lp_count,r2,2
 	or_s	r1,r1,r12
@@ -111,7 +111,7 @@ ENTRY (memset)
 	st_s	r1,[r3]
 #endif /* !__ARC700 */
 
-#if defined (__ARC700__) || defined (__EM__)
+#if defined (__ARC700__) || defined (__ARCEM__)
 	.balign	4
 __strncpy_bzero:
 	brhi.d	r2,17,.Lnot_tiny
@@ -144,11 +144,11 @@ __strncpy_bzero:
 	stb_s	r1,[r3]
 	j_s	[blink]
 #endif /* !__ARC700 */
-#ifdef __HS__
+#ifdef __ARCHS__
 ENDFUNC (__dummy_memset)
 #else
 ENDFUNC (memset)
 #endif
-#endif /* !__ARC601__ && __Xbarrel_shifter */
+#endif /* !__ARC601__ && __ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/memset.S

@@ -35,7 +35,8 @@
 #include "asm.h"
 
-#if defined (__ARC601__) || (!defined (__Xbarrel_shifter) && !defined (__HS__))
+#if defined (__ARC601__) \
+    || (!defined (__ARC_BARREL_SHIFTER__) && !defined (__ARCHS__))
 
 /* To deal with alignment/loop issues, SMALL must be at least 2.  */
 #define SMALL	8 /* Even faster if aligned.  */
@@ -104,6 +105,6 @@ __strncpy_bzero:
 	stb_s	r1,[r3]
 	j_s	[blink]
 ENDFUNC (memset)
-#endif /* __ARC601__ || (!__Xbarrel_shifter && !__HS__) */
+#endif /* __ARC601__ || (!__ARC_BARREL_SHIFTER__ && !__ARCHS__) */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/setjmp.S

@@ -92,7 +92,7 @@ setjmp:
 	st	r2, [r0, ABIlps]
 	st	r3, [r0, ABIlpe]
 
-#if (!defined (__A7__) && !defined (__EM__) && !defined (__HS__))
+#if (!defined (__ARC700__) && !defined (__ARCEM__) && !defined (__ARCHS__))
 	; Till the configure changes are decided, and implemented, the code working on
 	; mlo/mhi and using mul64 should be disabled.
 	; st mlo, [r0, ABImlo]
@@ -145,7 +145,7 @@ longjmp:
 	sr	r2, [lp_start]
 	sr	r3, [lp_end]
 
-#if (!defined (__A7__) && !defined (__EM__) && !defined (__HS__))
+#if (!defined (__ARC700__) && !defined (__ARCEM__) && !defined (__ARCHS__))
 	ld	r2, [r0, ABImlo]
 	ld	r3, [r0, ABImhi]
 	; We do not support restoring of mulhi and mlo registers, yet.

newlib/libc/machine/arc/strchr-bs-norm.S

@@ -39,8 +39,8 @@
    words branch-free.  */
 
 #include "asm.h"
-#if (defined (__ARC700__) || defined (__EM__) || defined (__HS__)) \
-    && defined (__ARC_NORM__) && defined (__Xbarrel_shifter)
+#if (defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)) \
+    && defined (__ARC_NORM__) && defined (__ARC_BARREL_SHIFTER__)
 
 ENTRY (strchr)
 	extb_s	r1,r1
@@ -160,6 +160,7 @@ ENTRY (strchr)
 	mov.mi	r0,0
 #endif /* ENDIAN */
 ENDFUNC (strchr)
-#endif /* (__ARC700__ || __EM__ || __HS__) && __ARC_NORM__ && __Xbarrel_shifter */
+#endif /* (__ARC700__ || __ARCEM__ || __ARCHS__) && __ARC_NORM__
+	  && __ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strchr-bs.S

@@ -49,8 +49,9 @@
    Each byte in Y is 0x80 if the the corresponding byte in
    W is zero, otherwise that byte of Y is 0.  */
 
-#if defined (__Xbarrel_shifter) && \
+#if defined (__ARC_BARREL_SHIFTER__) && \
     (defined (__ARC600__) || (!defined (__ARC_NORM__) && !defined (__ARC601__)))
 ENTRY (strchr)
 	bmsk.f	r2,r0,1
 	mov_s	r3,0x01010101
@@ -195,6 +196,7 @@ ENTRY (strchr)
 	add.eq	r0,r0,1
 #endif /* ENDIAN */
 ENDFUNC (strchr)
-#endif /* __Xbarrel_shifter && (__ARC600__ || (!__ARC_NORM__ && !__ARC601__)) */
+#endif /* __ARC_BARREL_SHIFTER__ &&
+	  (__ARC600__ || (!__ARC_NORM__ && !__ARC601__)) */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strchr.S

@@ -49,7 +49,7 @@
    Each byte in Y is 0x80 if the the corresponding byte in
    W is zero, otherwise that byte of Y is 0.  */
 
-#if defined (__ARC601__) || !defined (__Xbarrel_shifter)
+#if defined (__ARC601__) || !defined (__ARC_BARREL_SHIFTER__)
 ENTRY (strchr)
 	bmsk.f	r2,r0,1
 	mov_s	r3,0x01010101
@@ -203,6 +203,6 @@ ENTRY (strchr)
 	add.eq	r0,r0,1
 #endif /* ENDIAN */
 ENDFUNC (strchr)
-#endif /* __ARC601__ || !__Xbarrel_shifter */
+#endif /* __ARC601__ || !__ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strcmp-archs.S

@@ -35,7 +35,7 @@
 #include "asm.h"
 
-#ifdef __HS__
+#ifdef __ARCHS__
 ENTRY (strcmp)
 	or	r2, r0, r1
 	bmsk_s	r2, r2, 1
@@ -104,6 +104,6 @@ ENTRY (strcmp)
 	j_s.d	[blink]
 	sub	r0, r2, r3
 ENDFUNC (strcmp)
-#endif /* __HS__ */
+#endif /* __ARCHS__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strcmp.S

@@ -41,7 +41,7 @@
    by a factor of two, and speculatively loading the second word / byte of
    source 1; however, that would increase the overhead for loop setup / finish,
    and strcmp might often terminate early.  */
-#ifndef __HS__
+#ifndef __ARCHS__
 
 ENTRY (strcmp)
 	or	r2,r0,r1
@@ -128,6 +128,6 @@ ENTRY (strcmp)
 	j_s.d	[blink]
 	sub	r0,r2,r3
 ENDFUNC (strcmp)
-#endif /* !__HS__ */
+#endif /* !__ARCHS__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strcpy-bs-arc600.S

@@ -35,7 +35,7 @@
 #include "asm.h"
 
-#if defined (__ARC600__) && defined (__Xbarrel_shifter)
+#if defined (__ARC600__) && defined (__ARC_BARREL_SHIFTER__)
 
 /* If dst and src are 4 byte aligned, copy 8 bytes at a time.
    If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
    it 8 byte aligned.  Thus, we can do a little read-ahead, without
@@ -115,6 +115,6 @@ ENTRY (strcpy)
 	stb.ab	r3,[r10,1]
 	j	[blink]
 ENDFUNC (strcpy)
-#endif /* __ARC600__ && __Xbarrel_shifter */
+#endif /* __ARC600__ && __ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strcpy-bs.S

@@ -35,8 +35,8 @@
 #include "asm.h"
 
-#if (defined (__ARC700__) || defined (__EM__) || defined (__HS__)) \
-    && defined (__Xbarrel_shifter)
+#if (defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)) \
+    && defined (__ARC_BARREL_SHIFTER__)
 
 /* If dst and src are 4 byte aligned, copy 8 bytes at a time.
    If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
@@ -98,6 +98,6 @@ charloop:
 	stb.ab	r3,[r10,1]
 	j	[blink]
 ENDFUNC (strcpy)
-#endif /* (__ARC700__ || __EM__ || __HS__) && __Xbarrel_shifter */
+#endif /* (__ARC700__ || __ARCEM__ || __ARCHS__) && __ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strcpy.S

@@ -35,7 +35,7 @@
 #include "asm.h"
 
-#if defined (__ARC601__) || !defined (__Xbarrel_shifter)
+#if defined (__ARC601__) || !defined (__ARC_BARREL_SHIFTER__)
 
 /* If dst and src are 4 byte aligned, copy 8 bytes at a time.
    If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
    it 8 byte aligned.  Thus, we can do a little read-ahead, without
@@ -85,6 +85,6 @@ ENTRY (strcpy)
 	stb.ab	r3,[r10,1]
 	j_s	[blink]
 ENDFUNC (strcpy)
-#endif /* __ARC601__ || !__Xbarrel_shifter */
+#endif /* __ARC601__ || !__ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strlen-bs-norm.S

@@ -34,8 +34,8 @@
 #if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
 
 #include "asm.h"
-#if (defined (__ARC700__) || defined (__EM__) || defined (__HS__)) \
-    && defined (__ARC_NORM__) && defined (__Xbarrel_shifter)
+#if (defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)) \
+    && defined (__ARC_NORM__) && defined (__ARC_BARREL_SHIFTER__)
 
 ENTRY (strlen)
 	or	r3,r0,7
@@ -110,6 +110,7 @@ ENTRY (strlen)
 	b.d	.Lend
 	sub_s.ne r1,r1,r1
 ENDFUNC (strlen)
-#endif /* (__ARC700__ || __EM__ || __HS__) && __ARC_NORM__ && _Xbarrel_shifter */
+#endif /* (__ARC700__ || __ARCEM__ || __ARCHS__) && __ARC_NORM__
+	  && __ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strlen-bs.S

@@ -36,7 +36,7 @@
 #include "asm.h"
 #if (defined (__ARC600__) || !defined (__ARC_NORM__)) && !defined (__ARC601__) \
-    && defined (__Xbarrel_shifter)
+    && defined (__ARC_BARREL_SHIFTER__)
 
 /* This code is optimized for the ARC600 pipeline.  */
 ENTRY (strlen)
@@ -117,6 +117,6 @@ ENTRY (strlen)
 	b.d	.Lend
 	sub_s.ne r1,r1,r1
 ENDFUNC (strlen)
-#endif /* (__ARC600__ || !__ARC_NORM__) && !__ARC601__ && __Xbarrel_shifter */
+#endif /* (__ARC600__ || !__ARC_NORM__) && !__ARC601__ && __ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strlen.S

@@ -35,7 +35,7 @@
 #include "asm.h"
 
-#if defined(__ARC601__) || !defined (__Xbarrel_shifter)
+#if defined(__ARC601__) || !defined (__ARC_BARREL_SHIFTER__)
 
 /* This code is optimized for the ARC601 pipeline without barrel shifter.  */
 ENTRY (strlen)
@@ -160,6 +160,6 @@ ENTRY (strlen)
 	sub_s.ne r1,r1,r1
 #endif /* !SPECIAL_EARLY_END */
 ENDFUNC (strlen)
-#endif /* __ARC601__ || !__Xbarrel_shifter*/
+#endif /* __ARC601__ || !__ARC_BARREL_SHIFTER__*/
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strncpy-bs.S

@@ -45,9 +45,9 @@
    there, but the it is not likely to be taken often, and it
    would also be likey to cost an unaligned mispredict at the next call.  */
 
-#if !defined (__ARC601__) && defined (__Xbarrel_shifter)
-#if defined (__ARC700___) || defined (__EM__) || defined (__HS__)
+#if !defined (__ARC601__) && defined (__ARC_BARREL_SHIFTER__)
+#if defined (__ARC700___) || defined (__ARCEM__) || defined (__ARCHS__)
 #define BRand(a,b,l)	tst	a,b ` bne_l l
 #else
 #define BRand(a,b,l)	and	a,a,b ` brne_s a,0,l
@@ -112,7 +112,7 @@ ENTRY (strncpy)
 .Lr4z:
 	mov_l	r3,r4
 .Lr3z:
-#if defined (__ARC700__) || defined (__EM__) || defined (__HS__)
+#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
 #ifdef __LITTLE_ENDIAN__
 	bmsk.f	r1,r3,7
 	lsr_s	r3,r3,8
@@ -166,6 +166,6 @@ ENTRY (strncpy)
 	j_s.d	[blink]
 	stb_l	r12,[r3]
 ENDFUNC (strncpy)
-#endif /* !__ARC601__ && __Xbarrel_shifter */
+#endif /* !__ARC601__ && __ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */

newlib/libc/machine/arc/strncpy.S

@@ -40,7 +40,7 @@
    it 8 byte aligned.  Thus, we can do a little read-ahead, without
    dereferencing a cache line that we should not touch.  */
 
-#if defined (__ARC601__) || !defined (__Xbarrel_shifter)
+#if defined (__ARC601__) || !defined (__ARC_BARREL_SHIFTER__)
 
 #define BRand(a,b,l)	and	a,a,b ` brne_s a,0,l
@@ -129,6 +129,6 @@ ENTRY (strncpy)
 	j_s.d	[blink]
 	stb_s	r12,[r3]
 ENDFUNC (strncpy)
-#endif /* __ARC601__ || !__Xbarrel_shifter */
+#endif /* __ARC601__ || !__ARC_BARREL_SHIFTER__ */
 #endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */