diff --git a/newlib/ChangeLog b/newlib/ChangeLog
index 79722459b..75c922326 100644
--- a/newlib/ChangeLog
+++ b/newlib/ChangeLog
@@ -1,5 +1,8 @@
 2002-01-17  Nick Clifton
 
+	* libc/machine/xscale/memset.c (memset): Fix bug when len == 1 and
+	dst was not word aligned.
+
 	* libc/sys/arm/syscalls.c (_sbrk): Return -1 rather than aborting
 	if too much memory is requested.
 
diff --git a/newlib/libc/machine/xscale/memset.c b/newlib/libc/machine/xscale/memset.c
index bfd0f0d47..ad1fc74bc 100644
--- a/newlib/libc/machine/xscale/memset.c
+++ b/newlib/libc/machine/xscale/memset.c
@@ -11,6 +11,7 @@ void *
 memset (void *dst, int c, size_t len)
 {
   int dummy;
+
   asm volatile ("tst	%0, #0x3"
 #ifndef __OPTIMIZE_SIZE__
 "
@@ -24,6 +25,13 @@ memset (void *dst, int c, size_t len)
 	movs	r3, %2
 	sub	%2, %2, #1
 	bne	0b
+# At this point we know that %2 == len == -1 (since the SUB has already taken
+# place).  If we fall through to the 1: label (as the code used to do), the
+# CMP will detect this negative value and branch to the 2: label.  This will
+# test %2 again, but this time against 0.  The test will fail and the loop
+# at 2: will go on for (almost) ever.  Hence the explicit branch to the end
+# of the hand written assembly code.
+	b	4f
 1:
 	cmp	%2, #0x3
 	bls	2f
@@ -63,17 +71,18 @@ memset (void *dst, int c, size_t len)
 2:
 	movs	r3, %2
 	sub	%2, %2, #1
-	beq	1f
+	beq	4f
 0:
 	movs	r3, %2
 	sub	%2, %2, #1
 	strb	%1, [%0], #1
 	bne	0b
-1:"
+4:"
   : "=&r" (dummy), "=&r" (c), "=&r" (len)
   : "0" (dst), "1" (c), "2" (len)
   : "memory", "r3", "r4", "r5", "lr");
+
   return dst;
 }
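
Note (not part of the patch): the sketch below is a hypothetical regression check for the case the ChangeLog describes, namely memset with len == 1 on a destination that is not word aligned. The buffer name, alignment attribute, and main function are illustrative; when built for a non-XScale host it exercises the host's memset rather than the XScale assembly routine, so it only demonstrates the intended behaviour after the fix (write exactly one byte, leave neighbours untouched, return dst).

/* Hypothetical test sketch for the len == 1, unaligned-dst case.  */
#include <assert.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
  /* Force word alignment of the buffer so that buf + 1 is known to be
     misaligned (GCC-style attribute, as used throughout newlib).  */
  static char buf[8] __attribute__ ((aligned (4)));

  memset (buf, 0xAA, sizeof buf);

  /* buf + 1 is deliberately not word aligned; write exactly one byte.  */
  void *ret = memset (buf + 1, 0x55, 1);

  assert (ret == buf + 1);                     /* memset must return dst.   */
  assert ((unsigned char) buf[1] == 0x55);     /* the one requested byte.   */
  assert ((unsigned char) buf[0] == 0xAA);     /* neighbours untouched.     */
  assert ((unsigned char) buf[2] == 0xAA);

  puts ("memset len == 1, unaligned dst: ok");
  return 0;
}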