From ebf139fbb8948067ecc70431516e53f0a5d2b7e5 Mon Sep 17 00:00:00 2001
From: Nick Clifton
Date: Thu, 17 Jan 2002 17:02:08 +0000
Subject: Fix bug when len == 1 and dst was not word aligned.

---
 newlib/libc/machine/xscale/memset.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

(limited to 'newlib/libc')

diff --git a/newlib/libc/machine/xscale/memset.c b/newlib/libc/machine/xscale/memset.c
index bfd0f0d47..ad1fc74bc 100644
--- a/newlib/libc/machine/xscale/memset.c
+++ b/newlib/libc/machine/xscale/memset.c
@@ -11,6 +11,7 @@ void *
 memset (void *dst, int c, size_t len)
 {
   int dummy;
+
   asm volatile ("tst	%0, #0x3"
 #ifndef __OPTIMIZE_SIZE__
 "
@@ -24,6 +25,13 @@ memset (void *dst, int c, size_t len)
 	movs	r3, %2
 	sub	%2, %2, #1
 	bne	0b
+# At this point we know that %2 == len == -1 (since the SUB has already taken
+# place).  If we fall through to the 1: label (as the code used to do), the
+# CMP will detect this negative value and branch to the 2: label.  This will
+# test %2 again, but this time against 0.  The test will fail and the loop
+# at 2: will go on for (almost) ever.  Hence the explicit branch to the end
+# of the hand written assembly code.
+	b	4f
 1:
 	cmp	%2, #0x3
 	bls	2f
@@ -63,17 +71,18 @@ memset (void *dst, int c, size_t len)
 2:
 	movs	r3, %2
 	sub	%2, %2, #1
-	beq	1f
+	beq	4f
 0:
 	movs	r3, %2
 	sub	%2, %2, #1
 	strb	%1, [%0], #1
 	bne	0b
-1:"
+4:"
 	: "=&r" (dummy), "=&r" (c), "=&r" (len)
 	: "0" (dst), "1" (c), "2" (len)
 	: "memory", "r3", "r4", "r5", "lr");
+
   return dst;
 }
--
cgit v1.2.3
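
For context, below is a minimal reproduction sketch of the case the patch addresses; it is not part of the commit.  It calls the C library memset() with len == 1 on a destination that is not word aligned, the situation in which the old code let the byte-fill counter underflow to -1 and kept storing bytes until the counter wrapped around.  The buffer name, fill values, and PASS/FAIL output are illustrative only, and a hosted environment with stdio is assumed.

/* Hypothetical test, not from the newlib tree: memset() with len == 1 on a
   misaligned destination.  On the unpatched XScale memset this call would
   run the byte-store loop (almost) forever, overwriting memory past the
   buffer; with the patch it stores one byte and returns.  */
#include <stdio.h>
#include <string.h>

int
main (void)
{
  char buf[8];

  memset (buf, 0x00, sizeof (buf));

  /* buf + 1 is deliberately not word aligned, so the "tst %0, #0x3"
     check at the top of the routine sends it into the byte loop.  */
  void *ret = memset (buf + 1, 0xAA, 1);

  /* Exactly one byte should have been written, the neighbours left
     untouched, and the original dst pointer returned.  */
  if ((char *) ret == buf + 1
      && (unsigned char) buf[1] == 0xAA
      && buf[0] == 0x00
      && buf[2] == 0x00)
    {
      puts ("PASS");
      return 0;
    }

  puts ("FAIL");
  return 1;
}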