[T106][ZXW-22]7520V3SCV2.01.01.02P42U09_VEC_V0.8_AP_VEC origin source commit

Change-Id: Ic6e05d89ecd62fc34f82b23dcf306c93764aec4b
diff --git a/ap/build/uClibc/libc/string/sh/memchr.S b/ap/build/uClibc/libc/string/sh/memchr.S
new file mode 100644
index 0000000..6b7142f
--- /dev/null
+++ b/ap/build/uClibc/libc/string/sh/memchr.S
@@ -0,0 +1,30 @@
+/* $Id: memchr.S,v 1.1 2000/04/14 16:49:01 mjd Exp $
+ *
+ * "memchr" implementation of SuperH
+ *
+ * Copyright (C) 1999  Niibe Yutaka
+ *
+ * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
+ */
+
+/*
+ * void *memchr(const void *s, int c, size_t n);
+ */
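+
+/* For reference, a C sketch of the loop implemented below (illustrative
+ * only, not part of the build; the asm compares sign-extended bytes, which
+ * is equivalent for equality):
+ *
+ *	void *memchr(const void *s, int c, size_t n)
+ *	{
+ *		const unsigned char *p = s;
+ *
+ *		while (n--) {
+ *			if (*p == (unsigned char)c)
+ *				return (void *)p;
+ *			p++;
+ *		}
+ *		return NULL;
+ *	}
+ */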
+
+#include <sysdep.h>
+
+ENTRY(memchr)
+	tst	r6,r6
+	bt/s	2f
+	 exts.b	r5,r5
+1:	mov.b	@r4,r1
+	cmp/eq	r1,r5
+	bt/s	3f
+	 dt	r6
+	bf/s	1b
+	 add	#1,r4
+2:	mov	#0,r4
+3:	rts
+	 mov	r4,r0
+END(memchr)
+libc_hidden_def (memchr)
diff --git a/ap/build/uClibc/libc/string/sh/sh4/memcpy.S b/ap/build/uClibc/libc/string/sh/sh4/memcpy.S
new file mode 100644
index 0000000..6a229a0
--- /dev/null
+++ b/ap/build/uClibc/libc/string/sh/sh4/memcpy.S
@@ -0,0 +1,1006 @@
+/*
+ * "memcpy" implementation of SuperH
+ *
+ * Copyright (C) 1999  Niibe Yutaka
+ * Copyright (c) 2002  STMicroelectronics Ltd
+ *   Modified from memcpy.S and micro-optimised for SH4
+ *   Stuart Menefy (stuart.menefy@st.com)
+ *
+ * Copyright (c) 2009  STMicroelectronics Ltd
+ *   Optimised using prefetching and 64-bit data transfers via the FPU
+ *   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+ */
+
+/*
+ * void *memcpy(void *dst, const void *src, size_t n);
+ *
+ * It is assumed that there is no overlap between src and dst.
+ * If there is an overlap, then the results are undefined.
+ */
+
+#include <sysdep.h>
+#include <endian.h>
+
+#if defined (__LITTLE_ENDIAN__) && defined (__SH_FPU_ANY__)
+#define	MEMCPY_USES_FPU
+/* Use paired single precision load or store mode for 64-bit transfers.
+ * FPSCR.SZ=1, FPSCR.PR=0 is well defined on both SH4-200 and SH4-300.
+ * Currently this has only been implemented and tested for little endian
+ * mode. */
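+/* Loading 0x10 << 16 = 0x00100000 into FPSCR below sets SZ (bit 20) with
+ * PR=0 and FR=0, i.e. 64-bit paired-single fmov transfers. */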
+.macro FPU_SET_PAIRED_PREC
+	sts	fpscr, r7
+	mov	#0x10, r0	! PR=0 SZ=1
+	shll16	r0
+	lds	r0, fpscr
+.endm
+.macro RESTORE_FPSCR
+	lds	r7, fpscr
+.endm
+.macro DALLOC
+	! Cache allocate + store on dst-32.
+	add	#-32, r1
+	movca.l	r0, @r1
+	add	#32, r1
+.endm
+
+#endif
+
+	!
+	!	GHIJ KLMN OPQR -->  ...G HIJK LMNO PQR.
+	!
+
+	! Size is 16 or greater, and may have trailing bytes
+
+	.balign	32
+.Lcase1:
+	! Read a long word and write a long word at once
+	! At the start of each iteration, r7 contains last long load
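+	! In C terms, each little-endian iteration computes roughly:
+	!	out = (prev << 24) | (curr >> 8); prev = curr;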
+	add	#-1,r5		!  79 EX
+	mov	r4,r2		!   5 MT (0 cycles latency)
+
+	mov.l	@(r0,r5),r7	!  21 LS (2 cycles latency)
+	add	#-4,r5		!  50 EX
+
+	add	#7,r2		!  79 EX
+	!
+#ifdef __LITTLE_ENDIAN__
+	! 6 cycles, 4 bytes per iteration
+3:	mov.l	@(r0,r5),r1	!  21 LS (latency=2)	! NMLK
+	mov	r7, r3		!   5 MT (latency=0)	! RQPO
+
+	cmp/hi	r2,r0		!  57 MT
+	shll16	r3		! 103 EX
+
+	mov	r1,r6		!   5 MT (latency=0)
+	shll8	r3		! 102 EX		! Oxxx
+
+	shlr8	r6		! 106 EX		! xNML
+	mov	r1, r7		!   5 MT (latency=0)
+
+	or	r6,r3		!  82 EX		! ONML
+	bt/s	3b		! 109 BR
+
+	 mov.l	r3,@-r0		!  30 LS
+#else
+3:	mov.l	@(r0,r5),r1	!  21 LS (latency=2)	! KLMN
+	mov	r7,r3		!   5 MT (latency=0)	! OPQR
+
+	cmp/hi	r2,r0		!  57 MT
+	shlr16	r3		! 107 EX
+
+	shlr8	r3		! 106 EX		! xxxO
+	mov	r1,r6		!   5 MT (latency=0)
+
+	shll8	r6		! 102 EX		! LMNx
+	mov	r1,r7		!   5 MT (latency=0)
+
+	or	r6,r3		!  82 EX		! LMNO
+	bt/s	3b		! 109 BR
+
+	 mov.l	r3,@-r0		!  30 LS
+#endif
+	! Finally, copy the remaining bytes one at a time, if necessary
+
+	add	#4,r5		!  50 EX
+	cmp/eq	r4,r0		!  54 MT
+
+	add	#-6,r2		!  50 EX
+	bt	9f		! 109 BR
+
+8:	cmp/hi	r2,r0		!  57 MT
+	mov.b	@(r0,r5),r1	!  20 LS (latency=2)
+
+	bt/s	8b		! 109 BR
+
+	 mov.b	r1,@-r0		!  29 LS
+
+9:	rts
+	 nop
+
+
+	!
+	!	GHIJ KLMN OPQR -->  .GHI JKLM NOPQ R...
+	!
+
+	! Size is 16 or greater, and may have trailing bytes
+
+	.balign	32
+.Lcase3:
+	! Read a long word and write a long word at once
+	! At the start of each iteration, r7 contains last long load
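+	! In C terms, each little-endian iteration computes roughly:
+	!	out = (prev << 8) | (curr >> 24); prev = curr;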
+	add	#-3,r5		! 79 EX
+	mov	r4,r2		!  5 MT (0 cycles latency)
+
+	mov.l	@(r0,r5),r7	! 21 LS (2 cycles latency)
+	add	#-4,r5		! 50 EX
+
+	add	#7,r2		!  79 EX
+	!
+#ifdef __LITTLE_ENDIAN__
+	! 6 cycles, 4 bytes per iteration
+3:	mov.l	@(r0,r5),r1	!  21 LS (latency=2)	! NMLK
+	mov	r7, r3		!   5 MT (latency=0)	! RQPO
+
+	cmp/hi	r2,r0		!  57 MT
+	shll8	r3		! 102 EX		! QPOx
+
+	mov	r1,r6		!   5 MT (latency=0)
+	shlr16	r6		! 107 EX
+
+	shlr8	r6		! 106 EX		! xxxN
+	mov	r1, r7		!   5 MT (latency=0)
+
+	or	r6,r3		!  82 EX		! QPON
+	bt/s	3b		! 109 BR
+
+	 mov.l	r3,@-r0		!  30 LS
+#else
+3:	mov	r7,r3		! OPQR
+	shlr8	r3		! xOPQ
+	mov.l	@(r0,r5),r7	! KLMN
+	mov	r7,r6
+	shll16	r6
+	shll8	r6		! Nxxx
+	or	r6,r3		! NOPQ
+	cmp/hi	r2,r0
+	bt/s	3b
+	 mov.l	r3,@-r0
+#endif
+
+	! Finally, copy the remaining bytes one at a time, if necessary
+
+	add	#6,r5		!  50 EX
+	cmp/eq	r4,r0		!  54 MT
+
+	add	#-6,r2		!  50 EX
+	bt	9f		! 109 BR
+
+8:	cmp/hi	r2,r0		!  57 MT
+	mov.b	@(r0,r5),r1	!  20 LS (latency=2)
+
+	bt/s	8b		! 109 BR
+
+	 mov.b	r1,@-r0		!  29 LS
+
+9:	rts
+	 nop
+
+ENTRY(memcpy)
+
+	! Calculate the invariants which will be used in the remainder
+	! of the code:
+	!
+	!      r4   -->  [ ...  ] DST             [ ...  ] SRC
+	!	         [ ...  ]                 [ ...  ]
+	!	           :                        :
+	!      r0   -->  [ ...  ]       r0+r5 --> [ ...  ]
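+	!
+	! The copy runs downwards: r0 is the dst cursor (starting at dst+len)
+	! and r5 = src - dst, so @(r0,r5) always addresses the src byte that
+	! corresponds to the current dst position.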
+	!
+	!
+
+	! Short circuit the common case of src, dst and len being 32-bit aligned
+	! and test for a zero length move
+
+	mov	r6, r0		!   5 MT (0 cycle latency)
+	or	r4, r0		!  82 EX
+
+	or	r5, r0		!  82 EX
+	tst	r6, r6		!  86 MT
+
+	bt/s	99f		! 111 BR		(zero len)
+	 tst	#3, r0		!  87 MT
+
+	mov	r4, r0		!   5 MT (0 cycle latency)
+	add	r6, r0		!  49 EX
+
+	bt/s	.Lcase00	! 111 BR		(aligned)
+	 sub	r4, r5		!  75 EX
+
+	! Arguments are not nicely long word aligned or zero len.
+	! Check for small copies, and if so do a simple byte at a time copy.
+	!
+	! Deciding on an exact value of 'small' is not easy, as the point at which
+	! using the optimised routines becomes worthwhile varies (these are the
+	! cycle counts for different sizes using byte-at-a-time vs. optimised):
+	!	size	byte-at-a-time	long	word	byte
+	!	16	42		39-40	46-50	50-55
+	!	24	58		43-44	54-58	62-67
+	!	36	82		49-50	66-70	80-85
+	! However the penalty for getting it 'wrong' is much higher for long word
+	! aligned data (and this is more common), so use a value of 16.
+
+	mov	#16, r1		!   6 EX
+	cmp/gt	r6,r1		!  56 MT
+
+	add	#-1,r5		!  50 EX
+	bf/s	6f		! 108 BR		(not small)
+
+	 mov	r5, r3		!   5 MT (latency=0)
+	shlr	r6		! 104 EX
+
+	mov.b	@(r0,r5),r1	!  20 LS (latency=2)
+	bf/s	4f		! 111 BR
+
+	 add	#-1,r3		!  50 EX
+	tst	r6, r6		!  86 MT
+
+	bt/s	98f		! 110 BR
+	 mov.b	r1,@-r0		!  29 LS
+
+	! 4 cycles, 2 bytes per iteration
+3:	mov.b	@(r0,r5),r1	!  20 LS (latency=2)
+
+4:	mov.b	@(r0,r3),r2	!  20 LS (latency=2)
+	dt	r6		!  67 EX
+
+	mov.b	r1,@-r0		!  29 LS
+	bf/s	3b		! 111 BR
+
+	 mov.b	r2,@-r0		!  29 LS
+98:
+	rts
+	 nop
+
+99:	rts
+	 mov	r4, r0
+
+	! Size is not small, so it's worthwhile looking for optimisations.
+	! First align destination to a long word boundary.
+	!
+	! r5 = normal value -1
+
+6:	tst	#3, r0		!  87 MT
+	mov	#3, r3		!   6 EX
+
+	bt/s	2f		! 111 BR
+	 and	r0,r3		!  78 EX
+
+	! 3 cycles, 1 byte per iteration
+1:	dt	r3		!  67 EX
+	mov.b	@(r0,r5),r1	!  19 LS (latency=2)
+
+	add	#-1, r6		!  79 EX
+	bf/s	1b		! 109 BR
+
+	 mov.b	r1,@-r0		!  28 LS
+
+2:	add	#1, r5		!  79 EX
+
+	! Now select the appropriate bulk transfer code based on relative
+	! alignment of src and dst.
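+	!
+	!   (r5 & 3) == 0  ->  .Lcase0 / .Lcase0b   (long word aligned)
+	!   (r5 & 3) == 2  ->  .Lcase2 / .Lcase2b   (short word aligned)
+	!   (r5 & 3) == 1  ->  .Lcase1,   == 3  ->  .Lcase3   (byte shifted)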
+
+	mov	r0, r3		!   5 MT (latency=0)
+
+	mov	r5, r0		!   5 MT (latency=0)
+	tst	#1, r0		!  87 MT
+
+	bf/s	1f		! 111 BR
+	 mov	#64, r7		!   6 EX
+
+	! bit 0 clear
+
+	cmp/ge	r7, r6		!  55 MT
+
+	bt/s	2f		! 111 BR
+	 tst	#2, r0		!  87 MT
+
+	! small
+	bt/s	.Lcase0
+	 mov	r3, r0
+
+	bra	.Lcase2
+	 nop
+
+	! big
+2:	bt/s	.Lcase0b
+	 mov	r3, r0
+
+	bra	.Lcase2b
+	 nop
+
+	! bit 0 set
+1:	tst	#2, r0		! 87 MT
+
+	bt/s	.Lcase1
+	 mov	r3, r0
+
+	bra	.Lcase3
+	 nop
+
+
+	!
+	!	GHIJ KLMN OPQR -->  GHIJ KLMN OPQR
+	!
+
+	! src, dst and size are all long word aligned
+	! size is non-zero
+
+	.balign	32
+.Lcase00:
+	mov	#64, r1		!   6 EX
+	mov	r5, r3		!   5 MT (latency=0)
+
+	cmp/gt	r6, r1		!  56 MT
+	add	#-4, r5		!  50 EX
+
+	bf	.Lcase00b	! 108 BR		(big loop)
+	shlr2	r6		! 105 EX
+
+	shlr	r6		! 104 EX
+	mov.l	@(r0, r5), r1	!  21 LS (latency=2)
+
+	bf/s	4f		! 111 BR
+	 add	#-8, r3		!  50 EX
+
+	tst	r6, r6		!  86 MT
+	bt/s	5f		! 110 BR
+
+	 mov.l	r1,@-r0		!  30 LS
+
+	! 4 cycles, 2 long words per iteration
+3:	mov.l	@(r0, r5), r1	!  21 LS (latency=2)
+
+4:	mov.l	@(r0, r3), r2	!  21 LS (latency=2)
+	dt	r6		!  67 EX
+
+	mov.l	r1, @-r0	!  30 LS
+	bf/s	3b		! 109 BR
+
+	 mov.l	r2, @-r0	!  30 LS
+
+5:	rts
+	 nop
+
+
+	! Size is 16 or greater and less than 64, but may have trailing bytes
+
+	.balign	32
+.Lcase0:
+	add	#-4, r5		!  50 EX
+	mov	r4, r7		!   5 MT (latency=0)
+
+	mov.l	@(r0, r5), r1	!  21 LS (latency=2)
+	mov	#4, r2		!   6 EX
+
+	add	#11, r7		!  50 EX
+	tst	r2, r6		!  86 MT
+
+	mov	r5, r3		!   5 MT (latency=0)
+	bt/s	4f		! 111 BR
+
+	 add	#-4, r3		!  50 EX
+	mov.l	r1,@-r0		!  30 LS
+
+	! 4 cycles, 2 long words per iteration
+3:	mov.l	@(r0, r5), r1	!  21 LS (latency=2)
+
+4:	mov.l	@(r0, r3), r2	!  21 LS (latency=2)
+	cmp/hi	r7, r0
+
+	mov.l	r1, @-r0	!  30 LS
+	bt/s	3b		! 109 BR
+
+	 mov.l	r2, @-r0	!  30 LS
+
+	! Copy the final 0-3 bytes
+
+	add	#3,r5		!  50 EX
+
+	cmp/eq	r0, r4		!  54 MT
+	add	#-10, r7	!  50 EX
+
+	bt	9f		! 110 BR
+
+	! 3 cycles, 1 byte per iteration
+1:	mov.b	@(r0,r5),r1	!  19 LS
+	cmp/hi	r7,r0		!  57 MT
+
+	bt/s	1b		! 111 BR
+	 mov.b	r1,@-r0		!  28 LS
+
+9:	rts
+	 nop
+
+	! Size is at least 64 bytes, so will be going round the big loop at least once.
+	!
+	!   r2 = rounded up r4
+	!   r3 = rounded down r0
+
+	.balign	32
+.Lcase0b:
+	add	#-4, r5		!  50 EX
+
+.Lcase00b:
+	mov	r0, r3		!   5 MT (latency=0)
+	mov	#(~0x1f), r1	!   6 EX
+
+	and	r1, r3		!  78 EX
+	mov	r4, r2		!   5 MT (latency=0)
+
+	cmp/eq	r3, r0		!  54 MT
+	add	#0x1f, r2	!  50 EX
+
+	bt/s	1f		! 110 BR
+	 and	r1, r2		!  78 EX
+
+	! copy initial words until cache line aligned
+
+	mov.l	@(r0, r5), r1	!  21 LS (latency=2)
+	tst	#4, r0		!  87 MT
+
+	mov	r5, r6		!   5 MT (latency=0)
+	add	#-4, r6		!  50 EX
+
+	bt/s	4f		! 111 BR
+	 add	#8, r3		!  50 EX
+
+	tst	#0x18, r0	!  87 MT
+
+	bt/s	1f		! 109 BR
+	 mov.l	r1,@-r0		!  30 LS
+
+	! 4 cycles, 2 long words per iteration
+3:	mov.l	@(r0, r5), r1	!  21 LS (latency=2)
+
+4:	mov.l	@(r0, r6), r7	!  21 LS (latency=2)
+	cmp/eq	r3, r0		!  54 MT
+
+	mov.l	r1, @-r0	!  30 LS
+	bf/s	3b		! 109 BR
+
+	 mov.l	r7, @-r0	!  30 LS
+
+#ifdef MEMCPY_USES_FPU
+	! Copy the cache line aligned blocks by using the FPU registers.
+	! If src and dst are well aligned, use 64-bit data transfers.
+	! We also need r0 as a temporary (for movca), so 'undo' the invariant:
+	!   r5:	 src (was r0+r5)
+	!   r1:	 dest (was r0)
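+	!
+	! Each pass of the 67: loop below moves 128 bytes (sixteen 64-bit FPU
+	! registers: dr0-dr14 and xd0-xd14), i.e. four 32-byte cache lines,
+	! prefetching the next block and pre-allocating dst lines via DALLOC.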
+1:
+	add	r0, r5
+	mov	r0, r1
+
+	mov	r1, r3		! MT
+	sub	r2, r3		! EX (r3 - r2 -> r3)
+	mov	#-5, r0
+	shld	r0, r3		! number of the cache lines
+
+	mov	#8, r0
+	cmp/ge	r0, r3		! Check if there are many cache lines to copy.
+	bf	45f		! Copy cache line aligned blocks without pref.
+	mov	r5, r0
+	add	#-0x7c, r0
+	tst	#7, r0		! src is 8-byte aligned?
+	bf	45f
+
+	! Many cache lines have to be copied and the buffers are well aligned:
+	! use aggressive prefetching and the FPU in paired single precision.
+	mov	r0, r5
+	mov	r5, r6
+	add	#-0x80, r6	! prefetch head
+
+	! Save the FPU callee-saved registers (single precision stores, so
+	! r15 alignment need not be checked).
+	fmov	fr12, @-r15
+	fmov	fr13, @-r15
+	fmov	fr14, @-r15
+	fmov	fr15, @-r15
+
+	FPU_SET_PAIRED_PREC
+
+	mov	#4, r0
+67:
+	add	#-0x20, r6
+	pref	@r6
+	add	#-0x20, r6
+	pref	@r6
+
+	fmov	@r5+, dr0
+	fmov	@r5+, dr2
+	fmov	@r5+, dr4
+	fmov	@r5+, dr6
+	fmov	@r5+, dr8
+	fmov	@r5+, dr10
+	fmov	@r5+, dr12
+	fmov	@r5+, dr14
+	fmov	@r5+, xd0
+	fmov	@r5+, xd2
+	fmov	@r5+, xd4
+	fmov	@r5+, xd6
+	fmov	@r5+, xd8
+	fmov	@r5+, xd10
+	fmov	@r5+, xd12
+	fmov	@r5+, xd14
+
+	DALLOC
+	fmov	xd14, @-r1
+	fmov	xd12, @-r1
+	fmov	xd10, @-r1
+	fmov	xd8, @-r1
+	DALLOC
+	fmov	xd6, @-r1
+	fmov	xd4, @-r1
+	fmov	xd2, @-r1
+	fmov	xd0, @-r1
+	DALLOC
+	fmov	dr14, @-r1
+	fmov	dr12, @-r1
+	fmov	dr10, @-r1
+	fmov	dr8, @-r1
+	DALLOC
+	fmov	dr6, @-r1
+	add	#-0x80, r5
+	fmov	dr4, @-r1
+	add	#-0x80, r5
+	fmov	dr2, @-r1
+	add	#-0x20, r6
+	fmov	dr0, @-r1
+	add	#-4, r3
+	pref	@r6
+	add	#-0x20, r6
+	cmp/ge	r0, r3
+	bt/s	67b
+	 pref	@r6
+
+	RESTORE_FPSCR
+
+	! Restore the FPU callee-saved registers
+	fmov	@r15+, fr15
+	fmov	@r15+, fr14
+	fmov	@r15+, fr13
+	fmov	@r15+, fr12
+
+	! Remaining cache lines can be copied using the FPU in paired single
+	! precision without prefetching. No check for alignment is necessary.
+
+	mov	#1, r0
+	cmp/ge	r0, r3
+	bt/s	3f
+	 add	#0x60, r5
+
+	bra	5f
+	 nop
+
+	! No prefetch and FPU in single precision.
+45:
+	add	#-0x1c, r5
+	mov	r5, r0
+	tst	#7, r0
+	bt	3f
+
+2:	fmov.s	@r5+, fr0
+	fmov.s	@r5+, fr1
+	fmov.s	@r5+, fr2
+	fmov.s	@r5+, fr3
+	fmov.s	@r5+, fr4
+	fmov.s	@r5+, fr5
+	fmov.s	@r5+, fr6
+	fmov.s	@r5+, fr7
+
+	DALLOC
+
+	fmov.s	fr7, @-r1
+	fmov.s	fr6, @-r1
+	fmov.s	fr5, @-r1
+	fmov.s	fr4, @-r1
+	fmov.s	fr3, @-r1
+	fmov.s	fr2, @-r1
+	fmov.s	fr1, @-r1
+	fmov.s	fr0, @-r1
+
+	cmp/eq	r2,r1
+
+	bf/s	2b
+	 add	#-0x40, r5
+
+	bra	5f
+	 nop
+
+	! No prefetch and FPU in paired single precision.
+
+3:	FPU_SET_PAIRED_PREC
+
+4:	fmov	@r5+, dr0
+	fmov	@r5+, dr2
+	fmov	@r5+, dr4
+	fmov	@r5+, dr6
+
+	DALLOC
+
+	fmov	dr6, @-r1
+	fmov	dr4, @-r1
+	fmov	dr2, @-r1
+	fmov	dr0, @-r1
+	cmp/eq	r2,r1
+
+	bf/s	4b
+	 add	#-0x40, r5
+
+	RESTORE_FPSCR
+
+5:	mov	r1, r0
+
+	cmp/eq	r4, r0		!  54 MT
+	bf/s	1f		! 109 BR
+	 sub	r1, r5		!  75 EX
+
+	rts
+	 nop
+1:
+#else
+	! Copy the cache line aligned blocks
+	!
+	! In use: r0, r2, r4, r5
+	! Scratch: r1, r3, r6, r7
+	!
+	! We could do this with the four scratch registers, but if src
+	! and dest hit the same cache line, this will thrash, so make
+	! use of additional registers.
+	!
+	! We also need r0 as a temporary (for movca), so 'undo' the invariant:
+	!   r5:	 src (was r0+r5)
+	!   r1:	 dest (was r0)
+	! this can be reversed at the end, so we don't need to save any extra
+	! state.
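+	!
+	! Each pass of the 2: loop below moves one 32-byte cache line: eight
+	! long loads into r0/r3/r6-r11, one movca.l to allocate the dst line,
+	! then seven ordinary long stores.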
+	!
+1:	mov.l	r8, @-r15	!  30 LS
+	add	r0, r5		!  49 EX
+
+	mov.l	r9, @-r15	!  30 LS
+	mov	r0, r1		!   5 MT (latency=0)
+
+	mov.l	r10, @-r15	!  30 LS
+	add	#-0x1c, r5	!  50 EX
+
+	mov.l	r11, @-r15	!  30 LS
+
+	! 16 cycles, 32 bytes per iteration
+2:	mov.l	@(0x00,r5),r0	! 18 LS (latency=2)
+	add	#-0x20, r1	! 50 EX
+	mov.l	@(0x04,r5),r3	! 18 LS (latency=2)
+	mov.l	@(0x08,r5),r6	! 18 LS (latency=2)
+	mov.l	@(0x0c,r5),r7	! 18 LS (latency=2)
+	mov.l	@(0x10,r5),r8	! 18 LS (latency=2)
+	mov.l	@(0x14,r5),r9	! 18 LS (latency=2)
+	mov.l	@(0x18,r5),r10	! 18 LS (latency=2)
+	mov.l	@(0x1c,r5),r11	! 18 LS (latency=2)
+	movca.l	r0,@r1		! 40 LS (latency=3-7)
+	mov.l	r3,@(0x04,r1)	! 33 LS
+	mov.l	r6,@(0x08,r1)	! 33 LS
+	mov.l	r7,@(0x0c,r1)	! 33 LS
+
+	mov.l	r8,@(0x10,r1)	! 33 LS
+	add	#-0x20, r5	! 50 EX
+
+	mov.l	r9,@(0x14,r1)	! 33 LS
+	cmp/eq	r2,r1		! 54 MT
+
+	mov.l	r10,@(0x18,r1)	!  33 LS
+	bf/s	2b		! 109 BR
+
+	 mov.l	r11,@(0x1c,r1)	!  33 LS
+
+	mov	r1, r0		!   5 MT (latency=0)
+
+	mov.l	@r15+, r11	!  15 LS
+	sub	r1, r5		!  75 EX
+
+	mov.l	@r15+, r10	!  15 LS
+	cmp/eq	r4, r0		!  54 MT
+
+	bf/s	1f		! 109 BR
+	 mov.l	 @r15+, r9	!  15 LS
+
+	rts
+1:	 mov.l	@r15+, r8	!  15 LS
+#endif
+	sub	r4, r1		!  75 EX		(len remaining)
+
+	! number of trailing bytes is non-zero
+	!
+	! invariants restored (r5 already decremented by 4)
+	! also r1=num bytes remaining
+
+	mov	#4, r2		!   6 EX
+	mov	r4, r7		!   5 MT (latency=0)
+
+	add	#0x1c, r5	!  50 EX		(back to -4)
+	cmp/hs	r2, r1		!  58 MT
+
+	bf/s	5f		! 108 BR
+	 add	 #11, r7	!  50 EX
+
+	mov.l	@(r0, r5), r6	!  21 LS (latency=2)
+	tst	r2, r1		!  86 MT
+
+	mov	r5, r3		!   5 MT (latency=0)
+	bt/s	4f		! 111 BR
+
+	 add	#-4, r3		!  50 EX
+	cmp/hs	r2, r1		!  58 MT
+
+	bt/s	5f		! 111 BR
+	 mov.l	r6,@-r0		!  30 LS
+
+	! 4 cycles, 2 long words per iteration
+3:	mov.l	@(r0, r5), r6	!  21 LS (latency=2)
+
+4:	mov.l	@(r0, r3), r2	!  21 LS (latency=2)
+	cmp/hi	r7, r0
+
+	mov.l	r6, @-r0	!  30 LS
+	bt/s	3b		! 109 BR
+
+	 mov.l	r2, @-r0	!  30 LS
+
+	! Copy the final 0-3 bytes
+
+5:	cmp/eq	r0, r4		!  54 MT
+	add	#-10, r7	!  50 EX
+
+	bt	9f		! 110 BR
+	add	#3,r5		!  50 EX
+
+	! 3 cycles, 1 byte per iteration
+1:	mov.b	@(r0,r5),r1	!  19 LS
+	cmp/hi	r7,r0		!  57 MT
+
+	bt/s	1b		! 111 BR
+	 mov.b	r1,@-r0		!  28 LS
+
+9:	rts
+	 nop
+
+	!
+	!	GHIJ KLMN OPQR -->  ..GH IJKL MNOP QR..
+	!
+
+	.balign	32
+.Lcase2:
+	! Size is 16 or greater and less than 64, but may have trailing bytes
+
+2:	mov	r5, r6		!   5 MT (latency=0)
+	add	#-2,r5		!  50 EX
+
+	mov	r4,r2		!   5 MT (latency=0)
+	add	#-4,r6		!  50 EX
+
+	add	#7,r2		!  50 EX
+3:	mov.w	@(r0,r5),r1	!  20 LS (latency=2)
+
+	mov.w	@(r0,r6),r3	!  20 LS (latency=2)
+	cmp/hi	r2,r0		!  57 MT
+
+	mov.w	r1,@-r0		!  29 LS
+	bt/s	3b		! 111 BR
+
+	 mov.w	r3,@-r0		!  29 LS
+
+	bra	10f
+	 nop
+
+
+	.balign	32
+.Lcase2b:
+	! Size is at least 64 bytes, so will be going round the big loop at least once.
+	!
+	!   r2 = rounded up r4
+	!   r3 = rounded down r0
+
+	mov	r0, r3		!   5 MT (latency=0)
+	mov	#(~0x1f), r1	!   6 EX
+
+	and	r1, r3		!  78 EX
+	mov	r4, r2		!   5 MT (latency=0)
+
+	cmp/eq	r3, r0		!  54 MT
+	add	#0x1f, r2	!  50 EX
+
+	add	#-2, r5		!  50 EX
+	bt/s	1f		! 110 BR
+	 and	r1, r2		!  78 EX
+
+	! Copy one short word at a time until dst is cache line aligned
+	!   Normal values: r0, r2, r3, r4
+	!   Unused: r1, r6, r7
+	!   Mod: r5 (=r5-2)
+	!
+	add	#2, r3		!  50 EX
+
+2:	mov.w	@(r0,r5),r1	!  20 LS (latency=2)
+	cmp/eq	r3,r0		!  54 MT
+
+	bf/s	2b		! 111 BR
+
+	 mov.w	r1,@-r0		!  29 LS
+
+	! Copy the cache line aligned blocks
+	!
+	! In use: r0, r2, r4, r5 (=r5-2)
+	! Scratch: r1, r3, r6, r7
+	!
+	! We could do this with the four scratch registers, but if src
+	! and dest hit the same cache line, this will thrash, so make
+	! use of additional registers.
+	!
+	! We also need r0 as a temporary (for movca), so 'undo' the invariant:
+	!   r5:	 src (was r0+r5)
+	!   r1:	 dest (was r0)
+	! this can be reversed at the end, so we don't need to save any extra
+	! state.
+	!
+1:	mov.l	r8, @-r15	!  30 LS
+	add	r0, r5		!  49 EX
+
+	mov.l	r9, @-r15	!  30 LS
+	mov	r0, r1		!   5 MT (latency=0)
+
+	mov.l	r10, @-r15	!  30 LS
+	add	#-0x1e, r5	!  50 EX
+
+	mov.l	r11, @-r15	!  30 LS
+
+	mov.l	r12, @-r15	!  30 LS
+
+	! 17 cycles, 32 bytes per iteration
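+	! xtrct rm,rn yields rn = (rm << 16) | (rn >> 16), merging each pair
+	! of half-shifted long words into one aligned long word.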
+#ifdef __LITTLE_ENDIAN__
+2:	mov.w	@r5+, r0	!  14 LS (latency=2)		..JI
+	add	#-0x20, r1	!  50 EX
+
+	mov.l	@r5+, r3	!  15 LS (latency=2)		NMLK
+
+	mov.l	@r5+, r6	!  15 LS (latency=2)		RQPO
+	shll16	r0		! 103 EX			JI..
+
+	mov.l	@r5+, r7	!  15 LS (latency=2)
+	xtrct	r3, r0		!  48 EX			LKJI
+
+	mov.l	@r5+, r8	!  15 LS (latency=2)
+	xtrct	r6, r3		!  48 EX			PONM
+
+	mov.l	@r5+, r9	!  15 LS (latency=2)
+	xtrct	r7, r6		!  48 EX
+
+	mov.l	@r5+, r10	!  15 LS (latency=2)
+	xtrct	r8, r7		!  48 EX
+
+	mov.l	@r5+, r11	!  15 LS (latency=2)
+	xtrct	r9, r8		!  48 EX
+
+	mov.w	@r5+, r12	!  15 LS (latency=2)
+	xtrct	r10, r9		!  48 EX
+
+	movca.l	r0,@r1		!  40 LS (latency=3-7)
+	xtrct	r11, r10	!  48 EX
+
+	mov.l	r3, @(0x04,r1)	!  33 LS
+	xtrct	r12, r11	!  48 EX
+
+	mov.l	r6, @(0x08,r1)	!  33 LS
+
+	mov.l	r7, @(0x0c,r1)	!  33 LS
+
+	mov.l	r8, @(0x10,r1)	!  33 LS
+	add	#-0x40, r5	!  50 EX
+
+	mov.l	r9, @(0x14,r1)	!  33 LS
+	cmp/eq	r2,r1		!  54 MT
+
+	mov.l	r10, @(0x18,r1)	!  33 LS
+	bf/s	2b		! 109 BR
+
+	 mov.l	r11, @(0x1c,r1)	!  33 LS
+#else
+2:	mov.w	@(0x1e,r5), r0	!  17 LS (latency=2)
+	add	#-2, r5		!  50 EX
+
+	mov.l	@(0x1c,r5), r3	!  18 LS (latency=2)
+	add	#-4, r1		!  50 EX
+
+	mov.l	@(0x18,r5), r6	!  18 LS (latency=2)
+	shll16	r0		! 103 EX
+
+	mov.l	@(0x14,r5), r7	!  18 LS (latency=2)
+	xtrct	r3, r0		!  48 EX
+
+	mov.l	@(0x10,r5), r8	!  18 LS (latency=2)
+	xtrct	r6, r3		!  48 EX
+
+	mov.l	@(0x0c,r5), r9	!  18 LS (latency=2)
+	xtrct	r7, r6		!  48 EX
+
+	mov.l	@(0x08,r5), r10	!  18 LS (latency=2)
+	xtrct	r8, r7		!  48 EX
+
+	mov.l	@(0x04,r5), r11	!  18 LS (latency=2)
+	xtrct	r9, r8		!  48 EX
+
+	mov.l	@(0x00,r5), r12	!  18 LS (latency=2)
+	xtrct	r10, r9		!  48 EX
+
+	movca.l	r0,@r1		!  40 LS (latency=3-7)
+	add	#-0x1c, r1	!  50 EX
+
+	mov.l	r3, @(0x18,r1)	!  33 LS
+	xtrct	r11, r10	!  48 EX
+
+	mov.l	r6, @(0x14,r1)	!  33 LS
+	xtrct	r12, r11	!  48 EX
+
+	mov.l	r7, @(0x10,r1)	!  33 LS
+
+	mov.l	r8, @(0x0c,r1)	!  33 LS
+	add	#-0x1e, r5	!  50 EX
+
+	mov.l	r9, @(0x08,r1)	!  33 LS
+	cmp/eq	r2,r1		!  54 MT
+
+	mov.l	r10, @(0x04,r1)	!  33 LS
+	bf/s	2b		! 109 BR
+
+	 mov.l	r11, @(0x00,r1)	!  33 LS
+#endif
+
+	mov.l	@r15+, r12
+	mov	r1, r0		!   5 MT (latency=0)
+
+	mov.l	@r15+, r11	!  15 LS
+	sub	r1, r5		!  75 EX
+
+	mov.l	@r15+, r10	!  15 LS
+	cmp/eq	r4, r0		!  54 MT
+
+	bf/s	1f		! 109 BR
+	 mov.l	 @r15+, r9	!  15 LS
+
+	rts
+1:	 mov.l	@r15+, r8	!  15 LS
+
+	add	#0x1e, r5	!  50 EX
+
+	! Finish off a short word at a time
+	! r5 must be invariant - 2
+10:	mov	r4,r2		!   5 MT (latency=0)
+	add	#1,r2		!  50 EX
+
+	cmp/hi	r2, r0		!  57 MT
+	bf/s	1f		! 109 BR
+
+	 add	#2, r2		!  50 EX
+
+3:	mov.w	@(r0,r5),r1	!  20 LS
+	cmp/hi	r2,r0		!  57 MT
+
+	bt/s	3b		! 109 BR
+
+	 mov.w	r1,@-r0		!  29 LS
+1:
+
+	!
+	! Finally, copy the last byte if necessary
+	cmp/eq	r4,r0		!  54 MT
+	bt/s	9b
+	 add	#1,r5
+	mov.b	@(r0,r5),r1
+	rts
+	 mov.b	r1,@-r0
+
+END(memcpy)
+libc_hidden_def (memcpy)
diff --git a/ap/build/uClibc/libc/string/sh/sh4/memmove.c b/ap/build/uClibc/libc/string/sh/sh4/memmove.c
new file mode 100644
index 0000000..8059bd4
--- /dev/null
+++ b/ap/build/uClibc/libc/string/sh/sh4/memmove.c
@@ -0,0 +1,121 @@
+/* memmove implementation for SH4
+ *
+ * Copyright (C) 2009 STMicroelectronics Ltd.
+ *
+ * Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+ *
+ * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
+ */
+
+#ifndef __SH_FPU_ANY__
+#include "../../generic/memmove.c"
+#else
+
+#include <string.h>
+
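+/* Bit 20 of FPSCR is the SZ (transfer size) bit; setting it selects 64-bit
+ * paired-single fmov transfers. */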
+#define FPSCR_SR	(1 << 20)
+#define STORE_FPSCR(x)	__asm__ __volatile__("sts fpscr, %0" : "=r"(x))
+#define LOAD_FPSCR(x)	__asm__ __volatile__("lds %0, fpscr" : : "r"(x))
+
+static void fpu_optimised_copy_fwd(void *dest, const void *src, size_t len)
+{
+	char *d = (char *)dest;
+	char *s = (char *)src;
+
+	if (len >= 64) {
+		unsigned long fpscr;
+		int *s1;
+		int *d1;
+
+		/* Align the dest to an 8-byte boundary. */
+		while ((unsigned)d & 0x7) {
+			*d++ = *s++;
+			len--;
+		}
+
+		s1 = (int *)s;
+		d1 = (int *)d;
+
+		/* Check if src is also 8-byte aligned, so the FPU can be used. */
+		if (!((unsigned)s1 & 0x7)) {
+
+			/* Align the dest to a 32-byte cache-line boundary. */
+			while ((unsigned)d1 & 0x1c) {
+				*d1++ = *s1++;
+				len -= 4;
+			}
+
+			/* Use paired single precision load or store mode for
+			 * 64-bit transfers. */
+			STORE_FPSCR(fpscr);
+			LOAD_FPSCR(FPSCR_SR);
+
+			while (len >= 32) {
+				__asm__ __volatile__ ("fmov @%0+,dr0":"+r" (s1));
+				__asm__ __volatile__ ("fmov @%0+,dr2":"+r" (s1));
+				__asm__ __volatile__ ("fmov @%0+,dr4":"+r" (s1));
+				__asm__ __volatile__ ("fmov @%0+,dr6":"+r" (s1));
+				__asm__
+				    __volatile__ ("fmov dr0,@%0"::"r"
+					      (d1):"memory");
+				d1 += 2;
+				__asm__
+				    __volatile__ ("fmov dr2,@%0"::"r"
+					      (d1):"memory");
+				d1 += 2;
+				__asm__
+				    __volatile__ ("fmov dr4,@%0"::"r"
+					      (d1):"memory");
+				d1 += 2;
+				__asm__
+				    __volatile__ ("fmov dr6,@%0"::"r"
+					      (d1):"memory");
+				d1 += 2;
+				len -= 32;
+			}
+			LOAD_FPSCR(fpscr);
+		}
+		s = (char *)s1;
+		d = (char *)d1;
+		/* TODO: other subcases could be covered here. */
+	}
+	/* Go to per-byte copy */
+	while (len > 0) {
+		*d++ = *s++;
+		len--;
+	}
+	return;
+}
+
+void *memmove(void *dest, const void *src, size_t len)
+{
+	unsigned long int d = (long int)dest;
+	unsigned long int s = (long int)src;
+	unsigned long int res;
+
+	if (d >= s)
+		res = d - s;
+	else
+		res = s - d;
+	/*
+	 * 1) dest and src do not overlap  ==> memcpy (BWD/FWD)
+	 * 2) dest and src overlap 100%    ==> memcpy (BWD/FWD)
+	 * 3) left-to-right overlap  ==>  copy from the beginning to the end
+	 * 4) right-to-left overlap  ==>  copy from the end to the beginning
+	 */
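+	/* For example, dest = buf + 4, src = buf, len = 16 gives res = 4 < len
+	 * with d > s (right-to-left overlap); since this SH4 memcpy copies from
+	 * the end downwards, it is safe for that case. */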
+
+	if (res == 0)		/* 100% overlap: copying is effectively a no-op */
+		memcpy(dest, src, len);
+	else if (res >= len)	/* no overlap */
+		memcpy(dest, src, len);
+	else {
+		if (d > s)	/* right-to-left overlap */
+			memcpy(dest, src, len);	/* memcpy is BWD */
+		else		/* cannot use SH4 memcpy for this case */
+			fpu_optimised_copy_fwd(dest, src, len);
+	}
+	return (dest);
+}
+
+libc_hidden_def(memmove)
+#endif /*__SH_FPU_ANY__ */
diff --git a/ap/build/uClibc/libc/string/sh/sh4/memset.S b/ap/build/uClibc/libc/string/sh/sh4/memset.S
new file mode 100644
index 0000000..eb83355
--- /dev/null
+++ b/ap/build/uClibc/libc/string/sh/sh4/memset.S
@@ -0,0 +1,152 @@
+/* $Id: memset.S,v 1.1 2000/04/14 16:49:01 mjd Exp $
+ *
+ * "memset" implementation of SuperH
+ *
+ * Copyright (C) 1999  Niibe Yutaka
+ *
+ * Copyright (c) 2009  STMicroelectronics Ltd
+ *   Optimised using 64-bit data transfers (via FPU) and the movca.l instruction.
+ *   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+ *
+ * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
+ */
+
+/*
+ *            void *memset(void *s, int c, size_t n);
+ */
+
+#include <sysdep.h>
+
+#if defined (__LITTLE_ENDIAN__) && defined (__SH_FPU_ANY__)
+#define MEMSET_USES_FPU
+/* Use paired single precision load or store mode for 64-bit transfers.
+ * FPSCR.SZ=1, FPSCR.PR=0 is well defined on both SH4-200 and SH4-300.
+ * Currently this has only been implemented and tested for little endian
+ * mode. */
+.macro FPU_SET_PAIRED_PREC
+	sts	fpscr, r3
+	mov	#0x10, r1	! PR=0 SZ=1
+	shll16  r1
+	lds	r1, fpscr
+.endm
+.macro RESTORE_FPSCR
+	lds	r3, fpscr
+.endm
+#endif
+
+ENTRY(memset)
+	mov	#12,r0
+	add	r6,r4
+	cmp/gt	r6,r0
+	bt/s	40f		! if it's too small, set one byte at a time
+	 mov	r4,r0
+	and	#3,r0
+	cmp/eq	#0,r0
+	bt/s	2f		! It's aligned
+	 sub	r0,r6
+1:
+	dt	r0
+	bf/s	1b
+	 mov.b	r5,@-r4
+2:				! make VVVV
+	extu.b	r5,r5
+	swap.b	r5,r0		!   V0
+	or	r0,r5		!   VV
+	swap.w	r5,r0		! VV00
+	or	r0,r5		! VVVV
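+	! e.g. c = 0x5a: 0x0000005a -> 0x00005a5a -> 0x5a5a5a5a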
+
+	! Check if enough bytes need to be copied to be worth the big loop
+	mov	#0x40, r0	! (MT)
+	cmp/gt	r6,r0		! (MT)  64 > len => slow loop
+
+	bt/s	22f
+	 mov	r6,r0
+
+	! align the dst to the cache block size if necessary
+	mov	r4, r3
+	mov	#~(0x1f), r1
+
+	and	r3, r1
+	cmp/eq	r3, r1
+
+	bt/s	11f		! dst is already aligned
+	 sub	r1, r3		! r3-r1 -> r3
+	shlr2	r3		! number of loops
+
+10:	mov.l	r5,@-r4
+	dt	r3
+	bf/s	10b
+	 add	#-4, r6
+
+11:	! dst is 32-byte aligned
+	mov	r6,r2
+	mov	#-5,r0
+	shld	r0,r2		! number of loops
+
+	add	#-32, r4
+	mov	r5, r0
+
+#ifdef MEMSET_USES_FPU
+	lds	r5, fpul	! (CO)
+	fsts	fpul, fr0	! Dr0 will be 'VVVVVVVV'
+	fsts	fpul, fr1
+
+	FPU_SET_PAIRED_PREC
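+	! Each pass below fills one 32-byte line: movca.l allocates the line
+	! and sets bytes 0-3, one long store sets bytes 4-7, and three 8-byte
+	! FPU stores set bytes 8-31.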
+12:
+	movca.l	r0, @r4
+	mov.l	r5, @(4, r4)
+	add	#32, r4
+	fmov	dr0, @-r4
+	fmov	dr0, @-r4
+	add	#-0x20, r6
+	fmov	dr0, @-r4
+	dt	r2
+	bf/s	12b
+	 add	#-40, r4
+
+	RESTORE_FPSCR
+#else
+12:
+	movca.l	r0,@r4
+	mov.l	r5,@(4, r4)
+	mov.l	r5,@(8, r4)
+	mov.l	r5,@(12,r4)
+	mov.l	r5,@(16,r4)
+	mov.l	r5,@(20,r4)
+	add	#-0x20, r6
+	mov.l	r5,@(24,r4)
+	dt	r2
+	mov.l	r5,@(28,r4)
+	bf/s	12b
+	 add	#-32, r4
+
+#endif
+	add	#32, r4
+	mov	#8, r0
+	cmp/ge	r0, r6
+	bf	40f
+
+	mov	r6,r0
+22:
+	shlr2	r0
+	shlr	r0		! r0 = r6 >> 3
+3:
+	dt	r0
+	mov.l	r5,@-r4		! set 8 bytes per iteration
+	bf/s	3b
+	 mov.l	r5,@-r4
+	!
+	mov	#7,r0
+	and	r0,r6
+
+	! fill bytes (length may be zero)
+40:	tst	r6,r6
+	bt	5f
+4:
+	dt	r6
+	bf/s	4b
+	 mov.b	r5,@-r4
+5:
+	rts
+	 mov	r4,r0
+END(memset)
+libc_hidden_def (memset)
diff --git a/ap/build/uClibc/libc/string/sh/sh4/strcpy.S b/ap/build/uClibc/libc/string/sh/sh4/strcpy.S
new file mode 100644
index 0000000..0f82780
--- /dev/null
+++ b/ap/build/uClibc/libc/string/sh/sh4/strcpy.S
@@ -0,0 +1,28 @@
+/* strcpy implementation for SUPERH
+ *
+ * Copyright (C) 2009 STMicroelectronics Ltd.
+ *
+ * Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+ *
+ * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
+ */
+
+/*
+	char *strcpy(char *dest, const char *src);
+ */
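+
+/* For reference, an equivalent C sketch of the loop below (illustrative
+ * only, not part of the build):
+ *
+ *	char *strcpy(char *dest, const char *src)
+ *	{
+ *		char *d = dest;
+ *		char c;
+ *
+ *		do {
+ *			c = *src++;
+ *			*d++ = c;	// the NUL terminator is copied too
+ *		} while (c != '\0');
+ *		return dest;
+ *	}
+ */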
+
+#include <sysdep.h>
+
+ENTRY(strcpy)
+	mov	r4,r2
+1:
+	mov.b	@r5+,r1
+	tst	r1,r1
+	mov.b	r1,@r2
+	bf/s	1b
+	 add	#1,r2
+
+	rts
+	 mov	r4,r0
+END(strcpy)
+libc_hidden_def (strcpy)
diff --git a/ap/build/uClibc/libc/string/sh/sh4/strncpy.S b/ap/build/uClibc/libc/string/sh/sh4/strncpy.S
new file mode 100644
index 0000000..8a16f39
--- /dev/null
+++ b/ap/build/uClibc/libc/string/sh/sh4/strncpy.S
@@ -0,0 +1,43 @@
+/* strncpy implementation for SUPERH
+ *
+ * Copyright (C) 2009 STMicroelectronics Ltd.
+ *
+ * Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+ *
+ * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
+ */
+
+/*
+	char *strncpy(char *dest, const char *src, size_t n);
+ */
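+
+/* For reference, the semantics implemented below (illustrative C sketch,
+ * not part of the build):
+ *
+ *	char *strncpy(char *dest, const char *src, size_t n)
+ *	{
+ *		size_t i;
+ *
+ *		for (i = 0; i < n && src[i] != '\0'; i++)
+ *			dest[i] = src[i];
+ *		for (; i < n; i++)	// zero-pad out to n bytes
+ *			dest[i] = '\0';
+ *		return dest;
+ *	}
+ */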
+
+#include <sysdep.h>
+
+ENTRY(strncpy)
+	mov	#0,r0
+	bra	2f
+	 mov	r4,r2
+1:
+	mov.b	r1,@(r0,r2)
+	add	#1,r0
+2:
+	cmp/hs	r6,r0
+	bt	5f
+	mov.b	@(r0,r5),r1
+	tst	r1,r1
+	bf/s	1b
+	 cmp/hs	r6,r0
+	bra	4f
+	 nop
+3:
+	mov.b	r1,@(r0,r2)
+	add	#1,r0
+	cmp/hs	r6,r0
+4:
+	bf/s	3b
+	 mov	#0,r1
+5:
+	rts
+	 mov	r2,r0
+END(strncpy)
+libc_hidden_def(strncpy)
diff --git a/ap/build/uClibc/libc/string/sh/strlen.S b/ap/build/uClibc/libc/string/sh/strlen.S
new file mode 100644
index 0000000..1ccecc1
--- /dev/null
+++ b/ap/build/uClibc/libc/string/sh/strlen.S
@@ -0,0 +1,75 @@
+/* $Id: strlen.S,v 1.2 2001/06/29 14:07:15 gniibe Exp $
+ *
+ * "strlen" implementation of SuperH
+ *
+ * Copyright (C) 1999  Kaz Kojima
+ *
+ * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
+ */
+
+/* size_t strlen (const char *s)  */
+
+#include <sysdep.h>
+#include <endian.h>
+
+ENTRY(strlen)
+	mov	r4,r0
+	and	#3,r0
+	tst	r0,r0
+	bt/s	1f
+	 mov	#0,r2
+
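+	! s is unaligned: each byte-check block below is 8 bytes of code, so
+	! branching forward (r0 - 1) * 8 bytes tests exactly 4 - (s & 3)
+	! leading bytes one at a time before the long word loop.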
+	add	#-1,r0
+	shll2	r0
+	shll	r0
+	braf	r0
+	 nop
+
+	mov.b	@r4+,r1
+	tst	r1,r1
+	bt	8f
+	add	#1,r2
+
+	mov.b	@r4+,r1
+	tst	r1,r1
+	bt	8f
+	add	#1,r2
+
+	mov.b	@r4+,r1
+	tst	r1,r1
+	bt	8f
+	add	#1,r2
+
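+	! cmp/str sets T when any byte of r1 equals the corresponding byte of
+	! r3; with r3 = 0 it detects a NUL anywhere in the loaded long word.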
+1:
+	mov	#0,r3
+2:
+	mov.l	@r4+,r1
+	cmp/str	r3,r1
+	bf/s	2b
+	 add	#4,r2
+
+	add	#-4,r2
+#ifndef __LITTLE_ENDIAN__
+	swap.b	r1,r1
+	swap.w	r1,r1
+	swap.b	r1,r1
+#endif
+	extu.b	r1,r0
+	tst	r0,r0
+	bt/s	8f
+	 shlr8	r1
+	add	#1,r2
+	extu.b	r1,r0
+	tst	r0,r0
+	bt/s	8f
+	 shlr8	r1
+	add	#1,r2
+	extu.b	r1,r0
+	tst	r0,r0
+	bt	8f
+	add	#1,r2
+8:
+	rts
+	 mov	r2,r0
+END(strlen)
+libc_hidden_def (strlen)