ZTE's code, first commit

Change-Id: I9a04da59e459a9bc0d67f101f700d9d7dc8d681b
diff --git a/ap/build/uClibc/libc/string/sparc/sparc64/memchr.S b/ap/build/uClibc/libc/string/sparc/sparc64/memchr.S
new file mode 100644
index 0000000..f44850b
--- /dev/null
+++ b/ap/build/uClibc/libc/string/sparc/sparc64/memchr.S
@@ -0,0 +1,259 @@
+/* memchr (str, ch, n) -- Return a pointer to the first occurrence of CH in
+   the first N bytes of STR.
+   For SPARC v9.
+   Copyright (C) 1998, 1999, 2000, 2003 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Jan Vondrak <jvon4518@ss1000.ms.mff.cuni.cz> and
+                  Jakub Jelinek <jj@ultra.linux.cz>.
+   This version is developed using the same algorithm as the fast C
+   version which carries the following introduction:
+   Based on strlen implementation by Torbjorn Granlund (tege@sics.se),
+   with help from Dan Sahlin (dan@sics.se) and
+   commentary by Jim Blandy (jimb@ai.mit.edu);
+   adaptation to memchr suggested by Dick Karpinski (dick@cca.ucsf.edu),
+   and implemented by Roland McGrath (roland@ai.mit.edu).
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <asm/asi.h>
+#ifndef XCC
+#define XCC xcc
+#define USE_BPR
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+#endif
+
+	/* Normally, this uses the
+	   ((xword - 0x0101010101010101) & 0x8080808080808080) test
+	   to find out whether any byte in xword could be zero. This is fast,
+	   but it also gives a false alarm for any byte in the range 0x81-0xff.
+	   That does not matter for correctness: whenever the test says there
+	   could be a zero byte, we check byte by byte. But if bytes with the
+	   high bit set are common in the strings, this gives poor
+	   performance. You can #define EIGHTBIT_NOT_RARE and the algorithm
+	   will use the one-tick-slower, but more precise, test
+	   ((xword - 0x0101010101010101) & (~xword) & 0x8080808080808080),
+	   which gives no false alarms (though when some bits are set, one
+	   cannot tell from them which bytes are zero and which are not).
+	   It has yet to be measured what the correct default for glibc
+	   is these days for an average user.
+	 */
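+	/* Editor's illustration (not part of the original source): a minimal
+	   C sketch of the two tests described above.  haszero() is the fast
+	   test with false alarms on bytes in 0x81-0xff; haszero_exact() is
+	   the EIGHTBIT_NOT_RARE variant.  The helper names are hypothetical.
+
+	       #include <stdint.h>
+
+	       static int haszero (uint64_t x)
+	       {
+	           return ((x - 0x0101010101010101ULL)
+	                   & 0x8080808080808080ULL) != 0;
+	       }
+
+	       static int haszero_exact (uint64_t x)
+	       {
+	           return ((x - 0x0101010101010101ULL) & ~x
+	                   & 0x8080808080808080ULL) != 0;
+	       }
+
+	   memchr applies the test to (xword ^ (c replicated in every byte)),
+	   so a byte equal to c shows up as a zero byte.  */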
+
+	.text
+	.align		32
+ENTRY(memchr)
+	and		%o1, 0xff, %o1			/* IEU0		Group		*/
+#ifdef USE_BPR
+	brz,pn		%o2, 12f			/* CTI+IEU1			*/
+#else
+	tst		%o2				/* IEU1				*/
+	be,pn		%XCC, 12f			/* CTI				*/
+#endif
+	 sll		%o1, 8, %g3			/* IEU0		Group		*/
+	add		%o0, %o2, %o2			/* IEU1				*/
+
+	sethi		%hi(0x01010101), %g1		/* IEU0		Group		*/
+	or		%g3, %o1, %g3			/* IEU1				*/
+	ldub		[%o0], %o3			/* Load				*/
+	sllx		%g3, 16, %g5			/* IEU0		Group		*/
+
+	or		%g1, %lo(0x01010101), %g1	/* IEU1				*/
+	sllx		%g1, 32, %g2			/* IEU0		Group		*/
+	or		%g3, %g5, %g3			/* IEU1				*/
+	sllx		%g3, 32, %g5			/* IEU0		Group		*/
+
+	cmp		%o3, %o1			/* IEU1				*/
+	be,pn		%xcc, 13f			/* CTI				*/
+	 or		%g1, %g2, %g1			/* IEU0		Group		*/
+	andcc		%o0, 7, %g0			/* IEU1				*/
+
+	bne,a,pn	%icc, 21f			/* CTI				*/
+	 add		%o0, 1, %o0			/* IEU0		Group		*/
+	ldx		[%o0], %o3			/* Load		Group		*/
+	sllx		%g1, 7, %g2			/* IEU0				*/
+
+	or		%g3, %g5, %g3			/* IEU1				*/
+1:	add		%o0, 8, %o0			/* IEU0		Group		*/
+	xor		%o3, %g3, %o4			/* IEU1				*/
+							/* %g1 = 0101010101010101	*
+							 * %g2 = 8080808080808080	*
+							 * %g3 =  c c c c c c c c	*
+							 * %o3 =      value		*
+							 * %o4 =   value XOR c		*/
+2:	cmp		%o0, %o2			/* IEU1		Group		*/
+
+	bg,pn		%XCC, 11f			/* CTI				*/
+	 ldxa		[%o0] ASI_PNF, %o3		/* Load				*/
+	sub		%o4, %g1, %o5			/* IEU0		Group		*/
+	add		%o0, 8, %o0			/* IEU1				*/
+#ifdef EIGHTBIT_NOT_RARE
+	andn		%o5, %o4, %o5			/* IEU0		Group		*/
+#endif
+
+	andcc		%o5, %g2, %g0			/* IEU1		Group		*/
+	be,a,pt		%xcc, 2b			/* CTI				*/
+	 xor		%o3, %g3, %o4			/* IEU0				*/
+	srlx		%o4, 56, %g5			/* IEU0				*/
+
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 3f			/* CTI				*/
+	 srlx		%o4, 48, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 4f			/* CTI				*/
+	 srlx		%o4, 40, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 5f			/* CTI				*/
+
+	 srlx		%o4, 32, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 6f			/* CTI				*/
+	 srlx		%o4, 24, %g5			/* IEU0				*/
+
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 7f			/* CTI				*/
+	 srlx		%o4, 16, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 8f			/* CTI				*/
+	 srlx		%o4, 8, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 9f			/* CTI				*/
+
+	 andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+	bne,pt		%icc, 2b			/* CTI				*/
+	 xor		%o3, %g3, %o4			/* IEU0				*/
+	retl						/* CTI+IEU1	Group		*/
+
+	 add		%o0, -9, %o0			/* IEU0				*/
+
+	.align		16
+3:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -16, %o0			/* IEU0				*/
+4:   	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -15, %o0			/* IEU0				*/
+
+5:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -14, %o0			/* IEU0				*/
+6:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -13, %o0			/* IEU0				*/
+
+7:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -12, %o0			/* IEU0				*/
+8:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -11, %o0			/* IEU0				*/
+
+9:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -10, %o0			/* IEU0				*/
+11:	sub		%o4, %g1, %o5			/* IEU0		Group		*/
+	sub		%o0, 8, %o0			/* IEU1				*/
+
+	andcc		%o5, %g2, %g0			/* IEU1		Group		*/
+	be,pt		%xcc, 12f			/* CTI				*/
+	 sub		%o2, %o0, %o2			/* IEU0				*/
+	tst		%o2				/* IEU1		Group		*/
+
+	be,pn		%XCC, 12f			/* CTI				*/
+	 srlx		%o4, 56, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 13f			/* CTI				*/
+
+	 cmp		%o2, 1				/* IEU0				*/
+	be,pn		%XCC, 12f			/* CTI		Group		*/
+	 srlx		%o4, 48, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 14f			/* CTI				*/
+	 cmp		%o2, 2				/* IEU1		Group		*/
+	be,pn		%XCC, 12f			/* CTI				*/
+	 srlx		%o4, 40, %g5			/* IEU0				*/
+
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 15f			/* CTI				*/
+	 cmp		%o2, 3				/* IEU1		Group		*/
+	be,pn		%XCC, 12f			/* CTI				*/
+
+	 srlx		%o4, 32, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 16f			/* CTI				*/
+	 cmp		%o2, 4				/* IEU1		Group		*/
+
+	be,pn		%XCC, 12f			/* CTI				*/
+	 srlx		%o4, 24, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 17f			/* CTI				*/
+
+	 cmp		%o2, 5				/* IEU1		Group		*/
+	be,pn		%XCC, 12f			/* CTI				*/
+	 srlx		%o4, 16, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 18f			/* CTI				*/
+	 cmp		%o2, 6				/* IEU1		Group		*/
+	be,pn		%XCC, 12f			/* CTI				*/
+	 srlx		%o4, 8, %g5			/* IEU0				*/
+
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 19f			/* CTI				*/
+	 nop						/* IEU0				*/
+12:	retl						/* CTI+IEU1	Group		*/
+
+	 clr		%o0				/* IEU0				*/
+	nop						/* Stub				*/
+13:	retl						/* CTI+IEU1	Group		*/
+	 nop						/* IEU0				*/
+
+14:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, 1, %o0			/* IEU0				*/
+15:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, 2, %o0			/* IEU0				*/
+
+16:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, 3, %o0			/* IEU0				*/
+17:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, 4, %o0			/* IEU0				*/
+
+18:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, 5, %o0			/* IEU0				*/
+19:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, 6, %o0			/* IEU0				*/
+
+21:	cmp		%o0, %o2			/* IEU1				*/
+	be,pn		%XCC, 12b			/* CTI				*/
+	 sllx		%g1, 7, %g2			/* IEU0		Group		*/
+	ldub		[%o0], %o3			/* Load				*/
+
+	or		%g3, %g5, %g3			/* IEU1				*/
+22:	andcc		%o0, 7, %g0			/* IEU1		Group		*/
+	be,a,pn		%icc, 1b			/* CTI				*/
+	 ldx		[%o0], %o3			/* Load				*/
+
+	cmp		%o3, %o1			/* IEU1		Group		*/
+	be,pn		%xcc, 23f			/* CTI				*/
+	 add		%o0, 1, %o0			/* IEU0				*/
+	cmp		%o0, %o2			/* IEU1		Group		*/
+
+	bne,a,pt	%XCC, 22b			/* CTI				*/
+	 ldub		[%o0], %o3			/* Load				*/
+	retl						/* CTI+IEU1	Group		*/
+	 clr		%o0				/* IEU0				*/
+
+23:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -1, %o0			/* IEU0				*/
+END(memchr)
+
+libc_hidden_def(memchr)
+weak_alias(memchr,__ubp_memchr)
diff --git a/ap/build/uClibc/libc/string/sparc/sparc64/memcpy.S b/ap/build/uClibc/libc/string/sparc/sparc64/memcpy.S
new file mode 100644
index 0000000..db63d1d
--- /dev/null
+++ b/ap/build/uClibc/libc/string/sparc/sparc64/memcpy.S
@@ -0,0 +1,923 @@
+/* Copy SIZE bytes from SRC to DEST.
+   For UltraSPARC.
+   Copyright (C) 1996, 97, 98, 99, 2003 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by David S. Miller (davem@caip.rutgers.edu) and
+		  Jakub Jelinek (jakub@redhat.com).
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <features.h>
+#include <asm/asi.h>
+#ifndef XCC
+#define USE_BPR
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+	.register	%g6, #scratch
+#define XCC	xcc
+#endif
+#define FPRS_FEF	4
+
+#define FREG_FROB(f1, f2, f3, f4, f5, f6, f7, f8, f9)		\
+	faligndata	%f1, %f2, %f48;				\
+	faligndata	%f2, %f3, %f50;				\
+	faligndata	%f3, %f4, %f52;				\
+	faligndata	%f4, %f5, %f54;				\
+	faligndata	%f5, %f6, %f56;				\
+	faligndata	%f6, %f7, %f58;				\
+	faligndata	%f7, %f8, %f60;				\
+	faligndata	%f8, %f9, %f62;
+
+#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt)	\
+	ldda		[%src] %asi, %fdest;			\
+	add		%src, 0x40, %src;			\
+	add		%dest, 0x40, %dest;			\
+	subcc		%len, 0x40, %len;			\
+	be,pn		%xcc, jmptgt;				\
+	 stda		%fsrc, [%dest - 0x40] %asi;
+
+#define LOOP_CHUNK1(src, dest, len, branch_dest)		\
+	MAIN_LOOP_CHUNK(src, dest, f0,  f48, len, branch_dest)
+#define LOOP_CHUNK2(src, dest, len, branch_dest)		\
+	MAIN_LOOP_CHUNK(src, dest, f16, f48, len, branch_dest)
+#define LOOP_CHUNK3(src, dest, len, branch_dest)		\
+	MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest)
+
+#define STORE_SYNC(dest, fsrc)					\
+	stda		%fsrc, [%dest] %asi;			\
+	add		%dest, 0x40, %dest;
+
+#define STORE_JUMP(dest, fsrc, target)				\
+	stda		%fsrc, [%dest] %asi;			\
+	add		%dest, 0x40, %dest;			\
+	ba,pt		%xcc, target;
+
+#define VISLOOP_PAD nop; nop; nop; nop; 			\
+		    nop; nop; nop; nop; 			\
+		    nop; nop; nop; nop; 			\
+		    nop; nop; nop;
+
+#define FINISH_VISCHUNK(dest, f0, f1, left)			\
+	subcc		%left, 8, %left;			\
+	bl,pn		%xcc, 205f;				\
+	 faligndata	%f0, %f1, %f48;				\
+	std		%f48, [%dest];				\
+	add		%dest, 8, %dest;
+
+#define UNEVEN_VISCHUNK(dest, f0, f1, left)			\
+	subcc		%left, 8, %left;			\
+	bl,pn		%xcc, 205f;				\
+	 fsrc1		%f0, %f1;				\
+	ba,a,pt		%xcc, 204f;
+
+	/* Macros for non-VIS memcpy code. */
+#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3)		\
+	ldx		[%src + offset + 0x00], %t0; 		\
+	ldx		[%src + offset + 0x08], %t1; 		\
+	ldx		[%src + offset + 0x10], %t2; 		\
+	ldx		[%src + offset + 0x18], %t3; 		\
+	stw		%t0, [%dst + offset + 0x04]; 		\
+	srlx		%t0, 32, %t0;				\
+	stw		%t0, [%dst + offset + 0x00]; 		\
+	stw		%t1, [%dst + offset + 0x0c]; 		\
+	srlx		%t1, 32, %t1;				\
+	stw		%t1, [%dst + offset + 0x08]; 		\
+	stw		%t2, [%dst + offset + 0x14]; 		\
+	srlx		%t2, 32, %t2;				\
+	stw		%t2, [%dst + offset + 0x10]; 		\
+	stw		%t3, [%dst + offset + 0x1c];		\
+	srlx		%t3, 32, %t3;				\
+	stw		%t3, [%dst + offset + 0x18];
+
+#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3)	\
+	ldx		[%src + offset + 0x00], %t0; 		\
+	ldx		[%src + offset + 0x08], %t1; 		\
+	ldx		[%src + offset + 0x10], %t2; 		\
+	ldx		[%src + offset + 0x18], %t3; 		\
+	stx		%t0, [%dst + offset + 0x00]; 		\
+	stx		%t1, [%dst + offset + 0x08]; 		\
+	stx		%t2, [%dst + offset + 0x10]; 		\
+	stx		%t3, [%dst + offset + 0x18]; 		\
+	ldx		[%src + offset + 0x20], %t0; 		\
+	ldx		[%src + offset + 0x28], %t1; 		\
+	ldx		[%src + offset + 0x30], %t2; 		\
+	ldx		[%src + offset + 0x38], %t3; 		\
+	stx		%t0, [%dst + offset + 0x20]; 		\
+	stx		%t1, [%dst + offset + 0x28]; 		\
+	stx		%t2, [%dst + offset + 0x30]; 		\
+	stx		%t3, [%dst + offset + 0x38];
+
+#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3)	\
+	ldx		[%src - offset - 0x10], %t0;		\
+	ldx		[%src - offset - 0x08], %t1; 		\
+	stw		%t0, [%dst - offset - 0x0c]; 		\
+	srlx		%t0, 32, %t2;				\
+	stw		%t2, [%dst - offset - 0x10]; 		\
+	stw		%t1, [%dst - offset - 0x04]; 		\
+	srlx		%t1, 32, %t3;				\
+	stw		%t3, [%dst - offset - 0x08];
+
+#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1)		\
+	ldx		[%src - offset - 0x10], %t0; 		\
+	ldx		[%src - offset - 0x08], %t1; 		\
+	stx		%t0, [%dst - offset - 0x10]; 		\
+	stx		%t1, [%dst - offset - 0x08];
+
+	/* Macros for non-VIS memmove code. */
+#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3)	\
+	ldx		[%src - offset - 0x20], %t0; 		\
+	ldx		[%src - offset - 0x18], %t1; 		\
+	ldx		[%src - offset - 0x10], %t2; 		\
+	ldx		[%src - offset - 0x08], %t3; 		\
+	stw		%t0, [%dst - offset - 0x1c]; 		\
+	srlx		%t0, 32, %t0;				\
+	stw		%t0, [%dst - offset - 0x20]; 		\
+	stw		%t1, [%dst - offset - 0x14]; 		\
+	srlx		%t1, 32, %t1;				\
+	stw		%t1, [%dst - offset - 0x18]; 		\
+	stw		%t2, [%dst - offset - 0x0c]; 		\
+	srlx		%t2, 32, %t2;				\
+	stw		%t2, [%dst - offset - 0x10]; 		\
+	stw		%t3, [%dst - offset - 0x04];		\
+	srlx		%t3, 32, %t3;				\
+	stw		%t3, [%dst - offset - 0x08];
+
+#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3)	\
+	ldx		[%src - offset - 0x20], %t0; 		\
+	ldx		[%src - offset - 0x18], %t1; 		\
+	ldx		[%src - offset - 0x10], %t2; 		\
+	ldx		[%src - offset - 0x08], %t3; 		\
+	stx		%t0, [%dst - offset - 0x20]; 		\
+	stx		%t1, [%dst - offset - 0x18]; 		\
+	stx		%t2, [%dst - offset - 0x10]; 		\
+	stx		%t3, [%dst - offset - 0x08];		\
+	ldx		[%src - offset - 0x40], %t0; 		\
+	ldx		[%src - offset - 0x38], %t1; 		\
+	ldx		[%src - offset - 0x30], %t2; 		\
+	ldx		[%src - offset - 0x28], %t3; 		\
+	stx		%t0, [%dst - offset - 0x40]; 		\
+	stx		%t1, [%dst - offset - 0x38]; 		\
+	stx		%t2, [%dst - offset - 0x30]; 		\
+	stx		%t3, [%dst - offset - 0x28];
+
+#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3)	\
+	ldx		[%src + offset + 0x00], %t0;		\
+	ldx		[%src + offset + 0x08], %t1; 		\
+	stw		%t0, [%dst + offset + 0x04]; 		\
+	srlx		%t0, 32, %t2;				\
+	stw		%t2, [%dst + offset + 0x00]; 		\
+	stw		%t1, [%dst + offset + 0x0c]; 		\
+	srlx		%t1, 32, %t3;				\
+	stw		%t3, [%dst + offset + 0x08];
+
+#define RMOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1)		\
+	ldx		[%src + offset + 0x00], %t0; 		\
+	ldx		[%src + offset + 0x08], %t1; 		\
+	stx		%t0, [%dst + offset + 0x00]; 		\
+	stx		%t1, [%dst + offset + 0x08];
+
+	.text
+	.align		32
+
+#ifdef __UCLIBC_SUSV3_LEGACY__
+ENTRY(bcopy)
+	sub		%o1, %o0, %o4			/* IEU0		Group		*/
+	mov		%o0, %g3			/* IEU1				*/
+	cmp		%o4, %o2			/* IEU1		Group		*/
+	mov		%o1, %o0			/* IEU0				*/
+	bgeu,pt		%XCC, 210f			/* CTI				*/
+	 mov		%g3, %o1			/* IEU0		Group		*/
+#ifndef USE_BPR
+	srl		%o2, 0, %o2			/* IEU1				*/
+#endif
+	brnz,pn		%o2, 220f			/* CTI		Group		*/
+	 add		%o0, %o2, %o0			/* IEU0				*/
+	retl
+	 nop
+END(bcopy)
+#endif
+
+	.align		32
+200:	be,pt		%xcc, 201f			/* CTI				*/
+	 andcc		%o0, 0x38, %g5			/* IEU1		Group		*/
+	mov		8, %g1				/* IEU0				*/
+	sub		%g1, %g2, %g2			/* IEU0		Group		*/
+	andcc		%o0, 1, %g0			/* IEU1				*/
+	be,pt		%icc, 2f			/* CTI				*/
+	 sub		%o2, %g2, %o2			/* IEU0		Group		*/
+1:	ldub		[%o1], %o5			/* Load		Group		*/
+	add		%o1, 1, %o1			/* IEU0				*/
+	add		%o0, 1, %o0			/* IEU1				*/
+	subcc		%g2, 1, %g2			/* IEU1		Group		*/
+	be,pn		%xcc, 3f			/* CTI				*/
+	 stb		%o5, [%o0 - 1]			/* Store			*/
+2:	ldub		[%o1], %o5			/* Load		Group		*/
+	add		%o0, 2, %o0			/* IEU0				*/
+	ldub		[%o1 + 1], %g3			/* Load		Group		*/
+	subcc		%g2, 2, %g2			/* IEU1		Group		*/
+	stb		%o5, [%o0 - 2]			/* Store			*/
+	add		%o1, 2, %o1			/* IEU0				*/
+	bne,pt		%xcc, 2b			/* CTI		Group		*/
+	 stb		%g3, [%o0 - 1]			/* Store			*/
+3:	andcc		%o0, 0x38, %g5			/* IEU1		Group		*/
+201:	be,pt		%icc, 202f			/* CTI				*/
+	 mov		64, %g1				/* IEU0				*/
+	fmovd		%f0, %f2			/* FPU				*/
+	sub		%g1, %g5, %g5			/* IEU0		Group		*/
+	alignaddr	%o1, %g0, %g1			/* GRU		Group		*/
+	ldd		[%g1], %f4			/* Load		Group		*/
+	sub		%o2, %g5, %o2			/* IEU0				*/
+1:	ldd		[%g1 + 0x8], %f6		/* Load		Group		*/
+	add		%g1, 0x8, %g1			/* IEU0		Group		*/
+	subcc		%g5, 8, %g5			/* IEU1				*/
+	faligndata	%f4, %f6, %f0			/* GRU		Group		*/
+	std		%f0, [%o0]			/* Store			*/
+	add		%o1, 8, %o1			/* IEU0		Group		*/
+	be,pn		%xcc, 202f			/* CTI				*/
+	 add		%o0, 8, %o0			/* IEU1				*/
+	ldd		[%g1 + 0x8], %f4		/* Load		Group		*/
+	add		%g1, 8, %g1			/* IEU0				*/
+	subcc		%g5, 8, %g5			/* IEU1				*/
+	faligndata	%f6, %f4, %f0			/* GRU		Group		*/
+	std		%f0, [%o0]			/* Store			*/
+	add		%o1, 8, %o1			/* IEU0				*/
+	bne,pt		%xcc, 1b			/* CTI		Group		*/
+	 add		%o0, 8, %o0			/* IEU0				*/
+202:	membar	  #LoadStore | #StoreStore | #StoreLoad	/* LSU		Group		*/
+	wr		%g0, ASI_BLK_P, %asi		/* LSU		Group		*/
+	subcc		%o2, 0x40, %g6			/* IEU1		Group		*/
+	mov		%o1, %g1			/* IEU0				*/
+	andncc		%g6, (0x40 - 1), %g6		/* IEU1		Group		*/
+	srl		%g1, 3, %g2			/* IEU0				*/
+	sub		%o2, %g6, %g3			/* IEU0		Group		*/
+	andn		%o1, (0x40 - 1), %o1		/* IEU1				*/
+	and		%g2, 7, %g2			/* IEU0		Group		*/
+	andncc		%g3, 0x7, %g3			/* IEU1				*/
+	fmovd		%f0, %f2			/* FPU				*/
+	sub		%g3, 0x10, %g3			/* IEU0		Group		*/
+	sub		%o2, %g6, %o2			/* IEU1				*/
+	alignaddr	%g1, %g0, %g0			/* GRU		Group		*/
+	add		%g1, %g6, %g1			/* IEU0		Group		*/
+	subcc		%o2, %g3, %o2			/* IEU1				*/
+	ldda		[%o1 + 0x00] %asi, %f0		/* LSU		Group		*/
+	add		%g1, %g3, %g1			/* IEU0				*/
+	ldda		[%o1 + 0x40] %asi, %f16		/* LSU		Group		*/
+	sub		%g6, 0x80, %g6			/* IEU0				*/
+	ldda		[%o1 + 0x80] %asi, %f32		/* LSU		Group		*/
+							/* Clk1		Group 8-(	*/
+							/* Clk2		Group 8-(	*/
+							/* Clk3		Group 8-(	*/
+							/* Clk4		Group 8-(	*/
+203:	rd		%pc, %g5			/* PDU		Group 8-(	*/
+	addcc		%g5, %lo(300f - 203b), %g5	/* IEU1		Group		*/
+	sll		%g2, 9, %g2			/* IEU0				*/
+	jmpl		%g5 + %g2, %g0			/* CTI		Group brk forced*/
+	 addcc		%o1, 0xc0, %o1			/* IEU1		Group		*/
+
+	.align		512		/* OK, here comes the fun part... */
+300:	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)	LOOP_CHUNK1(o1, o0, g6, 301f)
+	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)	LOOP_CHUNK2(o1, o0, g6, 302f)
+	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)	LOOP_CHUNK3(o1, o0, g6, 303f)
+	b,pt		%xcc, 300b+4; faligndata %f0, %f2, %f48
+301:	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)	STORE_JUMP(o0, f48, 400f) membar #Sync
+302:	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)	STORE_JUMP(o0, f48, 416f) membar #Sync
+303:	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)	STORE_JUMP(o0, f48, 432f) membar #Sync
+	VISLOOP_PAD
+310:	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)	LOOP_CHUNK1(o1, o0, g6, 311f)
+	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)	LOOP_CHUNK2(o1, o0, g6, 312f)
+	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)	LOOP_CHUNK3(o1, o0, g6, 313f)
+	b,pt		%xcc, 310b+4; faligndata %f2, %f4, %f48
+311:	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)	STORE_JUMP(o0, f48, 402f) membar #Sync
+312:	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)	STORE_JUMP(o0, f48, 418f) membar #Sync
+313:	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)	STORE_JUMP(o0, f48, 434f) membar #Sync
+	VISLOOP_PAD
+320:	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)	LOOP_CHUNK1(o1, o0, g6, 321f)
+	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)	LOOP_CHUNK2(o1, o0, g6, 322f)
+	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)	LOOP_CHUNK3(o1, o0, g6, 323f)
+	b,pt		%xcc, 320b+4; faligndata %f4, %f6, %f48
+321:	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)	STORE_JUMP(o0, f48, 404f) membar #Sync
+322:	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)	STORE_JUMP(o0, f48, 420f) membar #Sync
+323:	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)	STORE_JUMP(o0, f48, 436f) membar #Sync
+	VISLOOP_PAD
+330:	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)	LOOP_CHUNK1(o1, o0, g6, 331f)
+	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)	LOOP_CHUNK2(o1, o0, g6, 332f)
+	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)	LOOP_CHUNK3(o1, o0, g6, 333f)
+	b,pt		%xcc, 330b+4; faligndata %f6, %f8, %f48
+331:	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)	STORE_JUMP(o0, f48, 406f) membar #Sync
+332:	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)	STORE_JUMP(o0, f48, 422f) membar #Sync
+333:	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)	STORE_JUMP(o0, f48, 438f) membar #Sync
+	VISLOOP_PAD
+340:	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)	LOOP_CHUNK1(o1, o0, g6, 341f)
+	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)	LOOP_CHUNK2(o1, o0, g6, 342f)
+	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)	LOOP_CHUNK3(o1, o0, g6, 343f)
+	b,pt		%xcc, 340b+4; faligndata %f8, %f10, %f48
+341:	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)	STORE_JUMP(o0, f48, 408f) membar #Sync
+342:	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)	STORE_JUMP(o0, f48, 424f) membar #Sync
+343:	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)	STORE_JUMP(o0, f48, 440f) membar #Sync
+	VISLOOP_PAD
+350:	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)	LOOP_CHUNK1(o1, o0, g6, 351f)
+	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)	LOOP_CHUNK2(o1, o0, g6, 352f)
+	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)	LOOP_CHUNK3(o1, o0, g6, 353f)
+	b,pt		%xcc, 350b+4; faligndata %f10, %f12, %f48
+351:	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)	STORE_JUMP(o0, f48, 410f) membar #Sync
+352:	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)	STORE_JUMP(o0, f48, 426f) membar #Sync
+353:	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)	STORE_JUMP(o0, f48, 442f) membar #Sync
+	VISLOOP_PAD
+360:	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)	LOOP_CHUNK1(o1, o0, g6, 361f)
+	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)	LOOP_CHUNK2(o1, o0, g6, 362f)
+	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)	LOOP_CHUNK3(o1, o0, g6, 363f)
+	b,pt		%xcc, 360b+4; faligndata %f12, %f14, %f48
+361:	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)	STORE_JUMP(o0, f48, 412f) membar #Sync
+362:	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)	STORE_JUMP(o0, f48, 428f) membar #Sync
+363:	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)	STORE_JUMP(o0, f48, 444f) membar #Sync
+	VISLOOP_PAD
+370:	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)	LOOP_CHUNK1(o1, o0, g6, 371f)
+	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)	LOOP_CHUNK2(o1, o0, g6, 372f)
+	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)	LOOP_CHUNK3(o1, o0, g6, 373f)
+	b,pt		%xcc, 370b+4; faligndata %f14, %f16, %f48
+371:	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)	STORE_JUMP(o0, f48, 414f) membar #Sync
+372:	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)	STORE_JUMP(o0, f48, 430f) membar #Sync
+373:	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)	STORE_JUMP(o0, f48, 446f) membar #Sync
+	VISLOOP_PAD
+400:	FINISH_VISCHUNK(o0, f0,  f2,  g3)
+402:	FINISH_VISCHUNK(o0, f2,  f4,  g3)
+404:	FINISH_VISCHUNK(o0, f4,  f6,  g3)
+406:	FINISH_VISCHUNK(o0, f6,  f8,  g3)
+408:	FINISH_VISCHUNK(o0, f8,  f10, g3)
+410:	FINISH_VISCHUNK(o0, f10, f12, g3)
+412:	FINISH_VISCHUNK(o0, f12, f14, g3)
+414:	UNEVEN_VISCHUNK(o0, f14, f0,  g3)
+416:	FINISH_VISCHUNK(o0, f16, f18, g3)
+418:	FINISH_VISCHUNK(o0, f18, f20, g3)
+420:	FINISH_VISCHUNK(o0, f20, f22, g3)
+422:	FINISH_VISCHUNK(o0, f22, f24, g3)
+424:	FINISH_VISCHUNK(o0, f24, f26, g3)
+426:	FINISH_VISCHUNK(o0, f26, f28, g3)
+428:	FINISH_VISCHUNK(o0, f28, f30, g3)
+430:	UNEVEN_VISCHUNK(o0, f30, f0,  g3)
+432:	FINISH_VISCHUNK(o0, f32, f34, g3)
+434:	FINISH_VISCHUNK(o0, f34, f36, g3)
+436:	FINISH_VISCHUNK(o0, f36, f38, g3)
+438:	FINISH_VISCHUNK(o0, f38, f40, g3)
+440:	FINISH_VISCHUNK(o0, f40, f42, g3)
+442:	FINISH_VISCHUNK(o0, f42, f44, g3)
+444:	FINISH_VISCHUNK(o0, f44, f46, g3)
+446:	UNEVEN_VISCHUNK(o0, f46, f0,  g3)
+204:	ldd		[%o1], %f2			/* Load		Group		*/
+	add		%o1, 8, %o1			/* IEU0				*/
+	subcc		%g3, 8, %g3			/* IEU1				*/
+	faligndata	%f0, %f2, %f8			/* GRU		Group		*/
+	std		%f8, [%o0]			/* Store			*/
+	bl,pn		%xcc, 205f			/* CTI				*/
+	 add		%o0, 8, %o0			/* IEU0		Group		*/
+	ldd		[%o1], %f0			/* Load		Group		*/
+	add		%o1, 8, %o1			/* IEU0				*/
+	subcc		%g3, 8, %g3			/* IEU1				*/
+	faligndata	%f2, %f0, %f8			/* GRU		Group		*/
+	std		%f8, [%o0]			/* Store			*/
+	bge,pt		%xcc, 204b			/* CTI				*/
+	 add		%o0, 8, %o0			/* IEU0		Group		*/
+205:	brz,pt		%o2, 207f			/* CTI		Group		*/
+	 mov		%g1, %o1			/* IEU0				*/
+206:	ldub		[%o1], %g5			/* LOAD				*/
+	add		%o1, 1, %o1			/* IEU0				*/
+	add		%o0, 1, %o0			/* IEU1				*/
+	subcc		%o2, 1, %o2			/* IEU1				*/
+	bne,pt		%xcc, 206b			/* CTI				*/
+	 stb		%g5, [%o0 - 1]			/* Store	Group		*/
+207:	membar		#StoreLoad | #StoreStore	/* LSU		Group		*/
+	wr		%g0, FPRS_FEF, %fprs
+	retl
+	 mov		%g4, %o0
+
+208:	andcc		%o2, 1, %g0			/* IEU1		Group		*/
+	be,pt		%icc, 2f+4			/* CTI				*/
+1:	 ldub		[%o1], %g5			/* LOAD		Group		*/
+	add		%o1, 1, %o1			/* IEU0				*/
+	add		%o0, 1, %o0			/* IEU1				*/
+	subcc		%o2, 1, %o2			/* IEU1		Group		*/
+	be,pn		%xcc, 209f			/* CTI				*/
+	 stb		%g5, [%o0 - 1]			/* Store			*/
+2:	ldub		[%o1], %g5			/* LOAD		Group		*/
+	add		%o0, 2, %o0			/* IEU0				*/
+	ldub		[%o1 + 1], %o5			/* LOAD		Group		*/
+	add		%o1, 2, %o1			/* IEU0				*/
+	subcc		%o2, 2, %o2			/* IEU1		Group		*/
+	stb		%g5, [%o0 - 2]			/* Store			*/
+	bne,pt		%xcc, 2b			/* CTI				*/
+	 stb		%o5, [%o0 - 1]			/* Store			*/
+209:	retl
+	 mov		%g4, %o0
+
+#ifdef USE_BPR
+
+	/* void *__align_cpy_4(void *dest, void *src, size_t n)
+	 * SPARC v9 SYSV ABI
+	 * Like memcpy, but results are undefined if (!n || ((dest | src | n) & 3))
+	 */
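+	/* Editor's note (illustration, not part of the original source): in
+	 * C terms the contract above means a caller may only do e.g.
+	 *
+	 *     extern void *__align_cpy_4 (void *dest, void *src, size_t n);
+	 *
+	 *     unsigned int d[4], s[4] = { 1, 2, 3, 4 };
+	 *     __align_cpy_4 (d, s, sizeof s);
+	 *
+	 * where d, s and the byte count are 4-byte aligned and the count is
+	 * non-zero; any other combination is undefined.
+	 */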
+
+	.align		32
+ENTRY(__align_cpy_4)
+	mov		%o0, %g4			/* IEU0		Group		*/
+	cmp		%o2, 15				/* IEU1				*/
+	bleu,pn		%xcc, 208b			/* CTI				*/
+	 cmp		%o2, (64 * 6)			/* IEU1		Group		*/
+	bgeu,pn		%xcc, 200b			/* CTI				*/
+	 andcc		%o0, 7, %g2			/* IEU1		Group		*/
+	ba,pt		%xcc, 216f			/* CTI				*/
+	 andcc		%o1, 4, %g0			/* IEU1		Group		*/
+END(__align_cpy_4)
+
+	/* void *__align_cpy_8(void *dest, void *src, size_t n)
+	 * SPARC v9 SYSV ABI
+	 * Like memcpy, but results are undefined if (!n || ((dest | src | n) & 7))
+	 */
+
+	.align		32
+ENTRY(__align_cpy_8)
+	mov		%o0, %g4			/* IEU0		Group		*/
+	cmp		%o2, 15				/* IEU1				*/
+	bleu,pn		%xcc, 208b			/* CTI				*/
+	 cmp		%o2, (64 * 6)			/* IEU1		Group		*/
+	bgeu,pn		%xcc, 201b			/* CTI				*/
+	 andcc		%o0, 0x38, %g5			/* IEU1		Group		*/
+	andcc		%o2, -128, %g6			/* IEU1		Group		*/
+	bne,a,pt	%xcc, 82f + 4			/* CTI				*/
+	 ldx		[%o1], %g1			/* Load				*/
+	ba,pt		%xcc, 41f			/* CTI		Group		*/
+	 andcc		%o2, 0x70, %g6			/* IEU1				*/
+END(__align_cpy_8)
+
+	/* void *__align_cpy_16(void *dest, void *src, size_t n)
+	 * SPARC v9 SYSV ABI
+	 * Like memcpy, but results are undefined if (!n || ((dest | src | n) & 15))
+	 */
+
+	.align		32
+ENTRY(__align_cpy_16)
+	mov		%o0, %g4			/* IEU0		Group		*/
+	cmp		%o2, (64 * 6)			/* IEU1				*/
+	bgeu,pn		%xcc, 201b			/* CTI				*/
+	 andcc		%o0, 0x38, %g5			/* IEU1		Group		*/
+	andcc		%o2, -128, %g6			/* IEU1		Group		*/
+	bne,a,pt	%xcc, 82f + 4			/* CTI				*/
+	 ldx		[%o1], %g1			/* Load				*/
+	ba,pt		%xcc, 41f			/* CTI		Group		*/
+	 andcc		%o2, 0x70, %g6			/* IEU1				*/
+END(__align_cpy_16)
+
+#endif
+
+	.align		32
+ENTRY(memcpy)
+210:
+#ifndef USE_BPR
+	srl		%o2, 0, %o2			/* IEU1		Group		*/
+#endif	
+	brz,pn		%o2, 209b			/* CTI		Group		*/
+	 mov		%o0, %g4			/* IEU0				*/
+218:	cmp		%o2, 15				/* IEU1		Group		*/
+	bleu,pn		%xcc, 208b			/* CTI				*/
+	 cmp		%o2, (64 * 6)			/* IEU1		Group		*/
+	bgeu,pn		%xcc, 200b			/* CTI				*/
+	 andcc		%o0, 7, %g2			/* IEU1		Group		*/
+	sub		%o0, %o1, %g5			/* IEU0				*/
+	andcc		%g5, 3, %o5			/* IEU1		Group		*/
+	bne,pn		%xcc, 212f			/* CTI				*/
+	 andcc		%o1, 3, %g0			/* IEU1		Group		*/
+	be,a,pt		%xcc, 216f			/* CTI				*/
+	 andcc		%o1, 4, %g0			/* IEU1		Group		*/
+	andcc		%o1, 1, %g0			/* IEU1		Group		*/
+	be,pn		%xcc, 4f			/* CTI				*/
+	 andcc		%o1, 2, %g0			/* IEU1		Group		*/
+	ldub		[%o1], %g2			/* Load		Group		*/
+	add		%o1, 1, %o1			/* IEU0				*/
+	add		%o0, 1, %o0			/* IEU1				*/
+	sub		%o2, 1, %o2			/* IEU0		Group		*/
+	bne,pn		%xcc, 5f			/* CTI		Group		*/
+	 stb		%g2, [%o0 - 1]			/* Store			*/
+4:	lduh		[%o1], %g2			/* Load		Group		*/
+	add		%o1, 2, %o1			/* IEU0				*/
+	add		%o0, 2, %o0			/* IEU1				*/
+	sub		%o2, 2, %o2			/* IEU0				*/
+	sth		%g2, [%o0 - 2]			/* Store	Group + bubble	*/
+5:	andcc		%o1, 4, %g0			/* IEU1				*/
+216:	be,a,pn		%xcc, 2f			/* CTI				*/
+	 andcc		%o2, -128, %g6			/* IEU1		Group		*/
+	lduw		[%o1], %g5			/* Load		Group		*/
+	add		%o1, 4, %o1			/* IEU0				*/
+	add		%o0, 4, %o0			/* IEU1				*/
+	sub		%o2, 4, %o2			/* IEU0		Group		*/
+	stw		%g5, [%o0 - 4]			/* Store			*/
+	andcc		%o2, -128, %g6			/* IEU1		Group		*/
+2:	be,pn		%xcc, 215f			/* CTI				*/
+	 andcc		%o0, 4, %g0			/* IEU1		Group		*/
+	be,pn		%xcc, 82f + 4			/* CTI		Group		*/
+5:	MOVE_BIGCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
+	MOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5)
+	MOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
+	MOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5)
+35:	subcc		%g6, 128, %g6			/* IEU1		Group		*/
+	add		%o1, 128, %o1			/* IEU0				*/
+	bne,pt		%xcc, 5b			/* CTI				*/
+	 add		%o0, 128, %o0			/* IEU0		Group		*/
+215:	andcc		%o2, 0x70, %g6			/* IEU1		Group		*/
+41:	be,pn		%xcc, 80f			/* CTI				*/
+	 andcc		%o2, 8, %g0			/* IEU1		Group		*/
+							/* Clk1 8-(			*/
+							/* Clk2 8-(			*/
+							/* Clk3 8-(			*/
+							/* Clk4 8-(			*/
+79:	rd		%pc, %o5			/* PDU		Group		*/
+	sll		%g6, 1, %g5			/* IEU0		Group		*/
+	add		%o1, %g6, %o1			/* IEU1				*/
+	sub		%o5, %g5, %o5			/* IEU0  	Group		*/
+	jmpl		%o5 + %lo(80f - 79b), %g0	/* CTI		Group brk forced*/
+	 add		%o0, %g6, %o0			/* IEU0		Group		*/
+36:	MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5)
+	MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5)
+	MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5)
+	MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g5, o5)
+	MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g5, o5)
+	MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g5, o5)
+	MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g5, o5)
+80:	be,pt		%xcc, 81f			/* CTI				*/
+	 andcc		%o2, 4, %g0			/* IEU1				*/
+	ldx		[%o1], %g2			/* Load		Group		*/
+	add		%o0, 8, %o0			/* IEU0				*/
+	stw		%g2, [%o0 - 0x4]		/* Store	Group		*/
+	add		%o1, 8, %o1			/* IEU1				*/
+	srlx		%g2, 32, %g2			/* IEU0		Group		*/
+	stw		%g2, [%o0 - 0x8]		/* Store			*/
+81:	be,pt		%xcc, 1f			/* CTI				*/
+	 andcc		%o2, 2, %g0			/* IEU1		Group		*/
+	lduw		[%o1], %g2			/* Load		Group		*/
+	add		%o1, 4, %o1			/* IEU0				*/
+	stw		%g2, [%o0]			/* Store	Group		*/
+	add		%o0, 4, %o0			/* IEU0				*/
+1:	be,pt		%xcc, 1f			/* CTI				*/
+	 andcc		%o2, 1, %g0			/* IEU1		Group		*/
+	lduh		[%o1], %g2			/* Load		Group		*/
+	add		%o1, 2, %o1			/* IEU0				*/
+	sth		%g2, [%o0]			/* Store	Group		*/
+	add		%o0, 2, %o0			/* IEU0				*/
+1:	be,pt		%xcc, 211f			/* CTI				*/
+	 nop						/* IEU1				*/
+	ldub		[%o1], %g2			/* Load		Group		*/
+	stb		%g2, [%o0]			/* Store	Group + bubble	*/
+211:	retl
+	 mov		%g4, %o0
+
+82:	MOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
+	MOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
+37:	subcc		%g6, 128, %g6			/* IEU1		Group		*/
+	add		%o1, 128, %o1			/* IEU0				*/
+	bne,pt		%xcc, 82b			/* CTI				*/
+	 add		%o0, 128, %o0			/* IEU0		Group		*/
+	andcc		%o2, 0x70, %g6			/* IEU1				*/
+	be,pn		%xcc, 84f			/* CTI				*/
+	 andcc		%o2, 8, %g0			/* IEU1		Group		*/
+							/* Clk1 8-(			*/
+							/* Clk2 8-(			*/
+							/* Clk3 8-(			*/
+							/* Clk4 8-(			*/
+83:	rd		%pc, %o5			/* PDU		Group		*/
+	add		%o1, %g6, %o1			/* IEU0		Group		*/
+	sub		%o5, %g6, %o5			/* IEU1				*/
+	jmpl		%o5 + %lo(84f - 83b), %g0	/* CTI		Group brk forced*/
+	 add		%o0, %g6, %o0			/* IEU0		Group		*/
+38:	MOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3)
+	MOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3)
+	MOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3)
+	MOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3)
+	MOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3)
+	MOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3)
+	MOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3)
+84:	be,pt		%xcc, 85f			/* CTI		Group		*/
+	 andcc		%o2, 4, %g0			/* IEU1				*/
+	ldx		[%o1], %g2			/* Load		Group		*/
+	add		%o0, 8, %o0			/* IEU0				*/
+	add		%o1, 8, %o1			/* IEU0		Group		*/
+	stx		%g2, [%o0 - 0x8]		/* Store			*/
+85:	be,pt		%xcc, 1f			/* CTI				*/
+	 andcc		%o2, 2, %g0			/* IEU1		Group		*/
+	lduw		[%o1], %g2			/* Load		Group		*/
+	add		%o0, 4, %o0			/* IEU0				*/
+	add		%o1, 4, %o1			/* IEU0		Group		*/
+	stw		%g2, [%o0 - 0x4]		/* Store			*/
+1:	be,pt		%xcc, 1f			/* CTI				*/
+	 andcc		%o2, 1, %g0			/* IEU1		Group		*/
+	lduh		[%o1], %g2			/* Load		Group		*/
+	add		%o0, 2, %o0			/* IEU0				*/
+	add		%o1, 2, %o1			/* IEU0		Group		*/
+	sth		%g2, [%o0 - 0x2]		/* Store			*/
+1:	be,pt		%xcc, 1f			/* CTI				*/
+	 nop						/* IEU0		Group		*/
+	ldub		[%o1], %g2			/* Load		Group		*/
+	stb		%g2, [%o0]			/* Store	Group + bubble	*/
+1:	retl
+	 mov		%g4, %o0
+
+212:	brz,pt		%g2, 2f				/* CTI		Group		*/
+	 mov		8, %g1				/* IEU0				*/
+	sub		%g1, %g2, %g2			/* IEU0		Group		*/
+	sub		%o2, %g2, %o2			/* IEU0		Group		*/
+1:	ldub		[%o1], %g5			/* Load		Group		*/
+	add		%o1, 1, %o1			/* IEU0				*/
+	add		%o0, 1, %o0			/* IEU1				*/
+	subcc		%g2, 1, %g2			/* IEU1		Group		*/
+	bne,pt		%xcc, 1b			/* CTI				*/
+	 stb		%g5, [%o0 - 1]			/* Store			*/
+2:	andn		%o2, 7, %g5 			/* IEU0		Group		*/
+	and		%o2, 7, %o2			/* IEU1				*/
+	fmovd		%f0, %f2			/* FPU				*/
+	alignaddr	%o1, %g0, %g1			/* GRU		Group		*/
+	ldd		[%g1], %f4			/* Load		Group		*/
+1:	ldd		[%g1 + 0x8], %f6		/* Load		Group		*/
+	add		%g1, 0x8, %g1			/* IEU0		Group		*/
+	subcc		%g5, 8, %g5			/* IEU1				*/
+	faligndata	%f4, %f6, %f0			/* GRU		Group		*/
+	std		%f0, [%o0]			/* Store			*/
+	add		%o1, 8, %o1			/* IEU0		Group		*/
+	be,pn		%xcc, 213f			/* CTI				*/
+	 add		%o0, 8, %o0			/* IEU1				*/
+	ldd		[%g1 + 0x8], %f4		/* Load		Group		*/
+	add		%g1, 8, %g1			/* IEU0				*/
+	subcc		%g5, 8, %g5			/* IEU1				*/
+	faligndata	%f6, %f4, %f0			/* GRU		Group		*/
+	std		%f0, [%o0]			/* Store			*/
+	add		%o1, 8, %o1			/* IEU0				*/
+	bne,pn		%xcc, 1b			/* CTI		Group		*/
+	 add		%o0, 8, %o0			/* IEU0				*/
+213:	brz,pn		%o2, 214f			/* CTI		Group		*/
+	 nop						/* IEU0				*/
+	ldub		[%o1], %g5			/* LOAD				*/
+	add		%o1, 1, %o1			/* IEU0				*/
+	add		%o0, 1, %o0			/* IEU1				*/
+	subcc		%o2, 1, %o2			/* IEU1				*/
+	bne,pt		%xcc, 206b			/* CTI				*/
+	 stb		%g5, [%o0 - 1]			/* Store	Group		*/
+214:	wr		%g0, FPRS_FEF, %fprs
+	retl
+	 mov		%g4, %o0
+END(memcpy)
+libc_hidden_def(memcpy)
+
+	.align		32
+228:	andcc		%o2, 1, %g0			/* IEU1		Group		*/
+	be,pt		%icc, 2f+4			/* CTI				*/
+1:	 ldub		[%o1 - 1], %o5			/* LOAD		Group		*/
+	sub		%o1, 1, %o1			/* IEU0				*/
+	sub		%o0, 1, %o0			/* IEU1				*/
+	subcc		%o2, 1, %o2			/* IEU1		Group		*/
+	be,pn		%xcc, 229f			/* CTI				*/
+	 stb		%o5, [%o0]			/* Store			*/
+2:	ldub		[%o1 - 1], %o5			/* LOAD		Group		*/
+	sub		%o0, 2, %o0			/* IEU0				*/
+	ldub		[%o1 - 2], %g5			/* LOAD		Group		*/
+	sub		%o1, 2, %o1			/* IEU0				*/
+	subcc		%o2, 2, %o2			/* IEU1		Group		*/
+	stb		%o5, [%o0 + 1]			/* Store			*/
+	bne,pt		%xcc, 2b			/* CTI				*/
+	 stb		%g5, [%o0]			/* Store			*/
+229:	retl
+	 mov		%g4, %o0
+219:	retl
+	 nop
+
+	.align		32
+ENTRY(memmove)
+#ifndef USE_BPR
+	srl		%o2, 0, %o2			/* IEU1		Group		*/
+#endif
+	brz,pn		%o2, 219b			/* CTI		Group		*/
+	 sub		%o0, %o1, %o4			/* IEU0				*/
+	cmp		%o4, %o2			/* IEU1		Group		*/
+	bgeu,pt		%XCC, 218b			/* CTI				*/
+	 mov		%o0, %g4			/* IEU0				*/
+	add		%o0, %o2, %o0			/* IEU0		Group		*/
+220:	add		%o1, %o2, %o1			/* IEU1				*/
+	cmp		%o2, 15				/* IEU1		Group		*/
+	bleu,pn		%xcc, 228b			/* CTI				*/
+	 andcc		%o0, 7, %g2			/* IEU1		Group		*/
+	sub		%o0, %o1, %g5			/* IEU0				*/
+	andcc		%g5, 3, %o5			/* IEU1		Group		*/
+	bne,pn		%xcc, 232f			/* CTI				*/
+	 andcc		%o1, 3, %g0			/* IEU1		Group		*/
+	be,a,pt		%xcc, 236f			/* CTI				*/
+	 andcc		%o1, 4, %g0			/* IEU1		Group		*/
+	andcc		%o1, 1, %g0			/* IEU1		Group		*/
+	be,pn		%xcc, 4f			/* CTI				*/
+	 andcc		%o1, 2, %g0			/* IEU1		Group		*/
+	ldub		[%o1 - 1], %g2			/* Load		Group		*/
+	sub		%o1, 1, %o1			/* IEU0				*/
+	sub		%o0, 1, %o0			/* IEU1				*/
+	sub		%o2, 1, %o2			/* IEU0		Group		*/
+	be,pn		%xcc, 5f			/* CTI		Group		*/
+	 stb		%g2, [%o0]			/* Store			*/
+4:	lduh		[%o1 - 2], %g2			/* Load		Group		*/
+	sub		%o1, 2, %o1			/* IEU0				*/
+	sub		%o0, 2, %o0			/* IEU1				*/
+	sub		%o2, 2, %o2			/* IEU0				*/
+	sth		%g2, [%o0]			/* Store	Group + bubble	*/
+5:	andcc		%o1, 4, %g0			/* IEU1				*/
+236:	be,a,pn		%xcc, 2f			/* CTI				*/
+	 andcc		%o2, -128, %g6			/* IEU1		Group		*/
+	lduw		[%o1 - 4], %g5			/* Load		Group		*/
+	sub		%o1, 4, %o1			/* IEU0				*/
+	sub		%o0, 4, %o0			/* IEU1				*/
+	sub		%o2, 4, %o2			/* IEU0		Group		*/
+	stw		%g5, [%o0]			/* Store			*/
+	andcc		%o2, -128, %g6			/* IEU1		Group		*/
+2:	be,pn		%xcc, 235f			/* CTI				*/
+	 andcc		%o0, 4, %g0			/* IEU1		Group		*/
+	be,pn		%xcc, 282f + 4			/* CTI		Group		*/
+5:	RMOVE_BIGCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
+	RMOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5)
+	RMOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
+	RMOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5)
+	subcc		%g6, 128, %g6			/* IEU1		Group		*/
+	sub		%o1, 128, %o1			/* IEU0				*/
+	bne,pt		%xcc, 5b			/* CTI				*/
+	 sub		%o0, 128, %o0			/* IEU0		Group		*/
+235:	andcc		%o2, 0x70, %g6			/* IEU1		Group		*/
+41:	be,pn		%xcc, 280f			/* CTI				*/
+	 andcc		%o2, 8, %g0			/* IEU1		Group		*/
+							/* Clk1 8-(			*/
+							/* Clk2 8-(			*/
+							/* Clk3 8-(			*/
+							/* Clk4 8-(			*/
+279:	rd		%pc, %o5			/* PDU		Group		*/
+	sll		%g6, 1, %g5			/* IEU0		Group		*/
+	sub		%o1, %g6, %o1			/* IEU1				*/
+	sub		%o5, %g5, %o5			/* IEU0  	Group		*/
+	jmpl		%o5 + %lo(280f - 279b), %g0	/* CTI		Group brk forced*/
+	 sub		%o0, %g6, %o0			/* IEU0		Group		*/
+	RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5)
+	RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5)
+	RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5)
+	RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g5, o5)
+	RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g5, o5)
+	RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g5, o5)
+	RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g5, o5)
+280:	be,pt		%xcc, 281f			/* CTI				*/
+	 andcc		%o2, 4, %g0			/* IEU1				*/
+	ldx		[%o1 - 8], %g2			/* Load		Group		*/
+	sub		%o0, 8, %o0			/* IEU0				*/
+	stw		%g2, [%o0 + 4]			/* Store	Group		*/
+	sub		%o1, 8, %o1			/* IEU1				*/
+	srlx		%g2, 32, %g2			/* IEU0		Group		*/
+	stw		%g2, [%o0]			/* Store			*/
+281:	be,pt		%xcc, 1f			/* CTI				*/
+	 andcc		%o2, 2, %g0			/* IEU1		Group		*/
+	lduw		[%o1 - 4], %g2			/* Load		Group		*/
+	sub		%o1, 4, %o1			/* IEU0				*/
+	stw		%g2, [%o0 - 4]			/* Store	Group		*/
+	sub		%o0, 4, %o0			/* IEU0				*/
+1:	be,pt		%xcc, 1f			/* CTI				*/
+	 andcc		%o2, 1, %g0			/* IEU1		Group		*/
+	lduh		[%o1 - 2], %g2			/* Load		Group		*/
+	sub		%o1, 2, %o1			/* IEU0				*/
+	sth		%g2, [%o0 - 2]			/* Store	Group		*/
+	sub		%o0, 2, %o0			/* IEU0				*/
+1:	be,pt		%xcc, 211f			/* CTI				*/
+	 nop						/* IEU1				*/
+	ldub		[%o1 - 1], %g2			/* Load		Group		*/
+	stb		%g2, [%o0 - 1]			/* Store	Group + bubble	*/
+211:	retl
+	 mov		%g4, %o0
+
+282:	RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
+	RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
+	subcc		%g6, 128, %g6			/* IEU1		Group		*/
+	sub		%o1, 128, %o1			/* IEU0				*/
+	bne,pt		%xcc, 282b			/* CTI				*/
+	 sub		%o0, 128, %o0			/* IEU0		Group		*/
+	andcc		%o2, 0x70, %g6			/* IEU1				*/
+	be,pn		%xcc, 284f			/* CTI				*/
+	 andcc		%o2, 8, %g0			/* IEU1		Group		*/
+							/* Clk1 8-(			*/
+							/* Clk2 8-(			*/
+							/* Clk3 8-(			*/
+							/* Clk4 8-(			*/
+283:	rd		%pc, %o5			/* PDU		Group		*/
+	sub		%o1, %g6, %o1			/* IEU0		Group		*/
+	sub		%o5, %g6, %o5			/* IEU1				*/
+	jmpl		%o5 + %lo(284f - 283b), %g0	/* CTI		Group brk forced*/
+	 sub		%o0, %g6, %o0			/* IEU0		Group		*/
+	RMOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3)
+	RMOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3)
+	RMOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3)
+	RMOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3)
+	RMOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3)
+	RMOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3)
+	RMOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3)
+284:	be,pt		%xcc, 285f			/* CTI		Group		*/
+	 andcc		%o2, 4, %g0			/* IEU1				*/
+	ldx		[%o1 - 8], %g2			/* Load		Group		*/
+	sub		%o0, 8, %o0			/* IEU0				*/
+	sub		%o1, 8, %o1			/* IEU0		Group		*/
+	stx		%g2, [%o0]			/* Store			*/
+285:	be,pt		%xcc, 1f			/* CTI				*/
+	 andcc		%o2, 2, %g0			/* IEU1		Group		*/
+	lduw		[%o1 - 4], %g2			/* Load		Group		*/
+	sub		%o0, 4, %o0			/* IEU0				*/
+	sub		%o1, 4, %o1			/* IEU0		Group		*/
+	stw		%g2, [%o0]			/* Store			*/
+1:	be,pt		%xcc, 1f			/* CTI				*/
+	 andcc		%o2, 1, %g0			/* IEU1		Group		*/
+	lduh		[%o1 - 2], %g2			/* Load		Group		*/
+	sub		%o0, 2, %o0			/* IEU0				*/
+	sub		%o1, 2, %o1			/* IEU0		Group		*/
+	sth		%g2, [%o0]			/* Store			*/
+1:	be,pt		%xcc, 1f			/* CTI				*/
+	 nop						/* IEU0		Group		*/
+	ldub		[%o1 - 1], %g2			/* Load		Group		*/
+	stb		%g2, [%o0 - 1]			/* Store	Group + bubble	*/
+1:	retl
+	 mov		%g4, %o0
+
+232:	brz,pt		%g2, 2f				/* CTI		Group		*/
+	 sub		%o2, %g2, %o2			/* IEU0		Group		*/
+1:	ldub		[%o1 - 1], %g5			/* Load		Group		*/
+	sub		%o1, 1, %o1			/* IEU0				*/
+	sub		%o0, 1, %o0			/* IEU1				*/
+	subcc		%g2, 1, %g2			/* IEU1		Group		*/
+	bne,pt		%xcc, 1b			/* CTI				*/
+	 stb		%g5, [%o0]			/* Store			*/
+2:	andn		%o2, 7, %g5 			/* IEU0		Group		*/
+	and		%o2, 7, %o2			/* IEU1				*/
+	fmovd		%f0, %f2			/* FPU				*/
+	alignaddr	%o1, %g0, %g1			/* GRU		Group		*/
+	ldd		[%g1], %f4			/* Load		Group		*/
+1:	ldd		[%g1 - 8], %f6			/* Load		Group		*/
+	sub		%g1, 8, %g1			/* IEU0		Group		*/
+	subcc		%g5, 8, %g5			/* IEU1				*/
+	faligndata	%f6, %f4, %f0			/* GRU		Group		*/
+	std		%f0, [%o0 - 8]			/* Store			*/
+	sub		%o1, 8, %o1			/* IEU0		Group		*/
+	be,pn		%xcc, 233f			/* CTI				*/
+	 sub		%o0, 8, %o0			/* IEU1				*/
+	ldd		[%g1 - 8], %f4			/* Load		Group		*/
+	sub		%g1, 8, %g1			/* IEU0				*/
+	subcc		%g5, 8, %g5			/* IEU1				*/
+	faligndata	%f4, %f6, %f0			/* GRU		Group		*/
+	std		%f0, [%o0 - 8]			/* Store			*/
+	sub		%o1, 8, %o1			/* IEU0				*/
+	bne,pn		%xcc, 1b			/* CTI		Group		*/
+	 sub		%o0, 8, %o0			/* IEU0				*/
+233:	brz,pn		%o2, 234f			/* CTI		Group		*/
+	 nop						/* IEU0				*/
+237:	ldub		[%o1 - 1], %g5			/* LOAD				*/
+	sub		%o1, 1, %o1			/* IEU0				*/
+	sub		%o0, 1, %o0			/* IEU1				*/
+	subcc		%o2, 1, %o2			/* IEU1				*/
+	bne,pt		%xcc, 237b			/* CTI				*/
+	 stb		%g5, [%o0]			/* Store	Group		*/
+234:	wr		%g0, FPRS_FEF, %fprs
+	retl
+	 mov		%g4, %o0
+END(memmove)
+libc_hidden_def(memmove)
+
+#ifdef USE_BPR
+weak_alias(memcpy,__align_cpy_1)
+weak_alias(memcpy,__align_cpy_2)
+#endif
diff --git a/ap/build/uClibc/libc/string/sparc/sparc64/memset.S b/ap/build/uClibc/libc/string/sparc/sparc64/memset.S
new file mode 100644
index 0000000..50e404b
--- /dev/null
+++ b/ap/build/uClibc/libc/string/sparc/sparc64/memset.S
@@ -0,0 +1,317 @@
+/* Set a block of memory to some byte value.
+   For UltraSPARC.
+   Copyright (C) 1996, 97, 98, 99, 2003 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by David S. Miller (davem@caip.rutgers.edu) and
+                  Jakub Jelinek (jj@ultra.linux.cz).
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <features.h>
+#include <asm/asi.h>
+#ifndef XCC
+#define XCC xcc
+#define USE_BPR
+#endif
+#define FPRS_FEF	4
+
+#define SET_BLOCKS(base, offset, source)		\
+	stx		source, [base - offset - 0x18];	\
+	stx		source, [base - offset - 0x10];	\
+	stx		source, [base - offset - 0x08];	\
+	stx		source, [base - offset - 0x00];
+
+	/* Well, memset is a lot easier to get right than bcopy... */
+	.text
+	.align		32
+ENTRY(memset)
+	andcc		%o1, 0xff, %o1
+	mov		%o0, %o5
+	be,a,pt		%icc, 50f
+#ifndef USE_BPR
+	 srl		%o2, 0, %o1
+#else
+	 mov		%o2, %o1
+#endif
+	cmp		%o2, 7
+#ifndef USE_BPR
+	srl		%o2, 0, %o2
+#endif
+	bleu,pn		%XCC, 17f
+	 andcc		%o0, 3, %g5
+	be,pt		%xcc, 4f
+	 and		%o1, 0xff, %o1
+	cmp		%g5, 3
+	be,pn		%xcc, 2f
+	 stb		%o1, [%o0 + 0x00]
+	cmp		%g5, 2
+	be,pt		%xcc, 2f
+	 stb		%o1, [%o0 + 0x01]
+	stb		%o1, [%o0 + 0x02]
+2:	sub		%g5, 4, %g5
+	sub		%o0, %g5, %o0
+	add		%o2, %g5, %o2
+4:	sllx		%o1, 8, %g1
+	andcc		%o0, 4, %g0
+	or		%o1, %g1, %o1
+	sllx		%o1, 16, %g1
+	or		%o1, %g1, %o1
+	be,pt		%xcc, 2f
+	 sllx		%o1, 32, %g1
+	stw		%o1, [%o0]
+	sub		%o2, 4, %o2
+	add		%o0, 4, %o0
+2:	cmp		%o2, 128
+	or		%o1, %g1, %o1
+	blu,pn		%xcc, 9f
+	 andcc		%o0, 0x38, %g5
+	be,pn		%icc, 6f
+	 mov		64, %o4
+	andcc		%o0, 8, %g0
+	be,pn		%icc, 1f
+	 sub		%o4, %g5, %o4
+	stx		%o1, [%o0]
+	add		%o0, 8, %o0
+1:	andcc		%o4, 16, %g0
+	be,pn		%icc, 1f
+	 sub		%o2, %o4, %o2
+	stx		%o1, [%o0]
+	stx		%o1, [%o0 + 8]
+	add		%o0, 16, %o0
+1:	andcc		%o4, 32, %g0
+	be,pn		%icc, 7f
+	 andncc		%o2, 0x3f, %o3
+	stw		%o1, [%o0]
+	stw		%o1, [%o0 + 4]
+	stw		%o1, [%o0 + 8]
+	stw		%o1, [%o0 + 12]
+	stw		%o1, [%o0 + 16]
+	stw		%o1, [%o0 + 20]
+	stw		%o1, [%o0 + 24]
+	stw		%o1, [%o0 + 28]
+	add		%o0, 32, %o0
+7:	be,pn		%xcc, 9f
+	 nop
+	ldd		[%o0 - 8], %f0
+18:	wr		%g0, ASI_BLK_P, %asi
+	membar		#StoreStore | #LoadStore
+	andcc		%o3, 0xc0, %g5
+	and		%o2, 0x3f, %o2
+	fmovd		%f0, %f2
+	fmovd		%f0, %f4
+	andn		%o3, 0xff, %o3
+	fmovd		%f0, %f6
+	cmp		%g5, 64
+	fmovd		%f0, %f8
+	fmovd		%f0, %f10
+	fmovd		%f0, %f12
+	brz,pn		%g5, 10f
+	 fmovd		%f0, %f14
+	be,pn		%icc, 2f
+	 stda		%f0, [%o0 + 0x00] %asi
+	cmp		%g5, 128
+	be,pn		%icc, 2f
+	 stda		%f0, [%o0 + 0x40] %asi
+	stda		%f0, [%o0 + 0x80] %asi
+2:	brz,pn		%o3, 12f
+	 add		%o0, %g5, %o0
+10:	stda		%f0, [%o0 + 0x00] %asi
+	stda		%f0, [%o0 + 0x40] %asi
+	stda		%f0, [%o0 + 0x80] %asi
+	stda		%f0, [%o0 + 0xc0] %asi
+11:	subcc		%o3, 256, %o3
+	bne,pt		%xcc, 10b
+	 add		%o0, 256, %o0
+12:	wr		%g0, FPRS_FEF, %fprs
+	membar		#StoreLoad | #StoreStore
+9:	andcc		%o2, 0x78, %g5
+	be,pn		%xcc, 13f
+	 andcc		%o2, 7, %o2
+14:	rd		%pc, %o4
+	srl		%g5, 1, %o3
+	sub		%o4, %o3, %o4
+	jmpl		%o4 + (13f - 14b), %g0
+	 add		%o0, %g5, %o0
+12:	SET_BLOCKS	(%o0, 0x68, %o1)
+	SET_BLOCKS	(%o0, 0x48, %o1)
+	SET_BLOCKS	(%o0, 0x28, %o1)
+	SET_BLOCKS	(%o0, 0x08, %o1)
+13:	be,pn		%xcc, 8f
+	 andcc		%o2, 4, %g0
+	be,pn		%xcc, 1f
+	 andcc		%o2, 2, %g0
+	stw		%o1, [%o0]
+	add		%o0, 4, %o0
+1:	be,pn		%xcc, 1f
+	 andcc		%o2, 1, %g0
+	sth		%o1, [%o0]
+	add		%o0, 2, %o0
+1:	bne,a,pn	%xcc, 8f
+	 stb		%o1, [%o0]
+8:	retl
+	 mov		%o5, %o0
+17:	brz,pn		%o2, 0f
+8:	 add		%o0, 1, %o0
+	subcc		%o2, 1, %o2
+	bne,pt		%xcc, 8b
+	 stb		%o1, [%o0 - 1]
+0:	retl
+	 mov		%o5, %o0
+
+6:	stx		%o1, [%o0]
+	andncc		%o2, 0x3f, %o3
+	be,pn		%xcc, 9b
+	 nop
+	ba,pt		%xcc, 18b
+	 ldd		[%o0], %f0
+END(memset)
+libc_hidden_def(memset)
+
+#define ZERO_BLOCKS(base, offset, source)		\
+	stx		source, [base - offset - 0x38];	\
+	stx		source, [base - offset - 0x30];	\
+	stx		source, [base - offset - 0x28];	\
+	stx		source, [base - offset - 0x20];	\
+	stx		source, [base - offset - 0x18];	\
+	stx		source, [base - offset - 0x10];	\
+	stx		source, [base - offset - 0x08];	\
+	stx		source, [base - offset - 0x00];
+
+	.text
+	.align		32
+#ifdef __UCLIBC_SUSV3_LEGACY__
+ENTRY(bzero)
+#ifndef USE_BPR
+	srl		%o1, 0, %o1
+#endif
+	mov		%o0, %o5
+#endif
+50:	cmp		%o1, 7
+	bleu,pn		%xcc, 17f
+	 andcc		%o0, 3, %o2
+	be,a,pt		%xcc, 4f
+	 andcc		%o0, 4, %g0
+	cmp		%o2, 3
+	be,pn		%xcc, 2f
+	 stb		%g0, [%o0 + 0x00]
+	cmp		%o2, 2
+	be,pt		%xcc, 2f
+	 stb		%g0, [%o0 + 0x01]
+	stb		%g0, [%o0 + 0x02]
+2:	sub		%o2, 4, %o2
+	sub		%o0, %o2, %o0
+	add		%o1, %o2, %o1
+	andcc		%o0, 4, %g0
+4:	be,pt		%xcc, 2f
+	 cmp		%o1, 128
+	stw		%g0, [%o0]
+	sub		%o1, 4, %o1
+	add		%o0, 4, %o0
+2:	blu,pn		%xcc, 9f
+	 andcc		%o0, 0x38, %o2
+	be,pn		%icc, 6f
+	 mov		64, %o4
+	andcc		%o0, 8, %g0
+	be,pn		%icc, 1f
+	 sub		%o4, %o2, %o4
+	stx		%g0, [%o0]
+	add		%o0, 8, %o0
+1:	andcc		%o4, 16, %g0
+	be,pn		%icc, 1f
+	 sub		%o1, %o4, %o1
+	stx		%g0, [%o0]
+	stx		%g0, [%o0 + 8]
+	add		%o0, 16, %o0
+1:	andcc		%o4, 32, %g0
+	be,pn		%icc, 7f
+	 andncc		%o1, 0x3f, %o3
+	stx		%g0, [%o0]
+	stx		%g0, [%o0 + 8]
+	stx		%g0, [%o0 + 16]
+	stx		%g0, [%o0 + 24]
+	add		%o0, 32, %o0
+6:	andncc		%o1, 0x3f, %o3
+7:	be,pn		%xcc, 9f
+	 wr		%g0, ASI_BLK_P, %asi
+	membar		#StoreLoad | #StoreStore | #LoadStore
+	fzero		%f0
+	andcc		%o3, 0xc0, %o2
+	and		%o1, 0x3f, %o1
+	fzero		%f2
+	andn		%o3, 0xff, %o3
+	faddd		%f0, %f2, %f4
+	fmuld		%f0, %f2, %f6
+	cmp		%o2, 64
+	faddd		%f0, %f2, %f8
+	fmuld		%f0, %f2, %f10
+	faddd		%f0, %f2, %f12
+	brz,pn		%o2, 10f
+	 fmuld		%f0, %f2, %f14
+	be,pn		%icc, 2f
+	 stda		%f0, [%o0 + 0x00] %asi
+	cmp		%o2, 128
+	be,pn		%icc, 2f
+	 stda		%f0, [%o0 + 0x40] %asi
+	stda		%f0, [%o0 + 0x80] %asi
+2:	brz,pn		%o3, 12f
+	 add		%o0, %o2, %o0
+10:	stda		%f0, [%o0 + 0x00] %asi
+	stda		%f0, [%o0 + 0x40] %asi
+	stda		%f0, [%o0 + 0x80] %asi
+	stda		%f0, [%o0 + 0xc0] %asi
+11:	subcc		%o3, 256, %o3
+	bne,pt		%xcc, 10b
+	 add		%o0, 256, %o0
+12:	wr		%g0, FPRS_FEF, %fprs
+	membar		#StoreLoad | #StoreStore
+9:	andcc		%o1, 0xf8, %o2
+	be,pn		%xcc, 13f
+	 andcc		%o1, 7, %o1
+14:	rd		%pc, %o4
+	srl		%o2, 1, %o3
+	sub		%o4, %o3, %o4
+	jmpl		%o4 + (13f - 14b), %g0
+	 add		%o0, %o2, %o0
+12:	ZERO_BLOCKS	(%o0, 0xc8, %g0)
+	ZERO_BLOCKS	(%o0, 0x88, %g0)
+	ZERO_BLOCKS	(%o0, 0x48, %g0)
+	ZERO_BLOCKS	(%o0, 0x08, %g0)
+13:	be,pn		%xcc, 8f
+	 andcc		%o1, 4, %g0
+	be,pn		%xcc, 1f
+	 andcc		%o1, 2, %g0
+	stw		%g0, [%o0]
+	add		%o0, 4, %o0
+1:	be,pn		%xcc, 1f
+	 andcc		%o1, 1, %g0
+	sth		%g0, [%o0]
+	add		%o0, 2, %o0
+1:	bne,a,pn	%xcc, 8f
+	 stb		%g0, [%o0]
+8:	retl
+	 mov		%o5, %o0
+17:	be,pn		%xcc, 13b
+	 orcc		%o1, 0, %g0
+	be,pn		%xcc, 0f
+8:	 add		%o0, 1, %o0
+	subcc		%o1, 1, %o1
+	bne,pt		%xcc, 8b
+	 stb		%g0, [%o0 - 1]
+0:	retl
+	 mov		%o5, %o0
+#ifdef __UCLIBC_SUSV3_LEGACY__
+END(bzero)
+#endif
diff --git a/ap/build/uClibc/libc/string/sparc/sparc64/sparcv9b/memcpy.S b/ap/build/uClibc/libc/string/sparc/sparc64/sparcv9b/memcpy.S
new file mode 100644
index 0000000..64f6a92
--- /dev/null
+++ b/ap/build/uClibc/libc/string/sparc/sparc64/sparcv9b/memcpy.S
@@ -0,0 +1,612 @@
+/* Copy SIZE bytes from SRC to DEST.
+   For UltraSPARC-III.
+   Copyright (C) 2001, 2003 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by David S. Miller (davem@redhat.com)
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <features.h>
+
+#define ASI_BLK_P 0xf0
+#define FPRS_FEF  0x04
+#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+
+#ifndef XCC
+#define USE_BPR
+#define XCC xcc
+#endif
+
+	.register	%g2,#scratch
+	.register	%g3,#scratch
+	.register	%g6,#scratch
+
+	.text
+	.align	32
+
+#ifdef __UCLIBC_SUSV3_LEGACY__
+ENTRY(bcopy)
+	sub		%o1, %o0, %o4
+	mov		%o0, %g4
+	cmp		%o4, %o2
+	mov		%o1, %o0
+	bgeu,pt		%XCC, 100f
+	 mov		%g4, %o1
+#ifndef USE_BPR
+	srl		%o2, 0, %o2
+#endif
+	brnz,pn		%o2, 220f
+	 add		%o0, %o2, %o0
+	retl
+	 nop
+END(bcopy)
+#endif
+
+	/* Special/non-trivial issues of this code:
+	 *
+	 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
+	 * 2) Only low 32 FPU registers are used so that only the
+	 *    lower half of the FPU register set is dirtied by this
+	 *    code.  This is especially important in the kernel.
+	 * 3) This code never prefetches cachelines past the end
+	 *    of the source buffer.
+	 *
+	 * The cheetah's flexible spine, oversized liver, enlarged heart,
+	 * slender muscular body, and claws make it the swiftest hunter
+	 * in Africa and the fastest animal on land.  Can reach speeds
+	 * of up to 2.4GB per second.
+	 */
+	.align		32
+ENTRY(memcpy)
+
+100: /* %o0=dst, %o1=src, %o2=len */
+	mov		%o0, %g5
+	cmp		%o2, 0
+	be,pn		%XCC, out
+218:	 or		%o0, %o1, %o3
+	cmp		%o2, 16
+	bleu,a,pn	%XCC, small_copy
+	 or		%o3, %o2, %o3
+
+	cmp		%o2, 256
+	blu,pt		%XCC, medium_copy
+	 andcc		%o3, 0x7, %g0
+
+	ba,pt		%xcc, enter
+	 andcc		%o0, 0x3f, %g2
+
+	/* Here len >= 256 and the condition codes reflect the execution
+	 * of "andcc %o0, 0x3f, %g2" in the delay slot of the branch above.
+	 */
+	.align		64
+enter:
+	/* Is 'dst' already aligned on a 64-byte boundary? */
+	be,pt		%XCC, 2f
+
+	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
+	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
+	 * subtract this from 'len'.
+	 */
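+	/* Editor's sketch (not part of the original source): in C terms the
+	 * three 'sub' instructions below compute
+	 *
+	 *     size_t pad = 0x40 - ((uintptr_t) dst & 0x3f);
+	 *     len -= pad;
+	 *
+	 * i.e. %g2 ends up holding the byte count that brings dst up to the
+	 * next 64-byte boundary, and that count is pre-subtracted from len.
+	 */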
+	 sub		%g2, 0x40, %g2
+	sub		%g0, %g2, %g2
+	sub		%o2, %g2, %o2
+
+	/* Copy %g2 bytes from src to dst, one byte at a time. */
+1:	ldub		[%o1 + 0x00], %o3
+	add		%o1, 0x1, %o1
+	add		%o0, 0x1, %o0
+	subcc		%g2, 0x1, %g2
+
+	bg,pt		%XCC, 1b
+	 stb		%o3, [%o0 + -1]
+
+2:	VISEntryHalf
+	and		%o1, 0x7, %g1
+	ba,pt		%xcc, begin
+	 alignaddr	%o1, %g0, %o1
+
+	.align		64
+begin:
+	prefetch	[%o1 + 0x000], #one_read
+	prefetch	[%o1 + 0x040], #one_read
+	andn		%o2, (0x40 - 1), %o4
+	prefetch	[%o1 + 0x080], #one_read
+	prefetch	[%o1 + 0x0c0], #one_read
+	ldd		[%o1 + 0x000], %f0
+	prefetch	[%o1 + 0x100], #one_read
+	ldd		[%o1 + 0x008], %f2
+	prefetch	[%o1 + 0x140], #one_read
+	ldd		[%o1 + 0x010], %f4
+	prefetch	[%o1 + 0x180], #one_read
+	faligndata	%f0, %f2, %f16
+	ldd		[%o1 + 0x018], %f6
+	faligndata	%f2, %f4, %f18
+	ldd		[%o1 + 0x020], %f8
+	faligndata	%f4, %f6, %f20
+	ldd		[%o1 + 0x028], %f10
+	faligndata	%f6, %f8, %f22
+
+	ldd		[%o1 + 0x030], %f12
+	faligndata	%f8, %f10, %f24
+	ldd		[%o1 + 0x038], %f14
+	faligndata	%f10, %f12, %f26
+	ldd		[%o1 + 0x040], %f0
+
+	sub		%o4, 0x80, %o4
+	add		%o1, 0x40, %o1
+	ba,pt		%xcc, loop
+	 srl		%o4, 6, %o3
+
+	.align		64
+loop:
+	ldd		[%o1 + 0x008], %f2
+	faligndata	%f12, %f14, %f28
+	ldd		[%o1 + 0x010], %f4
+	faligndata	%f14, %f0, %f30
+	stda		%f16, [%o0] ASI_BLK_P
+	ldd		[%o1 + 0x018], %f6
+	faligndata	%f0, %f2, %f16
+
+	ldd		[%o1 + 0x020], %f8
+	faligndata	%f2, %f4, %f18
+	ldd		[%o1 + 0x028], %f10
+	faligndata	%f4, %f6, %f20
+	ldd		[%o1 + 0x030], %f12
+	faligndata	%f6, %f8, %f22
+	ldd		[%o1 + 0x038], %f14
+	faligndata	%f8, %f10, %f24
+
+	ldd		[%o1 + 0x040], %f0
+	prefetch	[%o1 + 0x180], #one_read
+	faligndata	%f10, %f12, %f26
+	subcc		%o3, 0x01, %o3
+	add		%o1, 0x40, %o1
+	bg,pt		%XCC, loop
+	 add		%o0, 0x40, %o0
+
+	/* Finally we copy the last full 64-byte block. */
+loopfini:
+	ldd		[%o1 + 0x008], %f2
+	faligndata	%f12, %f14, %f28
+	ldd		[%o1 + 0x010], %f4
+	faligndata	%f14, %f0, %f30
+	stda		%f16, [%o0] ASI_BLK_P
+	ldd		[%o1 + 0x018], %f6
+	faligndata	%f0, %f2, %f16
+	ldd		[%o1 + 0x020], %f8
+	faligndata	%f2, %f4, %f18
+	ldd		[%o1 + 0x028], %f10
+	faligndata	%f4, %f6, %f20
+	ldd		[%o1 + 0x030], %f12
+	faligndata	%f6, %f8, %f22
+	ldd		[%o1 + 0x038], %f14
+	faligndata	%f8, %f10, %f24
+	cmp		%g1, 0
+	be,pt		%XCC, 1f
+	 add		%o0, 0x40, %o0
+	ldd		[%o1 + 0x040], %f0
+1:	faligndata	%f10, %f12, %f26
+	faligndata	%f12, %f14, %f28
+	faligndata	%f14, %f0, %f30
+	stda		%f16, [%o0] ASI_BLK_P
+	add		%o0, 0x40, %o0
+	add		%o1, 0x40, %o1
+	membar		#Sync
+
+	/* Now we copy the (len modulo 64) bytes at the end.
+	 * Note how we borrow the %f0 loaded above.
+	 *
+	 * Also notice how this code is careful not to perform a
+	 * load past the end of the src buffer.
+	 */
+loopend:
+	and		%o2, 0x3f, %o2
+	andcc		%o2, 0x38, %g2
+	be,pn		%XCC, endcruft
+	 subcc		%g2, 0x8, %g2
+	be,pn		%XCC, endcruft
+	 cmp		%g1, 0
+
+	be,a,pt		%XCC, 1f
+	 ldd		[%o1 + 0x00], %f0
+
+1:	ldd		[%o1 + 0x08], %f2
+	add		%o1, 0x8, %o1
+	sub		%o2, 0x8, %o2
+	subcc		%g2, 0x8, %g2
+	faligndata	%f0, %f2, %f8
+	std		%f8, [%o0 + 0x00]
+	be,pn		%XCC, endcruft
+	 add		%o0, 0x8, %o0
+	ldd		[%o1 + 0x08], %f0
+	add		%o1, 0x8, %o1
+	sub		%o2, 0x8, %o2
+	subcc		%g2, 0x8, %g2
+	faligndata	%f2, %f0, %f8
+	std		%f8, [%o0 + 0x00]
+	bne,pn		%XCC, 1b
+	 add		%o0, 0x8, %o0
+
+	/* If anything is left, we copy it one byte at a time.
+	 * Note that %g1 is (src & 0x7) saved above before the
+	 * alignaddr was performed.
+	 */
+endcruft:
+	cmp		%o2, 0
+	add		%o1, %g1, %o1
+	VISExitHalf
+	be,pn		%XCC, out
+	 sub		%o0, %o1, %o3
+
+	andcc		%g1, 0x7, %g0
+	bne,pn		%icc, small_copy_unaligned
+	 andcc		%o2, 0x8, %g0
+	be,pt		%icc, 1f
+	 nop
+	ldx		[%o1], %o5
+	stx		%o5, [%o1 + %o3]
+	add		%o1, 0x8, %o1
+
+1:	andcc		%o2, 0x4, %g0
+	be,pt		%icc, 1f
+	 nop
+	lduw		[%o1], %o5
+	stw		%o5, [%o1 + %o3]
+	add		%o1, 0x4, %o1
+
+1:	andcc		%o2, 0x2, %g0
+	be,pt		%icc, 1f
+	 nop
+	lduh		[%o1], %o5
+	sth		%o5, [%o1 + %o3]
+	add		%o1, 0x2, %o1
+
+1:	andcc		%o2, 0x1, %g0
+	be,pt		%icc, out
+	 nop
+	ldub		[%o1], %o5
+	ba,pt		%xcc, out
+	 stb		%o5, [%o1 + %o3]
+
+medium_copy: /* 16 < len < 256 */
+	bne,pn		%XCC, small_copy_unaligned
+	 sub		%o0, %o1, %o3
+
+medium_copy_aligned:
+	andn		%o2, 0x7, %o4
+	and		%o2, 0x7, %o2
+1:	subcc		%o4, 0x8, %o4
+	ldx		[%o1], %o5
+	stx		%o5, [%o1 + %o3]
+	bgu,pt		%XCC, 1b
+	 add		%o1, 0x8, %o1
+	andcc		%o2, 0x4, %g0
+	be,pt		%XCC, 1f
+	 nop
+	sub		%o2, 0x4, %o2
+	lduw		[%o1], %o5
+	stw		%o5, [%o1 + %o3]
+	add		%o1, 0x4, %o1
+1:	cmp		%o2, 0
+	be,pt		%XCC, out
+	 nop
+	ba,pt		%xcc, small_copy_unaligned
+	 nop
+
+small_copy: /* 0 < len <= 16 */
+	andcc		%o3, 0x3, %g0
+	bne,pn		%XCC, small_copy_unaligned
+	 sub		%o0, %o1, %o3
+
+small_copy_aligned:
+	subcc		%o2, 4, %o2
+	lduw		[%o1], %g1
+	stw		%g1, [%o1 + %o3]
+	bgu,pt		%XCC, small_copy_aligned
+	 add		%o1, 4, %o1
+
+out:	retl
+	 mov		%g5, %o0
+
+	.align	32
+small_copy_unaligned:
+	subcc		%o2, 1, %o2
+	ldub		[%o1], %g1
+	stb		%g1, [%o1 + %o3]
+	bgu,pt		%XCC, small_copy_unaligned
+	 add		%o1, 1, %o1
+	retl
+	 mov		%g5, %o0
+
+END(memcpy)
+libc_hidden_def(memcpy)
+
+#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3)	\
+	ldx		[%src - offset - 0x20], %t0; 		\
+	ldx		[%src - offset - 0x18], %t1; 		\
+	ldx		[%src - offset - 0x10], %t2; 		\
+	ldx		[%src - offset - 0x08], %t3; 		\
+	stw		%t0, [%dst - offset - 0x1c]; 		\
+	srlx		%t0, 32, %t0;				\
+	stw		%t0, [%dst - offset - 0x20]; 		\
+	stw		%t1, [%dst - offset - 0x14]; 		\
+	srlx		%t1, 32, %t1;				\
+	stw		%t1, [%dst - offset - 0x18]; 		\
+	stw		%t2, [%dst - offset - 0x0c]; 		\
+	srlx		%t2, 32, %t2;				\
+	stw		%t2, [%dst - offset - 0x10]; 		\
+	stw		%t3, [%dst - offset - 0x04];		\
+	srlx		%t3, 32, %t3;				\
+	stw		%t3, [%dst - offset - 0x08];
+
+#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3)	\
+	ldx		[%src - offset - 0x20], %t0; 		\
+	ldx		[%src - offset - 0x18], %t1; 		\
+	ldx		[%src - offset - 0x10], %t2; 		\
+	ldx		[%src - offset - 0x08], %t3; 		\
+	stx		%t0, [%dst - offset - 0x20]; 		\
+	stx		%t1, [%dst - offset - 0x18]; 		\
+	stx		%t2, [%dst - offset - 0x10]; 		\
+	stx		%t3, [%dst - offset - 0x08];		\
+	ldx		[%src - offset - 0x40], %t0; 		\
+	ldx		[%src - offset - 0x38], %t1; 		\
+	ldx		[%src - offset - 0x30], %t2; 		\
+	ldx		[%src - offset - 0x28], %t3; 		\
+	stx		%t0, [%dst - offset - 0x40]; 		\
+	stx		%t1, [%dst - offset - 0x38]; 		\
+	stx		%t2, [%dst - offset - 0x30]; 		\
+	stx		%t3, [%dst - offset - 0x28];
+
+#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3)	\
+	ldx		[%src + offset + 0x00], %t0;		\
+	ldx		[%src + offset + 0x08], %t1; 		\
+	stw		%t0, [%dst + offset + 0x04]; 		\
+	srlx		%t0, 32, %t2;				\
+	stw		%t2, [%dst + offset + 0x00]; 		\
+	stw		%t1, [%dst + offset + 0x0c]; 		\
+	srlx		%t1, 32, %t3;				\
+	stw		%t3, [%dst + offset + 0x08];
+
+#define RMOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1)		\
+	ldx		[%src + offset + 0x00], %t0; 		\
+	ldx		[%src + offset + 0x08], %t1; 		\
+	stx		%t0, [%dst + offset + 0x00]; 		\
+	stx		%t1, [%dst + offset + 0x08];
+
+	.align		32
+228:	andcc		%o2, 1, %g0			/* IEU1		Group		*/
+	be,pt		%icc, 2f+4			/* CTI				*/
+1:	 ldub		[%o1 - 1], %o5			/* LOAD		Group		*/
+	sub		%o1, 1, %o1			/* IEU0				*/
+	sub		%o0, 1, %o0			/* IEU1				*/
+	subcc		%o2, 1, %o2			/* IEU1		Group		*/
+	be,pn		%xcc, 229f			/* CTI				*/
+	 stb		%o5, [%o0]			/* Store			*/
+2:	ldub		[%o1 - 1], %o5			/* LOAD		Group		*/
+	sub		%o0, 2, %o0			/* IEU0				*/
+	ldub		[%o1 - 2], %g5			/* LOAD		Group		*/
+	sub		%o1, 2, %o1			/* IEU0				*/
+	subcc		%o2, 2, %o2			/* IEU1		Group		*/
+	stb		%o5, [%o0 + 1]			/* Store			*/
+	bne,pt		%xcc, 2b			/* CTI				*/
+	 stb		%g5, [%o0]			/* Store			*/
+229:	retl
+	 mov		%g4, %o0
+
+	.align		32
+ENTRY(memmove)
+	mov		%o0, %g5
+#ifndef USE_BPR
+	srl		%o2, 0, %o2			/* IEU1		Group		*/
+#endif
+	brz,pn		%o2, out			/* CTI		Group		*/
+	 sub		%o0, %o1, %o4			/* IEU0				*/
+	cmp		%o4, %o2			/* IEU1		Group		*/
+	bgeu,pt		%XCC, 218b			/* CTI				*/
+	 mov		%o0, %g4			/* IEU0				*/
+	add		%o0, %o2, %o0			/* IEU0		Group		*/
+220:	add		%o1, %o2, %o1			/* IEU1				*/
+	cmp		%o2, 15				/* IEU1		Group		*/
+	bleu,pn		%xcc, 228b			/* CTI				*/
+	 andcc		%o0, 7, %g2			/* IEU1		Group		*/
+	sub		%o0, %o1, %g5			/* IEU0				*/
+	andcc		%g5, 3, %o5			/* IEU1		Group		*/
+	bne,pn		%xcc, 232f			/* CTI				*/
+	 andcc		%o1, 3, %g0			/* IEU1		Group		*/
+	be,a,pt		%xcc, 236f			/* CTI				*/
+	 andcc		%o1, 4, %g0			/* IEU1		Group		*/
+	andcc		%o1, 1, %g0			/* IEU1		Group		*/
+	be,pn		%xcc, 4f			/* CTI				*/
+	 andcc		%o1, 2, %g0			/* IEU1		Group		*/
+	ldub		[%o1 - 1], %g2			/* Load		Group		*/
+	sub		%o1, 1, %o1			/* IEU0				*/
+	sub		%o0, 1, %o0			/* IEU1				*/
+	sub		%o2, 1, %o2			/* IEU0		Group		*/
+	be,pn		%xcc, 5f			/* CTI		Group		*/
+	 stb		%g2, [%o0]			/* Store			*/
+4:	lduh		[%o1 - 2], %g2			/* Load		Group		*/
+	sub		%o1, 2, %o1			/* IEU0				*/
+	sub		%o0, 2, %o0			/* IEU1				*/
+	sub		%o2, 2, %o2			/* IEU0				*/
+	sth		%g2, [%o0]			/* Store	Group + bubble	*/
+5:	andcc		%o1, 4, %g0			/* IEU1				*/
+236:	be,a,pn		%xcc, 2f			/* CTI				*/
+	 andcc		%o2, -128, %g6			/* IEU1		Group		*/
+	lduw		[%o1 - 4], %g5			/* Load		Group		*/
+	sub		%o1, 4, %o1			/* IEU0				*/
+	sub		%o0, 4, %o0			/* IEU1				*/
+	sub		%o2, 4, %o2			/* IEU0		Group		*/
+	stw		%g5, [%o0]			/* Store			*/
+	andcc		%o2, -128, %g6			/* IEU1		Group		*/
+2:	be,pn		%xcc, 235f			/* CTI				*/
+	 andcc		%o0, 4, %g0			/* IEU1		Group		*/
+	be,pn		%xcc, 282f + 4			/* CTI		Group		*/
+5:	RMOVE_BIGCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
+	RMOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5)
+	RMOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
+	RMOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5)
+	subcc		%g6, 128, %g6			/* IEU1		Group		*/
+	sub		%o1, 128, %o1			/* IEU0				*/
+	bne,pt		%xcc, 5b			/* CTI				*/
+	 sub		%o0, 128, %o0			/* IEU0		Group		*/
+235:	andcc		%o2, 0x70, %g6			/* IEU1		Group		*/
+41:	be,pn		%xcc, 280f			/* CTI				*/
+	 andcc		%o2, 8, %g0			/* IEU1		Group		*/
+							/* Clk1 8-(			*/
+							/* Clk2 8-(			*/
+							/* Clk3 8-(			*/
+							/* Clk4 8-(			*/
+279:	rd		%pc, %o5			/* PDU		Group		*/
+	sll		%g6, 1, %g5			/* IEU0		Group		*/
+	sub		%o1, %g6, %o1			/* IEU1				*/
+	sub		%o5, %g5, %o5			/* IEU0  	Group		*/
+	jmpl		%o5 + %lo(280f - 279b), %g0	/* CTI		Group brk forced*/
+	 sub		%o0, %g6, %o0			/* IEU0		Group		*/
+	RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5)
+	RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5)
+	RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5)
+	RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g5, o5)
+	RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g5, o5)
+	RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g5, o5)
+	RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g5, o5)
+280:	be,pt		%xcc, 281f			/* CTI				*/
+	 andcc		%o2, 4, %g0			/* IEU1				*/
+	ldx		[%o1 - 8], %g2			/* Load		Group		*/
+	sub		%o0, 8, %o0			/* IEU0				*/
+	stw		%g2, [%o0 + 4]			/* Store	Group		*/
+	sub		%o1, 8, %o1			/* IEU1				*/
+	srlx		%g2, 32, %g2			/* IEU0		Group		*/
+	stw		%g2, [%o0]			/* Store			*/
+281:	be,pt		%xcc, 1f			/* CTI				*/
+	 andcc		%o2, 2, %g0			/* IEU1		Group		*/
+	lduw		[%o1 - 4], %g2			/* Load		Group		*/
+	sub		%o1, 4, %o1			/* IEU0				*/
+	stw		%g2, [%o0 - 4]			/* Store	Group		*/
+	sub		%o0, 4, %o0			/* IEU0				*/
+1:	be,pt		%xcc, 1f			/* CTI				*/
+	 andcc		%o2, 1, %g0			/* IEU1		Group		*/
+	lduh		[%o1 - 2], %g2			/* Load		Group		*/
+	sub		%o1, 2, %o1			/* IEU0				*/
+	sth		%g2, [%o0 - 2]			/* Store	Group		*/
+	sub		%o0, 2, %o0			/* IEU0				*/
+1:	be,pt		%xcc, 211f			/* CTI				*/
+	 nop						/* IEU1				*/
+	ldub		[%o1 - 1], %g2			/* Load		Group		*/
+	stb		%g2, [%o0 - 1]			/* Store	Group + bubble	*/
+211:	retl
+	 mov		%g4, %o0
+
+282:	RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
+	RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
+	subcc		%g6, 128, %g6			/* IEU1		Group		*/
+	sub		%o1, 128, %o1			/* IEU0				*/
+	bne,pt		%xcc, 282b			/* CTI				*/
+	 sub		%o0, 128, %o0			/* IEU0		Group		*/
+	andcc		%o2, 0x70, %g6			/* IEU1				*/
+	be,pn		%xcc, 284f			/* CTI				*/
+	 andcc		%o2, 8, %g0			/* IEU1		Group		*/
+							/* Clk1 8-(			*/
+							/* Clk2 8-(			*/
+							/* Clk3 8-(			*/
+							/* Clk4 8-(			*/
+283:	rd		%pc, %o5			/* PDU		Group		*/
+	sub		%o1, %g6, %o1			/* IEU0		Group		*/
+	sub		%o5, %g6, %o5			/* IEU1				*/
+	jmpl		%o5 + %lo(284f - 283b), %g0	/* CTI		Group brk forced*/
+	 sub		%o0, %g6, %o0			/* IEU0		Group		*/
+	RMOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3)
+	RMOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3)
+	RMOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3)
+	RMOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3)
+	RMOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3)
+	RMOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3)
+	RMOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3)
+284:	be,pt		%xcc, 285f			/* CTI		Group		*/
+	 andcc		%o2, 4, %g0			/* IEU1				*/
+	ldx		[%o1 - 8], %g2			/* Load		Group		*/
+	sub		%o0, 8, %o0			/* IEU0				*/
+	sub		%o1, 8, %o1			/* IEU0		Group		*/
+	stx		%g2, [%o0]			/* Store			*/
+285:	be,pt		%xcc, 1f			/* CTI				*/
+	 andcc		%o2, 2, %g0			/* IEU1		Group		*/
+	lduw		[%o1 - 4], %g2			/* Load		Group		*/
+	sub		%o0, 4, %o0			/* IEU0				*/
+	sub		%o1, 4, %o1			/* IEU0		Group		*/
+	stw		%g2, [%o0]			/* Store			*/
+1:	be,pt		%xcc, 1f			/* CTI				*/
+	 andcc		%o2, 1, %g0			/* IEU1		Group		*/
+	lduh		[%o1 - 2], %g2			/* Load		Group		*/
+	sub		%o0, 2, %o0			/* IEU0				*/
+	sub		%o1, 2, %o1			/* IEU0		Group		*/
+	sth		%g2, [%o0]			/* Store			*/
+1:	be,pt		%xcc, 1f			/* CTI				*/
+	 nop						/* IEU0		Group		*/
+	ldub		[%o1 - 1], %g2			/* Load		Group		*/
+	stb		%g2, [%o0 - 1]			/* Store	Group + bubble	*/
+1:	retl
+	 mov		%g4, %o0
+
+232:	brz,pt		%g2, 2f				/* CTI		Group		*/
+	 sub		%o2, %g2, %o2			/* IEU0		Group		*/
+1:	ldub		[%o1 - 1], %g5			/* Load		Group		*/
+	sub		%o1, 1, %o1			/* IEU0				*/
+	sub		%o0, 1, %o0			/* IEU1				*/
+	subcc		%g2, 1, %g2			/* IEU1		Group		*/
+	bne,pt		%xcc, 1b			/* CTI				*/
+	 stb		%g5, [%o0]			/* Store			*/
+2:	andn		%o2, 7, %g5 			/* IEU0		Group		*/
+	and		%o2, 7, %o2			/* IEU1				*/
+	fmovd		%f0, %f2			/* FPU				*/
+	alignaddr	%o1, %g0, %g1			/* GRU		Group		*/
+	ldd		[%g1], %f4			/* Load		Group		*/
+1:	ldd		[%g1 - 8], %f6			/* Load		Group		*/
+	sub		%g1, 8, %g1			/* IEU0		Group		*/
+	subcc		%g5, 8, %g5			/* IEU1				*/
+	faligndata	%f6, %f4, %f0			/* GRU		Group		*/
+	std		%f0, [%o0 - 8]			/* Store			*/
+	sub		%o1, 8, %o1			/* IEU0		Group		*/
+	be,pn		%xcc, 233f			/* CTI				*/
+	 sub		%o0, 8, %o0			/* IEU1				*/
+	ldd		[%g1 - 8], %f4			/* Load		Group		*/
+	sub		%g1, 8, %g1			/* IEU0				*/
+	subcc		%g5, 8, %g5			/* IEU1				*/
+	faligndata	%f4, %f6, %f0			/* GRU		Group		*/
+	std		%f0, [%o0 - 8]			/* Store			*/
+	sub		%o1, 8, %o1			/* IEU0				*/
+	bne,pn		%xcc, 1b			/* CTI		Group		*/
+	 sub		%o0, 8, %o0			/* IEU0				*/
+233:	brz,pn		%o2, 234f			/* CTI		Group		*/
+	 nop						/* IEU0				*/
+237:	ldub		[%o1 - 1], %g5			/* LOAD				*/
+	sub		%o1, 1, %o1			/* IEU0				*/
+	sub		%o0, 1, %o0			/* IEU1				*/
+	subcc		%o2, 1, %o2			/* IEU1				*/
+	bne,pt		%xcc, 237b			/* CTI				*/
+	 stb		%g5, [%o0]			/* Store	Group		*/
+234:	wr		%g0, FPRS_FEF, %fprs
+	retl
+	 mov		%g4, %o0
+END(memmove)
+libc_hidden_def(memmove)
+
+#ifdef USE_BPR
+weak_alias(memcpy,__align_cpy_1)
+weak_alias(memcpy,__align_cpy_2)
+weak_alias(memcpy,__align_cpy_4)
+weak_alias(memcpy,__align_cpy_8)
+weak_alias(memcpy,__align_cpy_16)
+#endif
diff --git a/ap/build/uClibc/libc/string/sparc/sparc64/stpcpy.S b/ap/build/uClibc/libc/string/sparc/sparc64/stpcpy.S
new file mode 100644
index 0000000..8c26c6b
--- /dev/null
+++ b/ap/build/uClibc/libc/string/sparc/sparc64/stpcpy.S
@@ -0,0 +1,271 @@
+/* Copy SRC to DEST returning the address of the terminating '\0' in DEST.
+   For SPARC v9.
+   Copyright (C) 1998, 1999, 2002, 2003, 2004 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Jan Vondrak <jvon4518@ss1000.ms.mff.cuni.cz> and
+                  Jakub Jelinek <jj@ultra.linux.cz>.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <asm/asi.h>
+#ifndef XCC
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+	.register	%g6, #scratch
+#endif
+
+	/* Normally, this uses
+	   ((xword - 0x0101010101010101) & 0x8080808080808080) test
+	   to find out if any byte in xword could be zero. This is fast, but
+	   also gives false alarm for any byte in range 0x81-0xff. It does
+	   not matter for correctness, as if this test tells us there could
+	   be some zero byte, we check it byte by byte, but if bytes with
+	   high bits set are common in the strings, then this will give poor
+	   performance. You can #define EIGHTBIT_NOT_RARE and the algorithm
+	   will use one tick slower, but more precise test
+	   ((xword - 0x0101010101010101) & (~xword) & 0x8080808080808080),
+	   which does not give any false alarms (but if some bits are set,
+	   one cannot assume from it which bytes are zero and which are not).
+	   It is yet to be measured, what is the correct default for glibc
+	   in these days for an average user.
+	 */
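+	/* Purely as an illustrative C sketch of the two tests described
+	   above (not part of this file; assumes 64-bit words):
+
+	     // fast test, may false-alarm on bytes in 0x81-0xff
+	     int may_be_zero(unsigned long long x)
+	     { return ((x - 0x0101010101010101ULL)
+	                & 0x8080808080808080ULL) != 0; }
+
+	     // exact test, one tick slower (EIGHTBIT_NOT_RARE)
+	     int is_zero_somewhere(unsigned long long x)
+	     { return ((x - 0x0101010101010101ULL) & ~x
+	                & 0x8080808080808080ULL) != 0; }
+	 */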
+
+	.text
+	.align		32
+ENTRY(stpcpy)
+	sethi		%hi(0x01010101), %g1		/* IEU0		Group		*/
+	or		%g1, %lo(0x01010101), %g1	/* IEU0		Group		*/
+	andcc		%o0, 7, %g0			/* IEU1				*/
+	sllx		%g1, 32, %g2			/* IEU0		Group		*/
+
+	bne,pn		%icc, 12f			/* CTI				*/
+	 andcc		%o1, 7, %g3			/* IEU1				*/
+	or		%g1, %g2, %g1			/* IEU0		Group		*/
+	bne,pn		%icc, 14f			/* CTI				*/
+
+	 sllx		%g1, 7, %g2			/* IEU0		Group		*/
+1:	ldx		[%o1], %o3			/* Load				*/
+	add		%o1, 8, %o1			/* IEU1				*/
+2:	mov		%o3, %g3			/* IEU0		Group		*/
+
+	sub		%o3, %g1, %o2			/* IEU1				*/
+3:	ldxa		[%o1] ASI_PNF, %o3		/* Load				*/
+#ifdef EIGHTBIT_NOT_RARE
+	andn		%o2, %g3, %o2			/* IEU0		Group		*/
+#endif
+	add		%o0, 8, %o0			/* IEU0		Group		*/
+	andcc		%o2, %g2, %g0			/* IEU1				*/
+
+	add		%o1, 8, %o1			/* IEU0		Group		*/
+	be,a,pt		%xcc, 2b			/* CTI				*/
+	 stx		%g3, [%o0 - 8]			/* Store			*/
+	srlx		%g3, 56, %g5			/* IEU0		Group		*/
+
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 11f			/* CTI				*/
+	 srlx		%g3, 48, %g4			/* IEU0				*/
+	andcc		%g4, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 10f			/* CTI				*/
+	 srlx		%g3, 40, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 9f			/* CTI				*/
+
+	 srlx		%g3, 32, %g4			/* IEU0				*/
+	andcc		%g4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 8f			/* CTI				*/
+	 srlx		%g3, 24, %g5			/* IEU0				*/
+
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 7f			/* CTI				*/
+	 srlx		%g3, 16, %g4			/* IEU0				*/
+	andcc		%g4, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 6f			/* CTI				*/
+	 srlx		%g3, 8, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 5f			/* CTI				*/
+
+	 sub		%o3, %g1, %o2			/* IEU0				*/
+	stx		%g3, [%o0 - 8]			/* Store	Group		*/
+	andcc		%g3, 0xff, %g0			/* IEU1				*/
+	bne,pt		%icc, 3b			/* CTI				*/
+
+	 mov		%o3, %g3			/* IEU0		Group		*/
+4:	retl						/* CTI+IEU1	Group		*/
+	 sub		%o0, 1, %o0			/* IEU0				*/
+
+	.align		16
+6:	ba,pt		%xcc, 23f			/* CTI		Group		*/
+	 sub		%o0, 3, %g6			/* IEU0				*/
+5:	sub		%o0, 2, %g6			/* IEU0		Group		*/
+	stb		%g5, [%o0 - 2]			/* Store			*/
+
+	srlx		%g3, 16, %g4			/* IEU0		Group		*/
+23:	sth		%g4, [%o0 - 4]			/* Store			*/
+	srlx		%g3, 32, %g4			/* IEU0		Group		*/
+	stw		%g4, [%o0 - 8]			/* Store			*/
+
+	retl						/* CTI+IEU1	Group		*/
+	 mov		%g6, %o0			/* IEU0				*/
+8:	ba,pt		%xcc, 24f			/* CTI		Group		*/
+	 sub		%o0, 5, %g6			/* IEU0				*/
+
+7:	sub		%o0, 4, %g6			/* IEU0		Group		*/
+	stb		%g5, [%o0 - 4]			/* Store			*/
+	srlx		%g3, 32, %g4			/* IEU0		Group		*/
+24:	stw		%g4, [%o0 - 8]			/* Store			*/
+
+	retl						/* CTI+IEU1	Group		*/
+	 mov		%g6, %o0 			/* IEU0				*/
+10:	ba,pt		%xcc, 25f			/* CTI		Group		*/
+	 sub		%o0, 7, %g6			/* IEU0				*/
+
+9:	sub		%o0, 6, %g6			/* IEU0		Group		*/
+	stb		%g5, [%o0 - 6]			/* Store			*/
+	srlx		%g3, 48, %g4			/* IEU0				*/
+25:	sth		%g4, [%o0 - 8]			/* Store	Group		*/
+
+	retl						/* CTI+IEU1	Group		*/
+	 mov		%g6, %o0			/* IEU0				*/
+11:	stb		%g5, [%o0 - 8]			/* Store	Group		*/
+	retl						/* CTI+IEU1	Group		*/
+
+	 sub		%o0, 8, %o0			/* IEU0				*/
+
+	.align		16
+12:	or		%g1, %g2, %g1			/* IEU0		Group		*/
+	ldub		[%o1], %o3			/* Load				*/
+	sllx		%g1, 7, %g2			/* IEU0		Group		*/
+	stb		%o3, [%o0]			/* Store	Group		*/
+
+13:	add		%o0, 1, %o0			/* IEU0				*/
+	add		%o1, 1, %o1			/* IEU1				*/
+	andcc		%o3, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 4b			/* CTI				*/
+
+	 lduba		[%o1] ASI_PNF, %o3		/* Load				*/
+	andcc		%o0, 7, %g0			/* IEU1		Group		*/
+	bne,a,pt	%icc, 13b			/* CTI				*/
+	 stb		%o3, [%o0]			/* Store			*/
+
+	andcc		%o1, 7, %g3			/* IEU1		Group		*/
+	be,a,pt		%icc, 1b			/* CTI				*/
+	 ldx		[%o1], %o3			/* Load				*/
+14:	orcc		%g0, 64, %g4			/* IEU1		Group		*/
+
+	sllx		%g3, 3, %g5			/* IEU0				*/
+	sub		%o1, %g3, %o1			/* IEU0		Group		*/
+	sub		%g4, %g5, %g4			/* IEU1				*/
+							/* %g1 = 0101010101010101	*
+							 * %g2 = 8080808080808080	*
+							 * %g3 = source alignment	*
+							 * %g5 = number of bits to shift left  *
+							 * %g4 = number of bits to shift right */
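+							/* For example, with source alignment
+							 * %g3 == 3: %g5 = 24 and %g4 = 40, so
+							 * each output doubleword below is
+							 * (prev << 24) | (next >> 40), i.e. two
+							 * aligned loads spliced into one
+							 * unaligned doubleword.		*/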
+	ldxa		[%o1] ASI_PNF, %o5		/* Load		Group		*/
+
+	addcc		%o1, 8, %o1			/* IEU1				*/
+15:	sllx		%o5, %g5, %o3			/* IEU0		Group		*/
+	ldxa		[%o1] ASI_PNF, %o5		/* Load				*/
+	srlx		%o5, %g4, %o4			/* IEU0		Group		*/
+
+	add		%o0, 8, %o0			/* IEU1				*/
+	or		%o3, %o4, %o3			/* IEU0		Group		*/
+	add		%o1, 8, %o1			/* IEU1				*/
+	sub		%o3, %g1, %o4			/* IEU0		Group		*/
+
+#ifdef EIGHTBIT_NOT_RARE
+	andn		%o4, %o3, %o4			/* IEU0		Group		*/
+#endif
+	andcc		%o4, %g2, %g0			/* IEU1		Group		*/
+	be,a,pt		%xcc, 15b			/* CTI				*/
+	 stx		%o3, [%o0 - 8]			/* Store			*/
+	srlx		%o3, 56, %o4			/* IEU0		Group		*/
+
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 22f			/* CTI				*/
+	 srlx		%o3, 48, %o4			/* IEU0				*/
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 21f			/* CTI				*/
+	 srlx		%o3, 40, %o4			/* IEU0				*/
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 20f			/* CTI				*/
+
+	 srlx		%o3, 32, %o4			/* IEU0				*/
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 19f			/* CTI				*/
+	 srlx		%o3, 24, %o4			/* IEU0				*/
+
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 18f			/* CTI				*/
+	 srlx		%o3, 16, %o4			/* IEU0				*/
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 17f			/* CTI				*/
+	 srlx		%o3, 8, %o4			/* IEU0				*/
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 16f			/* CTI				*/
+
+	 andcc		%o3, 0xff, %g0			/* IEU1		Group		*/
+	bne,pn		%icc, 15b			/* CTI				*/
+	 stx		%o3, [%o0 - 8]			/* Store			*/
+	retl						/* CTI+IEU1	Group		*/
+
+	 sub		%o0, 1, %o0			/* IEU0				*/
+
+	.align		16
+17:	ba,pt		%xcc, 26f			/* CTI		Group		*/
+	 subcc		%o0, 3, %g6			/* IEU1				*/
+18:	ba,pt		%xcc, 27f			/* CTI		Group		*/
+	 subcc		%o0, 4, %g6			/* IEU1				*/
+
+19:	ba,pt		%xcc, 28f			/* CTI		Group		*/
+	 subcc		%o0, 5, %g6			/* IEU1				*/
+16:	subcc		%o0, 2, %g6			/* IEU1		Group		*/
+	srlx		%o3, 8, %o4			/* IEU0				*/
+
+	stb		%o4, [%o0 - 2]			/* Store			*/
+26:	srlx		%o3, 16, %o4			/* IEU0		Group		*/
+	stb		%o4, [%o0 - 3]			/* Store			*/
+27:	srlx		%o3, 24, %o4			/* IEU0		Group		*/
+
+	stb		%o4, [%o0 - 4]			/* Store			*/
+28:	srlx		%o3, 32, %o4			/* IEU0		Group		*/
+	stw		%o4, [%o0 - 8]			/* Store			*/
+	retl						/* CTI+IEU1	Group		*/
+
+	 mov		%g6, %o0 			/* IEU0				*/
+
+	.align		16
+21:	ba,pt		%xcc, 29f			/* CTI		Group		*/
+	 subcc		%o0, 7, %g6			/* IEU1				*/
+22:	ba,pt		%xcc, 30f			/* CTI		Group		*/
+	 subcc		%o0, 8, %g6			/* IEU1				*/
+
+20:	subcc		%o0, 6, %g6			/* IEU1		Group		*/
+	srlx		%o3, 40, %o4			/* IEU0				*/
+	stb		%o4, [%o0 - 6]			/* Store			*/
+29:	srlx		%o3, 48, %o4			/* IEU0		Group		*/
+
+	stb		%o4, [%o0 - 7]			/* Store			*/
+30:	srlx		%o3, 56, %o4			/* IEU0		Group		*/
+	stb		%o4, [%o0 - 8]			/* Store			*/
+	retl						/* CTI+IEU1	Group		*/
+
+	 mov		%g6, %o0			/* IEU0				*/
+END(stpcpy)
+libc_hidden_def(stpcpy)
diff --git a/ap/build/uClibc/libc/string/sparc/sparc64/strcat.S b/ap/build/uClibc/libc/string/sparc/sparc64/strcat.S
new file mode 100644
index 0000000..fcc4ba5
--- /dev/null
+++ b/ap/build/uClibc/libc/string/sparc/sparc64/strcat.S
@@ -0,0 +1,339 @@
+/* strcat (dest, src) -- Append SRC on the end of DEST.
+   For SPARC v9.
+   Copyright (C) 1998, 1999, 2003 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Jakub Jelinek <jj@ultra.linux.cz> and
+		  Jan Vondrak <jvon4518@ss1000.ms.mff.cuni.cz>.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <asm/asi.h>
+#ifndef XCC
+#define XCC xcc
+#define USE_BPR
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+	.register	%g6, #scratch
+#endif
+
+	/* Normally, this uses
+	   ((xword - 0x0101010101010101) & 0x8080808080808080) test
+	   to find out if any byte in xword could be zero. This is fast, but
+	   also gives false alarm for any byte in range 0x81-0xff. It does
+	   not matter for correctness, as if this test tells us there could
+	   be some zero byte, we check it byte by byte, but if bytes with
+	   high bits set are common in the strings, then this will give poor
+	   performance. You can #define EIGHTBIT_NOT_RARE and the algorithm
+	   will use one tick slower, but more precise test
+	   ((xword - 0x0101010101010101) & (~xword) & 0x8080808080808080),
+	   which does not give any false alarms (but if some bits are set,
+	   one cannot assume from it which bytes are zero and which are not).
+	   It is yet to be measured, what is the correct default for glibc
+	   in these days for an average user.
+	 */
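+	/* Conceptually (illustrative C sketch only; the code below does
+	   the same work eight bytes at a time with the test above):
+
+	     char *strcat(char *dst, const char *src)
+	     {
+	       char *p = dst;
+	       while (*p) p++;            // find the end of dst
+	       while ((*p++ = *src++));   // append src, including '\0'
+	       return dst;
+	     }
+	 */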
+
+	.text
+	.align		32
+ENTRY(strcat)
+	sethi		%hi(0x01010101), %g1		/* IEU0		Group		*/
+	ldub		[%o0], %o3			/* Load				*/
+	or		%g1, %lo(0x01010101), %g1	/* IEU0		Group		*/
+	mov		%o0, %g6			/* IEU1				*/
+
+	sllx		%g1, 32, %g2			/* IEU0		Group		*/
+	andcc		%o0, 7, %g0			/* IEU1				*/
+	or		%g1, %g2, %g1			/* IEU0		Group		*/
+	bne,pn		%icc, 32f			/* CTI				*/
+
+	 sllx		%g1, 7, %g2			/* IEU0		Group		*/
+	brz,pn		%o3, 30f			/* CTI+IEU1			*/
+	 ldx		[%o0], %o3			/* Load				*/
+48:	add		%o0, 8, %o0			/* IEU0		Group		*/
+
+49:	sub		%o3, %g1, %o2			/* IEU0		Group		*/
+#ifdef EIGHTBIT_NOT_RARE
+	andn		%o2, %o3, %g5			/* IEU0		Group		*/
+	ldxa		[%o0] ASI_PNF, %o3		/* Load				*/
+	andcc		%g5, %g2, %g0			/* IEU1		Group		*/
+#else
+	ldxa		[%o0] ASI_PNF, %o3		/* Load				*/
+	andcc		%o2, %g2, %g0			/* IEU1		Group		*/
+#endif
+	be,pt		%xcc, 49b			/* CTI				*/
+
+	 add		%o0, 8, %o0			/* IEU0				*/
+ 	addcc		%o2, %g1, %g3			/* IEU1		Group		*/
+	srlx		%o2, 32, %o2			/* IEU0				*/
+50:	andcc		%o2, %g2, %g0			/* IEU1		Group		*/
+
+	be,pn		%xcc, 51f			/* CTI				*/
+	 srlx		%g3, 56, %o2			/* IEU0				*/
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 29f			/* CTI				*/
+
+	 srlx		%g3, 48, %o2			/* IEU0				*/
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 28f			/* CTI				*/
+	 srlx		%g3, 40, %o2			/* IEU0				*/
+
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 27f			/* CTI				*/
+	 srlx		%g3, 32, %o2			/* IEU0				*/
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 26f			/* CTI				*/
+51:	 srlx		%g3, 24, %o2			/* IEU0				*/
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 25f			/* CTI				*/
+
+	 srlx		%g3, 16, %o2			/* IEU0				*/
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 24f			/* CTI				*/
+	 srlx		%g3, 8, %o2			/* IEU0				*/
+
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 23f			/* CTI				*/
+	 sub		%o3, %g1, %o2			/* IEU0				*/
+	andcc		%g3, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 52f			/* CTI				*/
+	 ldxa		[%o0] ASI_PNF, %o3		/* Load				*/
+	andcc		%o2, %g2, %g0			/* IEU1		Group		*/
+	be,pt		%xcc, 49b			/* CTI				*/
+
+	 add		%o0, 8, %o0			/* IEU0				*/
+	addcc		%o2, %g1, %g3			/* IEU1		Group		*/
+	ba,pt		%xcc, 50b			/* CTI				*/
+	 srlx		%o2, 32, %o2			/* IEU0				*/
+
+	.align		16
+52:	ba,pt		%xcc, 12f			/* CTI		Group		*/
+	 add		%o0, -9, %o0			/* IEU0				*/
+23:	ba,pt		%xcc, 12f			/* CTI		Group		*/
+	 add		%o0, -10, %o0			/* IEU0				*/
+
+24:	ba,pt		%xcc, 12f			/* CTI		Group		*/
+	 add		%o0, -11, %o0			/* IEU0				*/
+25:	ba,pt		%xcc, 12f			/* CTI		Group		*/
+	 add		%o0, -12, %o0			/* IEU0				*/
+
+26:	ba,pt		%xcc, 12f			/* CTI		Group		*/
+	 add		%o0, -13, %o0			/* IEU0				*/
+27:	ba,pt		%xcc, 12f			/* CTI		Group		*/
+	 add		%o0, -14, %o0			/* IEU0				*/
+
+28:	ba,pt		%xcc, 12f			/* CTI		Group		*/
+	 add		%o0, -15, %o0			/* IEU0				*/
+29:	add		%o0, -16, %o0			/* IEU0		Group		*/
+30:	andcc		%o1, 7, %g3			/* IEU1				*/
+
+31:	bne,pn		%icc, 14f			/* CTI				*/
+	 orcc		%g0, 64, %g4			/* IEU1		Group		*/
+1:	ldx		[%o1], %o3			/* Load				*/
+	add		%o1, 8, %o1			/* IEU1				*/
+
+2:	mov		%o3, %g3			/* IEU0		Group		*/
+3:	sub		%o3, %g1, %o2			/* IEU1				*/
+	ldxa		[%o1] ASI_PNF, %o3		/* Load				*/
+#ifdef EIGHTBIT_NOT_RARE
+	andn		%o2, %g3, %o2			/* IEU0		Group		*/
+#endif
+	add		%o0, 8, %o0			/* IEU0		Group		*/
+
+	andcc		%o2, %g2, %g0			/* IEU1				*/
+	add		%o1, 8, %o1			/* IEU0		Group		*/
+	be,a,pt		%xcc, 2b			/* CTI				*/
+	 stx		%g3, [%o0 - 8]			/* Store			*/
+
+	srlx		%g3, 56, %g5			/* IEU0		Group		*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 11f			/* CTI				*/
+	 srlx		%g3, 48, %g4			/* IEU0				*/
+
+	andcc		%g4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 10f			/* CTI				*/
+	 srlx		%g3, 40, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 9f			/* CTI				*/
+	 srlx		%g3, 32, %g4			/* IEU0				*/
+	andcc		%g4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 8f			/* CTI				*/
+
+	 srlx		%g3, 24, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 7f			/* CTI				*/
+	 srlx		%g3, 16, %g4			/* IEU0				*/
+
+	andcc		%g4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 6f			/* CTI				*/
+	 srlx		%g3, 8, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 5f			/* CTI				*/
+	 sub		%o3, %g1, %o2			/* IEU0				*/
+	stx		%g3, [%o0 - 8]			/* Store	Group		*/
+	andcc		%g3, 0xff, %g0			/* IEU1				*/
+
+	bne,pt		%icc, 3b			/* CTI				*/
+	 mov		%o3, %g3			/* IEU0		Group		*/
+4:	retl						/* CTI+IEU1	Group		*/
+	 mov		%g6, %o0			/* IEU0				*/
+
+	.align		16
+5:	stb		%g5, [%o0 - 2]			/* Store	Group		*/
+	srlx		%g3, 16, %g4			/* IEU0				*/
+6:	sth		%g4, [%o0 - 4]			/* Store	Group		*/
+	srlx		%g3, 32, %g4			/* IEU0				*/
+
+	stw		%g4, [%o0 - 8]			/* Store	Group		*/
+	retl						/* CTI+IEU1	Group		*/
+	 mov		%g6, %o0			/* IEU0				*/
+7:	stb		%g5, [%o0 - 4]			/* Store	Group		*/
+
+	srlx		%g3, 32, %g4			/* IEU0				*/
+8:	stw		%g4, [%o0 - 8]			/* Store	Group		*/
+	retl						/* CTI+IEU1	Group		*/
+	 mov		%g6, %o0 			/* IEU0				*/
+
+9:	stb		%g5, [%o0 - 6]			/* Store	Group		*/
+	srlx		%g3, 48, %g4			/* IEU0				*/
+10:	sth		%g4, [%o0 - 8]			/* Store	Group		*/
+	retl						/* CTI+IEU1	Group		*/
+
+	 mov		%g6, %o0			/* IEU0				*/
+11:	stb		%g5, [%o0 - 8]			/* Store	Group		*/
+	retl						/* CTI+IEU1	Group		*/
+	 mov		%g6, %o0			/* IEU0				*/
+
+	.align		16
+32:	andcc		%o0, 7, %g0			/* IEU1		Group		*/
+	be,a,pn		%icc, 48b			/* CTI				*/
+	 ldx		[%o0], %o3			/* Load				*/
+	add		%o0, 1, %o0			/* IEU0		Group		*/
+
+	brnz,a,pt	%o3, 32b			/* CTI+IEU1			*/
+	 lduba		[%o0] ASI_PNF, %o3		/* Load				*/
+	add		%o0, -1, %o0			/* IEU0		Group		*/
+	andcc		%o0, 7, %g0			/* IEU1		Group		*/
+
+	be,a,pn		%icc, 31b			/* CTI				*/
+	 andcc		%o1, 7, %g3			/* IEU1		Group		*/
+12:	ldub		[%o1], %o3			/* Load				*/
+	stb		%o3, [%o0]			/* Store	Group		*/
+
+13:	add		%o0, 1, %o0			/* IEU0				*/
+	add		%o1, 1, %o1			/* IEU1				*/
+	andcc		%o3, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 4b			/* CTI				*/
+
+	 lduba		[%o1] ASI_PNF, %o3		/* Load				*/
+	andcc		%o0, 7, %g0			/* IEU1		Group		*/
+	bne,a,pt	%icc, 13b			/* CTI				*/
+	 stb		%o3, [%o0]			/* Store			*/
+
+	andcc		%o1, 7, %g3			/* IEU1		Group		*/
+	be,a,pt		%icc, 1b			/* CTI				*/
+	 ldx		[%o1], %o3			/* Load				*/
+	orcc		%g0, 64, %g4			/* IEU1		Group		*/
+
+14:	sllx		%g3, 3, %g5			/* IEU0				*/
+	sub		%o1, %g3, %o1			/* IEU0		Group		*/
+	sub		%g4, %g5, %g4			/* IEU1				*/
+							/* %g1 = 0101010101010101	*
+							 * %g2 = 8080808080808080	*
+							 * %g3 = source alignment	*
+							 * %g5 = number of bits to shift left  *
+							 * %g4 = number of bits to shift right */
+	ldxa		[%o1] ASI_PNF, %o5		/* Load		Group		*/
+
+	addcc		%o1, 8, %o1			/* IEU1				*/
+15:	sllx		%o5, %g5, %o3			/* IEU0		Group		*/
+	ldxa		[%o1] ASI_PNF, %o5		/* Load				*/
+	srlx		%o5, %g4, %o4			/* IEU0		Group		*/
+
+	add		%o0, 8, %o0			/* IEU1				*/
+	or		%o3, %o4, %o3			/* IEU0		Group		*/
+	add		%o1, 8, %o1			/* IEU1				*/
+	sub		%o3, %g1, %o4			/* IEU0		Group		*/
+
+#ifdef EIGHTBIT_NOT_RARE
+	andn		%o4, %o3, %o4			/* IEU0		Group		*/
+#endif
+	andcc		%o4, %g2, %g0			/* IEU1		Group		*/
+	be,a,pt		%xcc, 15b			/* CTI				*/
+	 stx		%o3, [%o0 - 8]			/* Store			*/
+	srlx		%o3, 56, %o4			/* IEU0		Group		*/
+
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 22f			/* CTI				*/
+	 srlx		%o3, 48, %o4			/* IEU0				*/
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 21f			/* CTI				*/
+	 srlx		%o3, 40, %o4			/* IEU0				*/
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 20f			/* CTI				*/
+
+	 srlx		%o3, 32, %o4			/* IEU0				*/
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 19f			/* CTI				*/
+	 srlx		%o3, 24, %o4			/* IEU0				*/
+
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 18f			/* CTI				*/
+	 srlx		%o3, 16, %o4			/* IEU0				*/
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 17f			/* CTI				*/
+	 srlx		%o3, 8, %o4			/* IEU0				*/
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 16f			/* CTI				*/
+
+	 andcc		%o3, 0xff, %g0			/* IEU1		Group		*/
+	bne,pn		%icc, 15b			/* CTI				*/
+	 stx		%o3, [%o0 - 8]			/* Store			*/
+	retl						/* CTI+IEU1	Group		*/
+
+	 mov		%g6, %o0			/* IEU0				*/
+
+	.align		16
+16:	srlx		%o3, 8, %o4			/* IEU0		Group		*/
+	stb		%o4, [%o0 - 2]			/* Store			*/
+17:	srlx		%o3, 16, %o4			/* IEU0		Group		*/
+	stb		%o4, [%o0 - 3]			/* Store			*/
+
+18:	srlx		%o3, 24, %o4			/* IEU0		Group		*/
+	stb		%o4, [%o0 - 4]			/* Store			*/
+19:	srlx		%o3, 32, %o4			/* IEU0		Group		*/
+	stw		%o4, [%o0 - 8]			/* Store			*/
+
+	retl						/* CTI+IEU1	Group		*/
+	 mov		%g6, %o0 			/* IEU0				*/
+	nop
+	nop
+
+20:	srlx		%o3, 40, %o4			/* IEU0		Group		*/
+	stb		%o4, [%o0 - 6]			/* Store			*/
+21:	srlx		%o3, 48, %o4			/* IEU0		Group		*/
+	stb		%o4, [%o0 - 7]			/* Store			*/
+
+22:	srlx		%o3, 56, %o4			/* IEU0		Group		*/
+	stb		%o4, [%o0 - 8]			/* Store			*/
+	retl						/* CTI+IEU1	Group		*/
+	 mov		%g6, %o0			/* IEU0				*/
+END(strcat)
+libc_hidden_def(strcat)
diff --git a/ap/build/uClibc/libc/string/sparc/sparc64/strchr.S b/ap/build/uClibc/libc/string/sparc/sparc64/strchr.S
new file mode 100644
index 0000000..da26d1f
--- /dev/null
+++ b/ap/build/uClibc/libc/string/sparc/sparc64/strchr.S
@@ -0,0 +1,486 @@
+/* strchr (str, ch) -- Return pointer to first occurrence of CH in STR.
+   For SPARC v9.
+   Copyright (C) 1998, 1999, 2003 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Jan Vondrak <jvon4518@ss1000.ms.mff.cuni.cz> and
+		  Jakub Jelinek <jj@ultra.linux.cz>.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <features.h>
+#include <asm/asi.h>
+#ifndef XCC
+#define XCC xcc
+#define USE_BPR
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+	.register	%g6, #scratch
+#endif
+
+	/* Normally, this uses
+	   ((xword - 0x0101010101010101) & 0x8080808080808080) test
+	   to find out if any byte in xword could be zero. This is fast, but
+	   also gives false alarm for any byte in range 0x81-0xff. It does
+	   not matter for correctness, as if this test tells us there could
+	   be some zero byte, we check it byte by byte, but if bytes with
+	   high bits set are common in the strings, then this will give poor
+	   performance. You can #define EIGHTBIT_NOT_RARE and the algorithm
+	   will use one tick slower, but more precise test
+	   ((xword - 0x0101010101010101) & (~xword) & 0x8080808080808080),
+	   which does not give any false alarms (but if some bits are set,
+	   one cannot assume from it which bytes are zero and which are not).
+	   It is yet to be measured, what is the correct default for glibc
+	   in these days for an average user.
+	 */
+
+	.text
+	.align		32
+ENTRY(strchr)
+	andcc		%o1, 0xff, %o1			/* IEU1		Group		*/
+	be,pn		%icc, 17f			/* CTI				*/
+	 sllx		%o1, 8, %g3			/* IEU0		Group		*/
+	sethi		%hi(0x01010101), %g1		/* IEU1				*/
+
+	or		%g3, %o1, %g3			/* IEU0		Group		*/
+	ldub		[%o0], %o3			/* Load				*/
+	sllx		%g3, 16, %g5			/* IEU0		Group		*/
+	or		%g1, %lo(0x01010101), %g1	/* IEU1				*/
+
+	sllx		%g1, 32, %g2			/* IEU0		Group		*/
+	brz,pn		%o3, 5f				/* CTI+IEU1			*/
+	 orcc		%g3, %g5, %g3			/* IEU1		Group		*/
+	sllx		%g3, 32, %g5			/* IEU0				*/
+
+	cmp		%o3, %o1			/* IEU1		Group		*/
+	be,pn		%xcc, 14f			/* CTI				*/
+	 or		%g1, %g2, %g1			/* IEU0				*/
+	andcc		%o0, 7, %g0			/* IEU1		Group		*/
+
+	bne,a,pn	%icc, 15f			/* CTI				*/
+	 add		%o0, 1, %o0			/* IEU0				*/
+	ldx		[%o0], %o3			/* Load		Group		*/
+1:	sllx		%g1, 7, %g2			/* IEU0				*/
+
+	or		%g3, %g5, %g3			/* IEU1				*/
+	add		%o0, 8, %o0			/* IEU0		Group		*/
+	xor		%o3, %g3, %o4			/* IEU1				*/
+							/* %g1 = 0101010101010101	*
+							 * %g2 = 8080808080808080	*
+							 * %g3 =  c c c c c c c c	*
+							 * %o3 =      value		*
+							 * %o4 =   value XOR c		*/
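+							/* A byte equal to c in %o3 turns into a
+							 * zero byte in %o4, so the zero-byte
+							 * test is applied to both %o3 and %o4
+							 * (the two results are or-ed below) to
+							 * catch either the terminator or c.	*/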
+2:	sub		%o3, %g1, %o2			/* IEU0		Group		*/
+
+	sub		%o4, %g1, %o5			/* IEU1				*/
+#ifdef EIGHTBIT_NOT_RARE
+	andn		%o2, %o3, %g6			/* IEU0		Group		*/
+	andn		%o5, %o4, %o5			/* IEU1				*/
+	ldxa		[%o0] ASI_PNF, %o3		/* Load				*/
+	or		%o5, %g6, %o5			/* IEU0		Group		*/
+#else
+	ldxa		[%o0] ASI_PNF, %o3		/* Load				*/
+	or		%o5, %o2, %o5			/* IEU0		Group		*/
+#endif
+	add		%o0, 8, %o0			/* IEU1				*/
+
+	andcc		%o5, %g2, %g0			/* IEU1		Group		*/
+	be,a,pt		%xcc, 2b			/* CTI				*/
+	 xor		%o3, %g3, %o4			/* IEU0				*/
+	srlx		%o5, 32, %g5			/* IEU0		Group		*/
+
+	add		%o2, %g1, %o2			/* IEU1				*/
+3:	andcc		%g5, %g2, %g0			/* IEU1		Group		*/
+	be,pn		%xcc, 4f			/* CTI				*/
+	 srlx		%o2, 56, %g5			/* IEU0				*/
+
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 5f			/* CTI				*/
+	 srlx		%o4, 56, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 6f			/* CTI				*/
+	 srlx		%o2, 48, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 5f			/* CTI				*/
+
+	 srlx		%o4, 48, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 7f			/* CTI				*/
+	 srlx		%o2, 40, %g5			/* IEU0				*/
+
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 5f			/* CTI				*/
+	 srlx		%o4, 40, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 8f			/* CTI				*/
+	 srlx		%o2, 32, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 5f			/* CTI				*/
+
+	 srlx		%o4, 32, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 9f			/* CTI				*/
+4:	 srlx		%o2, 24, %g5			/* IEU0				*/
+
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 5f			/* CTI				*/
+	 srlx		%o4, 24, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 10f			/* CTI				*/
+	 srlx		%o2, 16, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 5f			/* CTI				*/
+
+	 srlx		%o4, 16, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 11f			/* CTI				*/
+	 srlx		%o2, 8, %g5			/* IEU0				*/
+
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 5f			/* CTI				*/
+	 srlx		%o4, 8, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 12f			/* CTI				*/
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 5f			/* CTI				*/
+	 sub		%o3, %g1, %o2			/* IEU0				*/
+
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 13f			/* CTI				*/
+	 xor		%o3, %g3, %o4			/* IEU0				*/
+	ldxa		[%o0] ASI_PNF, %o3		/* Load		Group		*/
+
+	sub		%o4, %g1, %o5			/* IEU0				*/
+	or		%o5, %o2, %o5			/* IEU1				*/
+	add		%o0, 8, %o0			/* IEU0		Group		*/
+	andcc		%o5, %g2, %g0			/* IEU1				*/
+
+	be,a,pt		%xcc, 2b			/* CTI				*/
+	 xor		%o3, %g3, %o4			/* IEU0		Group		*/
+	srlx		%o5, 32, %g5			/* IEU0		Group		*/
+	ba,pt		%xcc, 3b			/* CTI				*/
+
+	 add		%o2, %g1, %o2			/* IEU1				*/
+
+	.align		16
+5:	retl						/* CTI+IEU1	Group		*/
+	 clr		%o0				/* IEU0				*/
+6:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -16, %o0			/* IEU0				*/
+
+7:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -15, %o0			/* IEU0				*/
+8:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -14, %o0			/* IEU0				*/
+
+9:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -13, %o0			/* IEU0				*/
+10:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -12, %o0			/* IEU0				*/
+
+11:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -11, %o0			/* IEU0				*/
+12:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -10, %o0			/* IEU0				*/
+
+13:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -9, %o0			/* IEU0				*/
+14: 	retl						/* CTI+IEU1	Group		*/
+	 nop						/* IEU0				*/
+
+	.align		16
+15:	ldub		[%o0], %o3			/* Load		Group		*/
+16:	andcc		%o0, 7, %g0			/* IEU1				*/
+	be,a,pn		%icc, 1b			/* CTI				*/
+	 ldx		[%o0], %o3			/* Load		Group		*/
+
+	andcc		%o3, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 5b			/* CTI				*/
+	 add		%o0, 1, %o0			/* IEU0				*/
+	cmp		%o3, %o1			/* IEU1		Group		*/
+
+	bne,a,pn	%icc, 16b			/* CTI				*/
+	 ldub		[%o0], %o3			/* Load				*/
+	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -1, %o0			/* IEU0				*/
+
+	/* strchr (str, 0)			*/
+	.align		32
+	nop
+	.align		16
+17:	sethi		%hi(0x01010101), %g1		/* IEU0		Group		*/
+	ldub		[%o0], %o3			/* Load				*/
+	or		%g1, %lo(0x01010101), %g1	/* IEU0		Group		*/
+	sllx		%g1, 32, %g2			/* IEU0		Group		*/
+
+	andcc		%o0, 7, %g0			/* IEU1				*/
+	or		%g1, %g2, %g1			/* IEU0		Group		*/
+	bne,pn		%icc, 32f			/* CTI				*/
+	 sllx		%g1, 7, %g2			/* IEU0		Group		*/
+
+	brz,pn		%o3, 30f			/* CTI+IEU1			*/
+	 ldx		[%o0], %o3			/* Load				*/
+18:	add		%o0, 8, %o0			/* IEU0		Group		*/
+19:	sub		%o3, %g1, %o2			/* IEU0		Group		*/
+
+#ifdef EIGHTBIT_NOT_RARE
+	andn		%o2, %o3, %g6			/* IEU0		Group		*/
+	ldxa		[%o0] ASI_PNF, %o3		/* Load				*/
+	andcc		%g6, %g2, %g0			/* IEU1		Group		*/
+#else
+	ldxa		[%o0] ASI_PNF, %o3		/* Load				*/
+	andcc		%o2, %g2, %g0			/* IEU1		Group		*/
+#endif
+	be,pt		%xcc, 19b			/* CTI				*/
+	 add		%o0, 8, %o0			/* IEU0				*/
+
+ 	addcc		%o2, %g1, %g3			/* IEU1		Group		*/
+	srlx		%o2, 32, %o2			/* IEU0				*/
+20:	andcc		%o2, %g2, %g0			/* IEU1		Group		*/
+	be,pn		%xcc, 21f			/* CTI				*/
+
+	 srlx		%g3, 56, %o2			/* IEU0				*/
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 29f			/* CTI				*/
+	 srlx		%g3, 48, %o2			/* IEU0				*/
+
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 28f			/* CTI				*/
+	 srlx		%g3, 40, %o2			/* IEU0				*/
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 27f			/* CTI				*/
+	 srlx		%g3, 32, %o2			/* IEU0				*/
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 26f			/* CTI				*/
+
+21:	 srlx		%g3, 24, %o2			/* IEU0				*/
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 25f			/* CTI				*/
+	 srlx		%g3, 16, %o2			/* IEU0				*/
+
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 24f			/* CTI				*/
+	 srlx		%g3, 8, %o2			/* IEU0				*/
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 23f			/* CTI				*/
+	 sub		%o3, %g1, %o2			/* IEU0				*/
+	andcc		%g3, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 22f			/* CTI				*/
+
+	 ldxa		[%o0] ASI_PNF, %o3		/* Load				*/
+	andcc		%o2, %g2, %g0			/* IEU1		Group		*/
+	be,pt		%xcc, 19b			/* CTI				*/
+	 add		%o0, 8, %o0			/* IEU0				*/
+
+	addcc		%o2, %g1, %g3			/* IEU1		Group		*/
+	ba,pt		%xcc, 20b			/* CTI				*/
+	 srlx		%o2, 32, %o2			/* IEU0				*/
+
+	.align		16
+22:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -9, %o0			/* IEU0				*/
+23:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -10, %o0			/* IEU0				*/
+
+24:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -11, %o0			/* IEU0				*/
+25:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -12, %o0			/* IEU0				*/
+
+26:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -13, %o0			/* IEU0				*/
+27:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -14, %o0			/* IEU0				*/
+
+28:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -15, %o0			/* IEU0				*/
+29:	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -16, %o0			/* IEU0				*/
+
+30:	retl						/* CTI+IEU1	Group		*/
+	 nop						/* IEU0				*/
+
+	.align		16
+32:	andcc		%o0, 7, %g0			/* IEU1		Group		*/
+	be,a,pn		%icc, 18b			/* CTI				*/
+	 ldx		[%o0], %o3			/* Load				*/
+	add		%o0, 1, %o0			/* IEU0		Group		*/
+
+	brnz,a,pt	%o3, 32b			/* CTI+IEU1			*/
+	 lduba		[%o0] ASI_PNF, %o3		/* Load				*/
+	retl						/* CTI+IEU1	Group		*/
+	 add		%o0, -1, %o0			/* IEU0				*/
+END(strchr)
+libc_hidden_def(strchr)
+#ifdef __UCLIBC_SUSV3_LEGACY__
+strong_alias(strchr,index)
+#endif
+
+	.align		32
+ENTRY(strrchr)
+	andcc		%o1, 0xff, %o1			/* IEU1		Group		*/
+	be,pn		%icc, 17b			/* CTI				*/
+	 clr		%g4				/* IEU0				*/
+	andcc		%o0, 7, %g0			/* IEU1		Group		*/
+
+	bne,pn		%icc, 13f			/* CTI				*/
+	 sllx		%o1, 8, %g3			/* IEU0				*/
+	ldx		[%o0], %o3			/* Load		Group		*/
+1:	sethi		%hi(0x01010101), %g1		/* IEU0				*/
+
+	or		%g3, %o1, %g3			/* IEU1				*/
+	sllx		%g3, 16, %g5			/* IEU0		Group		*/
+	or		%g1, %lo(0x01010101), %g1	/* IEU1				*/
+	sllx		%g1, 32, %g2			/* IEU0		Group		*/
+
+	or		%g3, %g5, %g3			/* IEU1				*/
+	sllx		%g3, 32, %g5			/* IEU0		Group		*/
+	or		%g1, %g2, %g1			/* IEU1				*/
+	sllx		%g1, 7, %g2			/* IEU0		Group		*/
+
+	or		%g3, %g5, %g3			/* IEU1				*/
+	add		%o0, 8, %o0			/* IEU0		Group		*/
+	xor		%o3, %g3, %o4			/* IEU1				*/
+							/* %g1 = 0101010101010101	*
+							 * %g2 = 8080808080808080	*
+							 * %g3 =  c c c c c c c c	*
+							 * %o3 =   value		*
+							 * %o4 =   value XOR c		*/
+2:	sub		%o3, %g1, %o2			/* IEU0		Group		*/
+
+3:	sub		%o4, %g1, %o5			/* IEU1				*/
+#ifdef EIGHTBIT_NOT_RARE
+	andn		%o2, %o3, %g6			/* IEU0		Group		*/
+	andn		%o5, %o4, %o5			/* IEU1				*/
+	ldxa		[%o0] ASI_PNF, %o3		/* Load				*/
+
+	or		%o5, %g6, %o5			/* IEU0		Group		*/
+#else
+	ldxa		[%o0] ASI_PNF, %o3		/* Load				*/
+
+	or		%o5, %o2, %o5			/* IEU0		Group		*/
+#endif
+	add		%o0, 8, %o0			/* IEU1				*/
+	andcc		%o5, %g2, %g0			/* IEU1		Group		*/
+	be,a,pt		%xcc, 2b			/* CTI				*/
+
+	 xor		%o3, %g3, %o4			/* IEU0				*/
+	srlx		%o5, 32, %g5			/* IEU0		Group		*/
+	add		%o2, %g1, %o2			/* IEU1				*/
+	andcc		%g5, %g2, %g0			/* IEU1		Group		*/
+
+	be,pn		%xcc, 7f			/* CTI				*/
+	 srlx		%o2, 56, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 12f			/* CTI				*/
+
+	 srlx		%o4, 56, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	srlx		%o2, 48, %g5			/* IEU0				*/
+	be,a,pn		%icc, 4f			/* CTI				*/
+
+	 add		%o0, -16, %g4			/* IEU0		Group		*/
+4:	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 12f			/* CTI				*/
+	 srlx		%o4, 48, %g5			/* IEU0				*/
+
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	srlx		%o2, 40, %g5			/* IEU0				*/
+	be,a,pn		%icc, 5f			/* CTI				*/
+	 add		%o0, -15, %g4			/* IEU0		Group		*/
+
+5:	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 12f			/* CTI				*/
+	 srlx		%o4, 40, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+
+	srlx		%o2, 32, %g5			/* IEU0				*/
+	be,a,pn		%icc, 6f			/* CTI				*/
+	 add		%o0, -14, %g4			/* IEU0		Group		*/
+6:	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 12f			/* CTI				*/
+	 srlx		%o4, 32, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,a,pn		%icc, 7f			/* CTI				*/
+
+	 add		%o0, -13, %g4			/* IEU0				*/
+7:	srlx		%o2, 24, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 12f			/* CTI				*/
+
+	 srlx		%o4, 24, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	srlx		%o2, 16, %g5			/* IEU0				*/
+	be,a,pn		%icc, 8f			/* CTI				*/
+
+	 add		%o0, -12, %g4			/* IEU0		Group		*/
+8:	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 12f			/* CTI				*/
+	 srlx		%o4, 16, %g5			/* IEU0				*/
+
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	srlx		%o2, 8, %g5			/* IEU0				*/
+	be,a,pn		%icc, 9f			/* CTI				*/
+	 add		%o0, -11, %g4			/* IEU0		Group		*/
+
+9:	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 12f			/* CTI				*/
+	 srlx		%o4, 8, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+
+	be,a,pn		%icc, 10f			/* CTI				*/
+	 add		%o0, -10, %g4			/* IEU0				*/
+10:	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 12f			/* CTI				*/
+
+	 sub		%o3, %g1, %o2			/* IEU0				*/
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+	be,a,pn		%icc, 11f			/* CTI				*/
+	 add		%o0, -9, %g4			/* IEU0				*/
+
+11:	ba,pt		%xcc, 3b			/* CTI		Group		*/
+	 xor		%o3, %g3, %o4			/* IEU0		Group		*/
+12:	retl						/* CTI+IEU1	Group		*/
+	 mov		%g4, %o0			/* IEU0				*/
+
+	.align		16
+13:	ldub		[%o0], %o3			/* Load		Group		*/
+	add		%o0, 1, %o0			/* IEU0				*/
+14:	andcc		%o3, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 12b			/* CTI				*/
+
+	cmp		%o3, %o1			/* IEU1		Group		*/
+	ldub		[%o0], %o3			/* Load				*/
+	be,a,pn 	%icc, 15f			/* CTI				*/
+	 add		%o0, -1, %g4			/* IEU0		Group		*/
+
+15:	andcc		%o0, 7, %g0			/* IEU1		Group		*/
+	bne,a,pt	%icc, 14b			/* CTI				*/
+	 add		%o0, 1, %o0			/* IEU0				*/
+	ba,pt		%xcc, 1b			/* CTI		Group		*/
+
+	 ldx		[%o0], %o3			/* Load				*/
+END(strrchr)
+libc_hidden_def(strrchr)
+#ifdef __UCLIBC_SUSV3_LEGACY__
+strong_alias(strrchr,rindex)
+#endif
diff --git a/ap/build/uClibc/libc/string/sparc/sparc64/strcmp.S b/ap/build/uClibc/libc/string/sparc/sparc64/strcmp.S
new file mode 100644
index 0000000..df9e691
--- /dev/null
+++ b/ap/build/uClibc/libc/string/sparc/sparc64/strcmp.S
@@ -0,0 +1,279 @@
+/* Compare two strings for differences.
+   For SPARC v9.
+   Copyright (C) 1997, 1999, 2003 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Jan Vondrak <jvon4518@ss1000.ms.mff.cuni.cz> and
+                  Jakub Jelinek <jj@ultra.linux.cz>.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <asm/asi.h>
+#ifndef XCC
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+	.register	%g6, #scratch
+#endif
+
+	/* Normally, this uses
+	   ((xword - 0x0101010101010101) & 0x8080808080808080) test
+	   to find out if any byte in xword could be zero. This is fast, but
+	   also gives false alarm for any byte in range 0x81-0xff. It does
+	   not matter for correctness, as if this test tells us there could
+	   be some zero byte, we check it byte by byte, but if bytes with
+	   high bits set are common in the strings, then this will give poor
+	   performance. You can #define EIGHTBIT_NOT_RARE and the algorithm
+	   will use one tick slower, but more precise test
+	   ((xword - 0x0101010101010101) & (~xword) & 0x8080808080808080),
+	   which does not give any false alarms (but if some bits are set,
+	   one cannot assume from it which bytes are zero and which are not).
+	   It is yet to be measured, what is the correct default for glibc
+	   in these days for an average user.
+	 */
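+	/* Rough outline (illustrative only): each iteration compares eight
+	   bytes of both strings at once.  When the doublewords differ, the
+	   code at 13: decides the result from the words, masking off a
+	   possible terminator and the bytes following it, and returns
+	   -1, 0 or 1; when the doublewords are equal but may contain a
+	   zero byte, the bytes are checked one at a time and 0 is returned
+	   at 4: once the terminator is confirmed.  */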
+
+	.text
+	.align		32
+ENTRY(strcmp)
+	sethi		%hi(0x01010101), %g1			/* IEU0		Group		*/
+	andcc		%o0, 7, %g0				/* IEU1				*/
+	bne,pn		%icc, 7f				/* CTI				*/
+	 or		%g1, %lo(0x01010101), %g1		/* IEU0		Group		*/
+
+	andcc		%o1, 7, %g3				/* IEU1				*/
+	bne,pn		%icc, 9f				/* CTI				*/
+	 sllx		%g1, 32, %g2				/* IEU0		Group		*/
+	ldx		[%o0], %o2				/* Load				*/
+
+	or		%g1, %g2, %g1				/* IEU0		Group		*/
+1:	ldx		[%o1], %o3				/* Load				*/
+	sub		%o1, %o0, %o1				/* IEU1				*/
+	sllx		%g1, 7, %g2				/* IEU0		Group		*/
+
+2:	add		%o0, 8, %o0				/* IEU1				*/
+	sub		%o2, %g1, %g3				/* IEU0		Group		*/
+	subcc		%o2, %o3, %g0				/* IEU1				*/
+	bne,pn		%xcc, 13f				/* CTI				*/
+
+#ifdef EIGHTBIT_NOT_RARE
+	 andn		%g3, %o2, %g4				/* IEU0		Group		*/
+	ldxa		[%o0] ASI_PNF, %o2			/* Load				*/
+	andcc		%g4, %g2, %g0				/* IEU1		Group		*/
+#else
+	 ldxa		[%o0] ASI_PNF, %o2			/* Load		Group		*/
+	andcc		%g3, %g2, %g0				/* IEU1				*/
+#endif
+	be,a,pt		%xcc, 2b				/* CTI				*/
+	 ldxa		[%o1 + %o0] ASI_PNF, %o3		/* Load		Group		*/
+
+	addcc		%g3, %g1, %o4				/* IEU1				*/
+	srlx		%g3, 32, %g3				/* IEU0				*/
+	andcc		%g3, %g2, %g0				/* IEU1		Group		*/
+	be,pt		%xcc, 3f				/* CTI				*/
+
+	 srlx		%o4, 56, %o5				/* IEU0				*/
+	andcc		%o5, 0xff, %g0				/* IEU1		Group		*/
+	be,pn		%icc, 4f				/* CTI				*/
+	 srlx		%o4, 48, %o5				/* IEU0				*/
+
+	andcc		%o5, 0xff, %g0				/* IEU1		Group		*/
+	be,pn		%icc, 4f				/* CTI				*/
+	 srlx		%o4, 40, %o5				/* IEU0				*/
+	andcc		%o5, 0xff, %g0				/* IEU1		Group		*/
+
+	be,pn		%icc, 4f				/* CTI				*/
+	 srlx		%o4, 32, %o5				/* IEU0				*/
+	andcc		%o5, 0xff, %g0				/* IEU1		Group		*/
+	be,pn		%icc, 4f				/* CTI				*/
+
+3:	 srlx		%o4, 24, %o5				/* IEU0				*/
+	andcc		%o5, 0xff, %g0				/* IEU1		Group		*/
+	be,pn		%icc, 4f				/* CTI				*/
+	 srlx		%o4, 16, %o5				/* IEU0				*/
+
+	andcc		%o5, 0xff, %g0				/* IEU1		Group		*/
+	be,pn		%icc, 4f				/* CTI				*/
+	 srlx		%o4, 8, %o5				/* IEU0				*/
+	andcc		%o5, 0xff, %g0				/* IEU1		Group		*/
+
+	be,pn		%icc, 4f				/* CTI				*/
+	 andcc		%o4, 0xff, %g0				/* IEU1		Group		*/
+	bne,a,pn	%icc, 2b				/* CTI				*/
+	 ldxa		[%o1 + %o0] ASI_PNF, %o3		/* Load				*/
+
+4:	retl							/* CTI+IEU1	Group		*/
+	 clr		%o0					/* IEU0				*/
+
+	.align		32
+13:	mov		0xff, %g6				/* IEU0		Group		*/
+#ifdef EIGHTBIT_NOT_RARE
+	andcc		%g4, %g2, %g0				/* IEU1				*/
+#else
+	andcc		%g3, %g2, %g0				/* IEU1				*/
+#endif
+	be,pt		%xcc, 25f				/* CTI				*/
+	 addcc		%g3, %g1, %o4				/* IEU1		Group		*/
+
+	srlx		%g3, 32, %g3				/* IEU0				*/
+	andcc		%g3, %g2, %g0				/* IEU1		Group		*/
+	be,pt		%xcc, 23f				/* CTI				*/
+	 sllx		%g6, 56, %o5				/* IEU0				*/
+
+	andcc		%o4, %o5, %g0				/* IEU1		Group		*/
+	be,pn		%xcc, 24f				/* CTI				*/
+	 sllx		%g6, 48, %o5				/* IEU0				*/
+	andcc		%o4, %o5, %g0				/* IEU1		Group		*/
+
+	be,pn		%xcc, 24f				/* CTI				*/
+	 sllx		%g6, 40, %o5				/* IEU0				*/
+	andcc		%o4, %o5, %g0				/* IEU1		Group		*/
+	be,pn		%xcc, 24f				/* CTI				*/
+
+	 sllx		%g6, 32, %o5				/* IEU0				*/
+	andcc		%o4, %o5, %g0				/* IEU1		Group		*/
+	be,pn		%xcc, 24f				/* CTI				*/
+23:	 sllx		%g6, 24, %o5				/* IEU0				*/
+
+	andcc		%o4, %o5, %g0				/* IEU1		Group		*/
+	be,pn		%icc, 24f				/* CTI				*/
+	 sllx		%g6, 16, %o5				/* IEU0				*/
+	andcc		%o4, %o5, %g0				/* IEU1		Group		*/
+
+	be,pn		%icc, 24f				/* CTI				*/
+	 sllx		%g6, 8, %o5				/* IEU0				*/
+	andcc		%o4, %o5, %g0				/* IEU1		Group		*/
+	be,pn		%icc, 24f				/* CTI				*/
+
+	 mov		%g6, %o5				/* IEU0				*/
+25:	cmp		%o4, %o3				/* IEU1		Group		*/
+5:	mov		-1, %o0					/* IEU0				*/
+	retl							/* CTI+IEU1	Group		*/
+
+	 movgu		%xcc, 1, %o0				/* Single	Group		*/
+
+	.align		16
+24:	sub		%o5, 1, %g6				/* IEU0		Group		*/
+	clr		%o0					/* IEU1				*/
+	or		%o5, %g6, %o5				/* IEU0		Group		*/
+	andn		%o4, %o5, %o4				/* IEU0		Group		*/
+
+	andn		%o3, %o5, %o3				/* IEU1				*/
+	cmp		%o4, %o3				/* IEU1		Group		*/
+	movgu		%xcc, 1, %o0				/* Single	Group		*/
+	retl							/* CTI+IEU1	Group		*/
+
+	 movlu		%xcc, -1, %o0				/* Single	Group		*/
+6:	retl							/* CTI+IEU1	Group		*/
+	 mov		%o4, %o0				/* IEU0				*/
+
+	.align		16
+7:	ldub		[%o0], %o2				/* Load				*/
+	add		%o0, 1, %o0				/* IEU1				*/
+	ldub		[%o1], %o3				/* Load		Group		*/
+	sllx		%g1, 32, %g2				/* IEU0				*/
+
+8:	add		%o1, 1, %o1				/* IEU1				*/
+	subcc		%o2, %o3, %o4				/* IEU1		Group		*/
+	bne,pn		%xcc, 6b				/* CTI				*/
+	 lduba		[%o0] ASI_PNF, %o2			/* Load				*/
+
+	brz,pn		%o3, 4b					/* CTI+IEU1	Group		*/
+	 lduba		[%o1] ASI_PNF, %o3			/* Load				*/
+	andcc		%o0, 7, %g0				/* IEU1		Group		*/
+	bne,a,pn	%icc, 8b				/* CTI				*/
+
+	 add		%o0, 1, %o0				/* IEU0				*/
+	or		%g1, %g2, %g1				/* IEU0		Group		*/
+	andcc		%o1, 7, %g3				/* IEU1				*/
+	be,a,pn		%icc, 1b				/* CTI				*/
+
+	 ldxa		[%o0] ASI_PNF, %o2			/* Load		Group		*/
+9:	sllx		%g3, 3, %g5				/* IEU0				*/
+	mov		64, %o5					/* IEU1				*/
+	sub		%o1, %g3, %o1				/* IEU0		Group		*/
+
+	sub		%o5, %g5, %o5				/* IEU1				*/
+	ldxa		[%o1] ASI_PNF, %g6			/* Load		Group		*/
+	or		%g1, %g2, %g1				/* IEU0				*/
+	sub		%o1, %o0, %o1				/* IEU1				*/
+
+	sllx		%g1, 7, %g2				/* IEU0		Group		*/
+	add		%o1, 8, %o1				/* IEU1				*/
+								/* %g1 = 0101010101010101
+								 * %g2 = 8080808080808080
+								 * %g5 = number of bits to shift left
+								 * %o5 = number of bits to shift right */
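+								/* Hypothetical sketch (exposition only, not in
+								 * the original source): each aligned xword of
+								 * the misaligned string is assembled from two
+								 * adjacent loads as
+								 *   word = (prev << %g5) | (next >> %o5);  */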
+10:	sllx		%g6, %g5, %o3				/* IEU0		Group		*/
+	ldxa		[%o1 + %o0] ASI_PNF, %g6		/* Load				*/
+
+11:	srlx		%g6, %o5, %o4				/* IEU0		Group		*/
+	ldxa		[%o0] ASI_PNF, %o2			/* Load				*/
+	or		%o3, %o4, %o3				/* IEU1				*/
+	add		%o0, 8, %o0				/* IEU0		Group		*/
+
+	subcc		%o2, %o3, %g0				/* IEU1				*/
+#ifdef EIGHTBIT_NOT_RARE
+	sub		%o2, %g1, %g3				/* IEU0		Group		*/
+	bne,pn		%xcc, 13b				/* CTI				*/
+	 andn		%g3, %o2, %g4				/* IEU0		Group		*/
+
+	andcc		%g4, %g2, %g0				/* IEU1		Group		*/
+	be,pt		%xcc, 10b				/* CTI				*/
+	 srlx		%g4, 32, %g4				/* IEU0				*/
+	andcc		%g4, %g2, %g0				/* IEU1		Group		*/
+#else
+	bne,pn		%xcc, 13b				/* CTI				*/
+	 sub		%o2, %g1, %g3				/* IEU0		Group		*/
+	andcc		%g3, %g2, %g0				/* IEU1		Group		*/
+
+	be,pt		%xcc, 10b				/* CTI				*/
+	 srlx		%g3, 32, %g3				/* IEU0				*/
+	andcc		%g3, %g2, %g0				/* IEU1		Group		*/
+#endif
+	be,pt		%xcc, 12f				/* CTI				*/
+
+	 srlx		%o2, 56, %g3				/* IEU0				*/
+	andcc		%g3, 0xff, %g0				/* IEU1		Group		*/
+	be,pn		%icc, 4b				/* CTI				*/
+	 srlx		%o2, 48, %g3				/* IEU0				*/
+
+	andcc		%g3, 0xff, %g0				/* IEU1		Group		*/
+	be,pn		%icc, 4b				/* CTI				*/
+	 srlx		%o2, 40, %g3				/* IEU0				*/
+	andcc		%g3, 0xff, %g0				/* IEU1		Group		*/
+
+	be,pn		%icc, 4b				/* CTI				*/
+	 srlx		%o2, 32, %g3				/* IEU0				*/
+	andcc		%g3, 0xff, %g0				/* IEU1		Group		*/
+	be,pn		%icc, 4b				/* CTI				*/
+
+12:	 srlx		%o2, 24, %g3				/* IEU0				*/
+	andcc		%g3, 0xff, %g0				/* IEU1		Group		*/
+	be,pn		%icc, 4b				/* CTI				*/
+	 srlx		%o2, 16, %g3				/* IEU0				*/
+
+	andcc		%g3, 0xff, %g0				/* IEU1		Group		*/
+	be,pn		%icc, 4b				/* CTI				*/
+	 srlx		%o2, 8, %g3				/* IEU0				*/
+	andcc		%g3, 0xff, %g0				/* IEU1		Group		*/
+
+	be,pn		%icc, 4b				/* CTI				*/
+	 andcc		%o2, 0xff, %g0				/* IEU1		Group		*/
+	be,pn		%icc, 4b				/* CTI				*/
+	 sllx		%g6, %g5, %o3				/* IEU0				*/
+
+	ba,pt		%xcc, 11b				/* CTI		Group		*/
+	 ldxa		[%o1 + %o0] ASI_PNF, %g6		/* Load				*/
+END(strcmp)
+libc_hidden_def(strcmp)
diff --git a/ap/build/uClibc/libc/string/sparc/sparc64/strcpy.S b/ap/build/uClibc/libc/string/sparc/sparc64/strcpy.S
new file mode 100644
index 0000000..1317d54
--- /dev/null
+++ b/ap/build/uClibc/libc/string/sparc/sparc64/strcpy.S
@@ -0,0 +1,245 @@
+/* Copy SRC to DEST returning DEST.
+   For SPARC v9.
+   Copyright (C) 1998, 1999, 2003 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Jan Vondrak <jvon4518@ss1000.ms.mff.cuni.cz> and
+                  Jakub Jelinek <jj@ultra.linux.cz>.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <asm/asi.h>
+#ifndef XCC
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+	.register	%g6, #scratch
+#endif
+
+	/* Normally, this uses
+	   ((xword - 0x0101010101010101) & 0x8080808080808080) test
+	   to find out if any byte in xword could be zero. This is fast, but
+	   also gives false alarm for any byte in range 0x81-0xff. It does
+	   not matter for correctness, as if this test tells us there could
+	   be some zero byte, we check it byte by byte, but if bytes with
+	   high bits set are common in the strings, then this will give poor
+	   performance. You can #define EIGHTBIT_NOT_RARE and the algorithm
+	   will use a test that is one tick slower but more precise,
+	   ((xword - 0x0101010101010101) & (~xword) & 0x8080808080808080),
+	   which gives no false alarms (although when some bits are set, it
+	   does not tell which bytes are zero and which are not).
+	   It has yet to be measured what the correct default for glibc
+	   should be these days for an average user.
+	 */
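+	/* Hypothetical C outline of the aligned fast path below (exposition
+	   only, not part of the original source; src and dst stand for
+	   suitably aligned uint64_t pointers):
+
+	     uint64_t w = *src++;
+	     while (!((w - 0x0101010101010101ULL) & 0x8080808080808080ULL)) {
+	         *dst++ = w;                  // no zero byte possible: copy
+	         w = *src++;                  // a full xword per iteration
+	     }
+	     // w may hold the terminating NUL (the test can false-alarm);
+	     // check its bytes from the most significant end (big-endian):
+	     // if none is zero, store w and keep going, otherwise store
+	     // only the bytes up to and including the NUL and return.
+	 */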
+
+	.text
+	.align		32
+ENTRY(strcpy)
+	sethi		%hi(0x01010101), %g1		/* IEU0		Group		*/
+	mov		%o0, %g6			/* IEU1				*/
+	or		%g1, %lo(0x01010101), %g1	/* IEU0		Group		*/
+	andcc		%o0, 7, %g0			/* IEU1				*/
+
+	sllx		%g1, 32, %g2			/* IEU0		Group		*/
+	bne,pn		%icc, 12f			/* CTI				*/
+	 andcc		%o1, 7, %g3			/* IEU1				*/
+	or		%g1, %g2, %g1			/* IEU0		Group		*/
+
+	bne,pn		%icc, 14f			/* CTI				*/
+	 sllx		%g1, 7, %g2			/* IEU0		Group		*/
+1:	ldx		[%o1], %o3			/* Load				*/
+	add		%o1, 8, %o1			/* IEU1				*/
+
+2:	mov		%o3, %g3			/* IEU0		Group		*/
+3:	sub		%o3, %g1, %o2			/* IEU1				*/
+	ldxa		[%o1] ASI_PNF, %o3		/* Load				*/
+#ifdef EIGHTBIT_NOT_RARE
+	andn		%o2, %g3, %o2			/* IEU0		Group		*/
+#endif
+	add		%o0, 8, %o0			/* IEU0		Group		*/
+
+	andcc		%o2, %g2, %g0			/* IEU1				*/
+	add		%o1, 8, %o1			/* IEU0		Group		*/
+	be,a,pt		%xcc, 2b			/* CTI				*/
+	 stx		%g3, [%o0 - 8]			/* Store			*/
+
+	srlx		%g3, 56, %g5			/* IEU0		Group		*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 11f			/* CTI				*/
+	 srlx		%g3, 48, %g4			/* IEU0				*/
+
+	andcc		%g4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 10f			/* CTI				*/
+	 srlx		%g3, 40, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 9f			/* CTI				*/
+	 srlx		%g3, 32, %g4			/* IEU0				*/
+	andcc		%g4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 8f			/* CTI				*/
+
+	 srlx		%g3, 24, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 7f			/* CTI				*/
+	 srlx		%g3, 16, %g4			/* IEU0				*/
+
+	andcc		%g4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 6f			/* CTI				*/
+	 srlx		%g3, 8, %g5			/* IEU0				*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 5f			/* CTI				*/
+	 sub		%o3, %g1, %o2			/* IEU0				*/
+	stx		%g3, [%o0 - 8]			/* Store	Group		*/
+	andcc		%g3, 0xff, %g0			/* IEU1				*/
+
+	bne,pt		%icc, 3b			/* CTI				*/
+	 mov		%o3, %g3			/* IEU0		Group		*/
+4:	retl						/* CTI+IEU1	Group		*/
+	 mov		%g6, %o0			/* IEU0				*/
+
+	.align		16
+5:	stb		%g5, [%o0 - 2]			/* Store	Group		*/
+	srlx		%g3, 16, %g4			/* IEU0				*/
+6:	sth		%g4, [%o0 - 4]			/* Store	Group		*/
+	srlx		%g3, 32, %g4			/* IEU0				*/
+
+	stw		%g4, [%o0 - 8]			/* Store	Group		*/
+	retl						/* CTI+IEU1	Group		*/
+	 mov		%g6, %o0			/* IEU0				*/
+7:	stb		%g5, [%o0 - 4]			/* Store	Group		*/
+
+	srlx		%g3, 32, %g4			/* IEU0				*/
+8:	stw		%g4, [%o0 - 8]			/* Store	Group		*/
+	retl						/* CTI+IEU1	Group		*/
+	 mov		%g6, %o0 			/* IEU0				*/
+
+9:	stb		%g5, [%o0 - 6]			/* Store	Group		*/
+	srlx		%g3, 48, %g4			/* IEU0				*/
+10:	sth		%g4, [%o0 - 8]			/* Store	Group		*/
+	retl						/* CTI+IEU1	Group		*/
+
+	 mov		%g6, %o0			/* IEU0				*/
+11:	stb		%g5, [%o0 - 8]			/* Store	Group		*/
+	retl						/* CTI+IEU1	Group		*/
+	 mov		%g6, %o0			/* IEU0				*/
+
+12:	or		%g1, %g2, %g1			/* IEU0		Group		*/
+	ldub		[%o1], %o3			/* Load				*/
+	sllx		%g1, 7, %g2			/* IEU0		Group		*/
+	stb		%o3, [%o0]			/* Store	Group		*/
+
+13:	add		%o0, 1, %o0			/* IEU0				*/
+	add		%o1, 1, %o1			/* IEU1				*/
+	andcc		%o3, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 4b			/* CTI				*/
+
+	 lduba		[%o1] ASI_PNF, %o3		/* Load				*/
+	andcc		%o0, 7, %g0			/* IEU1		Group		*/
+	bne,a,pt	%icc, 13b			/* CTI				*/
+	 stb		%o3, [%o0]			/* Store			*/
+
+	andcc		%o1, 7, %g3			/* IEU1		Group		*/
+	be,a,pt		%icc, 1b			/* CTI				*/
+	 ldx		[%o1], %o3			/* Load				*/
+14:	orcc		%g0, 64, %g4			/* IEU1		Group		*/
+
+	sllx		%g3, 3, %g5			/* IEU0				*/
+	sub		%o1, %g3, %o1			/* IEU0		Group		*/
+	sub		%g4, %g5, %g4			/* IEU1				*/
+							/* %g1 = 0101010101010101	*
+							 * %g2 = 8080808080808080	*
+							 * %g3 = source alignment	*
+							 * %g5 = number of bits to shift left  *
+							 * %g4 = number of bits to shift right */
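+							/* Hypothetical sketch (exposition only): the
+							 * misaligned source word used below is
+							 *   word = (prev << %g5) | (next >> %g4);
+							 * so every store writes a full aligned xword. */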
+	ldxa		[%o1] ASI_PNF, %o5		/* Load		Group		*/
+
+	addcc		%o1, 8, %o1			/* IEU1				*/
+15:	sllx		%o5, %g5, %o3			/* IEU0		Group		*/
+	ldxa		[%o1] ASI_PNF, %o5		/* Load				*/
+	srlx		%o5, %g4, %o4			/* IEU0		Group		*/
+
+	add		%o0, 8, %o0			/* IEU1				*/
+	or		%o3, %o4, %o3			/* IEU0		Group		*/
+	add		%o1, 8, %o1			/* IEU1				*/
+	sub		%o3, %g1, %o4			/* IEU0		Group		*/
+
+#ifdef EIGHTBIT_NOT_RARE
+	andn		%o4, %o3, %o4			/* IEU0		Group		*/
+#endif
+	andcc		%o4, %g2, %g0			/* IEU1		Group		*/
+	be,a,pt		%xcc, 15b			/* CTI				*/
+	 stx		%o3, [%o0 - 8]			/* Store			*/
+	srlx		%o3, 56, %o4			/* IEU0		Group		*/
+
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 22f			/* CTI				*/
+	 srlx		%o3, 48, %o4			/* IEU0				*/
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 21f			/* CTI				*/
+	 srlx		%o3, 40, %o4			/* IEU0				*/
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 20f			/* CTI				*/
+
+	 srlx		%o3, 32, %o4			/* IEU0				*/
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 19f			/* CTI				*/
+	 srlx		%o3, 24, %o4			/* IEU0				*/
+
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 18f			/* CTI				*/
+	 srlx		%o3, 16, %o4			/* IEU0				*/
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 17f			/* CTI				*/
+	 srlx		%o3, 8, %o4			/* IEU0				*/
+	andcc		%o4, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 16f			/* CTI				*/
+
+	 andcc		%o3, 0xff, %g0			/* IEU1		Group		*/
+	bne,pn		%icc, 15b			/* CTI				*/
+	 stx		%o3, [%o0 - 8]			/* Store			*/
+	retl						/* CTI+IEU1	Group		*/
+
+	 mov		%g6, %o0			/* IEU0				*/
+
+	.align		16
+16:	srlx		%o3, 8, %o4			/* IEU0		Group		*/
+	stb		%o4, [%o0 - 2]			/* Store			*/
+17:	srlx		%o3, 16, %o4			/* IEU0		Group		*/
+	stb		%o4, [%o0 - 3]			/* Store			*/
+
+18:	srlx		%o3, 24, %o4			/* IEU0		Group		*/
+	stb		%o4, [%o0 - 4]			/* Store			*/
+19:	srlx		%o3, 32, %o4			/* IEU0		Group		*/
+	stw		%o4, [%o0 - 8]			/* Store			*/
+
+	retl						/* CTI+IEU1	Group		*/
+	 mov		%g6, %o0 			/* IEU0				*/
+	nop
+	nop
+
+20:	srlx		%o3, 40, %o4			/* IEU0		Group		*/
+	stb		%o4, [%o0 - 6]			/* Store			*/
+21:	srlx		%o3, 48, %o4			/* IEU0		Group		*/
+	stb		%o4, [%o0 - 7]			/* Store			*/
+
+22:	srlx		%o3, 56, %o4			/* IEU0		Group		*/
+	stb		%o4, [%o0 - 8]			/* Store			*/
+	retl						/* CTI+IEU1	Group		*/
+	 mov		%g6, %o0			/* IEU0				*/
+END(strcpy)
+
+libc_hidden_def(strcpy)
diff --git a/ap/build/uClibc/libc/string/sparc/sparc64/strlen.S b/ap/build/uClibc/libc/string/sparc/sparc64/strlen.S
new file mode 100644
index 0000000..1fe8549
--- /dev/null
+++ b/ap/build/uClibc/libc/string/sparc/sparc64/strlen.S
@@ -0,0 +1,173 @@
+/* Determine the length of a string.  For SPARC v9.
+   Copyright (C) 1998, 1999, 2003 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Jan Vondrak <jvon4518@ss1000.ms.mff.cuni.cz> and
+                  Jakub Jelinek <jj@ultra.linux.cz>.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <asm/asi.h>
+
+	/* Normally, this uses
+	   ((xword - 0x0101010101010101) & 0x8080808080808080) test
+	   to find out if any byte in xword could be zero. This is fast, but
+	   also gives false alarm for any byte in range 0x81-0xff. It does
+	   not matter for correctness, as if this test tells us there could
+	   be some zero byte, we check it byte by byte, but if bytes with
+	   high bits set are common in the strings, then this will give poor
+	   performance. You can #define EIGHTBIT_NOT_RARE and the algorithm
+	   will use a test that is one tick slower but more precise,
+	   ((xword - 0x0101010101010101) & (~xword) & 0x8080808080808080),
+	   which gives no false alarms (although when some bits are set, it
+	   does not tell which bytes are zero and which are not).
+	   It has yet to be measured what the correct default for glibc
+	   should be these days for an average user.
+	 */
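+	/* Hypothetical C outline of the loop below (exposition only, not
+	   part of the original source; p stands for an aligned uint64_t
+	   pointer into the string):
+
+	     uint64_t w;
+	     do
+	         w = *p++;
+	     while (!((w - 0x0101010101010101ULL) & 0x8080808080808080ULL));
+	     // w may contain the terminating NUL (the test can false-alarm);
+	     // scan its bytes from the most significant end (big-endian) to
+	     // find the first zero byte, or resume the loop if there is none,
+	     // and compute the length from p and the byte position.
+	 */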
+
+	.text
+	.align		32
+ENTRY(strlen)
+	sethi		%hi(0x01010101), %g1		/* IEU0		Group		*/
+	ldub		[%o0], %o3			/* Load				*/
+	or		%g1, %lo(0x01010101), %g1	/* IEU0		Group		*/
+	mov		%o0, %o1			/* IEU1				*/
+
+	sllx		%g1, 32, %g4			/* IEU0		Group 		*/
+	andcc		%o0, 7, %g0			/* IEU1				*/
+	or		%g1, %g4, %g1			/* IEU0		Group		*/
+	brz,pn		%o3, 13f			/* CTI+IEU1			*/
+
+	 sllx		%g1, 7, %g4			/* IEU0		Group		*/
+	bne,a,pn	%icc, 15f			/* CTI				*/
+	 add		%o0, 1, %o0			/* IEU1				*/
+							/* %g1 = 0x0101010101010101	*
+							 * %g4 = 0x8080808080808080	*
+							 * %o0 = string pointer		*
+							 * %o1 = start of string	*/
+1:	ldx		[%o0], %o3			/* Load		Group		*/
+
+	add		%o0, 8, %o0			/* IEU1				*/
+2:	sub		%o3, %g1, %o2			/* IEU0		Group		*/
+#ifdef EIGHTBIT_NOT_RARE
+	andn		%o2, %o3, %o5			/* IEU0		Group		*/
+	ldxa		[%o0] ASI_PNF, %o3		/* Load				*/
+	andcc		%o5, %g4, %g0			/* IEU1		Group		*/
+#else
+	ldxa		[%o0] ASI_PNF, %o3		/* Load				*/
+	andcc		%o2, %g4, %g0			/* IEU1		Group		*/
+#endif
+
+	be,pt		%xcc, 2b			/* CTI				*/
+	 add		%o0, 8, %o0			/* IEU0				*/
+ 	addcc		%o2, %g1, %g5			/* IEU1		Group		*/
+#ifdef EIGHTBIT_NOT_RARE
+	srlx		%o5, 32, %o5			/* IEU0				*/
+
+3:	andcc		%o5, %g4, %g0			/* IEU1		Group		*/
+#else
+	srlx		%o2, 32, %o2			/* IEU0				*/
+
+3:	andcc		%o2, %g4, %g0			/* IEU1		Group		*/
+#endif
+	be,pn		%xcc, 4f			/* CTI				*/
+	 srlx		%g5, 56, %o2			/* IEU0				*/
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 12f			/* CTI				*/
+	 srlx		%g5, 48, %o2			/* IEU0				*/
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 11f			/* CTI				*/
+
+	 srlx		%g5, 40, %o2			/* IEU0				*/
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 10f			/* CTI				*/
+	 srlx		%g5, 32, %o2			/* IEU0				*/
+
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 9f			/* CTI				*/
+4:	 srlx		%g5, 24, %o2			/* IEU0				*/
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+
+	be,pn		%icc, 8f			/* CTI				*/
+	 srlx		%g5, 16, %o2			/* IEU0				*/
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 7f			/* CTI				*/
+
+	 srlx		%g5, 8, %o2			/* IEU0				*/
+	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 6f			/* CTI				*/
+	 sub		%o3, %g1, %o2			/* IEU0				*/
+
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
+	be,pn		%icc, 5f			/* CTI				*/
+	 ldxa		[%o0] ASI_PNF, %o3		/* Load				*/
+	andcc		%o2, %g4, %g0			/* IEU1		Group		*/
+
+	be,pt		%xcc, 2b			/* CTI				*/
+	 add		%o0, 8, %o0			/* IEU0				*/
+	addcc		%o2, %g1, %g5			/* IEU1		Group		*/
+	ba,pt		%xcc, 3b			/* CTI				*/
+
+	 srlx		%o2, 32, %o2			/* IEU0				*/
+5:	add		%o0, -9, %o0			/* IEU0		Group		*/
+	retl						/* CTI+IEU1	Group		*/
+	 sub		%o0, %o1, %o0			/* IEU0				*/
+
+6:	add		%o0, -10, %o0			/* IEU0		Group		*/
+	retl						/* CTI+IEU1	Group		*/
+	 sub		%o0, %o1, %o0			/* IEU0				*/
+7:	add		%o0, -11, %o0			/* IEU0		Group		*/
+
+	retl						/* CTI+IEU1	Group		*/
+	 sub		%o0, %o1, %o0			/* IEU0				*/
+8:	add		%o0, -12, %o0			/* IEU0		Group		*/
+	retl						/* CTI+IEU1	Group		*/
+
+	 sub		%o0, %o1, %o0			/* IEU0				*/
+9:	add		%o0, -13, %o0			/* IEU0		Group		*/
+	retl						/* CTI+IEU1	Group		*/
+	 sub		%o0, %o1, %o0			/* IEU0				*/
+
+10:	add		%o0, -14, %o0			/* IEU0		Group		*/
+	retl						/* CTI+IEU1	Group		*/
+	 sub		%o0, %o1, %o0			/* IEU0				*/
+11:	add		%o0, -15, %o0			/* IEU0		Group		*/
+
+	retl						/* CTI+IEU1	Group		*/
+	 sub		%o0, %o1, %o0			/* IEU0				*/
+12:	add		%o0, -16, %o0			/* IEU0		Group		*/
+	retl						/* CTI+IEU1	Group		*/
+
+	 sub		%o0, %o1, %o0			/* IEU0				*/
+13:	retl						/* CTI+IEU1	Group		*/
+	 mov		0, %o0				/* IEU0				*/
+	nop
+
+15:	ldub		[%o0], %o3			/* Load		Group		*/
+16:	andcc		%o0, 7, %g0			/* IEU1				*/
+	be,pn		%icc, 1b			/* CTI				*/
+	 nop						/* IEU0		Group		*/
+
+	add		%o0, 1, %o0			/* IEU1				*/
+	andcc		%o3, 0xff, %g0			/* IEU1		Group		*/
+	bne,a,pt	%icc, 16b			/* CTI				*/
+	 lduba		[%o0] ASI_PNF, %o3		/* Load				*/
+
+	add		%o0, -1, %o0			/* IEU0		Group		*/
+	retl						/* CTI+IEU1	Group		*/
+	 sub		%o0, %o1, %o0			/* IEU0				*/
+END(strlen)
+libc_hidden_def(strlen)