[T106][ZXW-22]7520V3SCV2.01.01.02P42U09_VEC_V0.8_AP_VEC origin source commit

Change-Id: Ic6e05d89ecd62fc34f82b23dcf306c93764aec4b
diff --git a/ap/build/uClibc/libc/string/ia64/Makefile b/ap/build/uClibc/libc/string/ia64/Makefile
new file mode 100644
index 0000000..0a95346
--- /dev/null
+++ b/ap/build/uClibc/libc/string/ia64/Makefile
@@ -0,0 +1,13 @@
+# Makefile for uClibc
+#
+# Copyright (C) 2000-2005 Erik Andersen <andersen@uclibc.org>
+#
+# Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
+#
+
+top_srcdir:=../../../
+top_builddir:=../../../
+all: objs
+include $(top_builddir)Rules.mak
+include ../Makefile.in
+include $(top_srcdir)Makerules
diff --git a/ap/build/uClibc/libc/string/ia64/bcopy.S b/ap/build/uClibc/libc/string/ia64/bcopy.S
new file mode 100644
index 0000000..c5637c3
--- /dev/null
+++ b/ap/build/uClibc/libc/string/ia64/bcopy.S
@@ -0,0 +1,14 @@
+#include "sysdep.h"
+
+#ifdef __UCLIBC_SUSV3_LEGACY__
+
+ENTRY(bcopy)
+	.regstk 3, 0, 0, 0
+	mov r8 = in0
+	mov in0 = in1
+	;;
+	mov in1 = r8
+	br.cond.sptk.many HIDDEN_JUMPTARGET(memmove)
+END(bcopy)
+
+#endif
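
[Editorial note, not part of the patch: the bcopy stub above only swaps its first
two arguments and branches to memmove.  A C sketch of the same behaviour, with the
illustrative name bcopy_equiv, would be:

#include <string.h>

/* bcopy(src, dest, n) is memmove(dest, src, n) with the first two
 * arguments swapped -- exactly what the mov/mov/br sequence above does. */
static void bcopy_equiv(const void *src, void *dest, size_t n)
{
    memmove(dest, src, n);
}
]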
diff --git a/ap/build/uClibc/libc/string/ia64/bzero.S b/ap/build/uClibc/libc/string/ia64/bzero.S
new file mode 100644
index 0000000..1f0f8b7
--- /dev/null
+++ b/ap/build/uClibc/libc/string/ia64/bzero.S
@@ -0,0 +1,320 @@
+/* Optimized version of the standard bzero() function.
+   This file is part of the GNU C Library.
+   Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc.
+   Contributed by Dan Pop for Itanium <Dan.Pop@cern.ch>.
+   Rewritten for McKinley by Sverre Jarp, HP Labs/CERN <Sverre.Jarp@cern.ch>
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* Return: dest
+
+   Inputs:
+        in0:    dest
+        in1:    count
+
+   The algorithm is fairly straightforward: set byte by byte until we
+   get to a 16B-aligned address, then loop on 128B chunks using an
+   early store as prefetching, then loop on 32B chunks, then clear remaining
+   words, finally clear remaining bytes.
+   Since a stf.spill f0 can store 16B in one go, we use this instruction
+   to get peak speed.  */
+
+#include "sysdep.h"
+
+#ifdef __UCLIBC_SUSV3_LEGACY__
+
+#undef ret
+
+#define dest		in0
+#define	cnt		in1
+
+#define tmp		r31
+#define save_lc		r30
+#define ptr0		r29
+#define ptr1		r28
+#define ptr2		r27
+#define ptr3		r26
+#define ptr9		r24
+#define	loopcnt		r23
+#define linecnt		r22
+#define bytecnt		r21
+
+/* This routine uses only scratch predicate registers (p6 - p15) */
+#define p_scr		p6	/* default register for same-cycle branches */
+#define p_unalgn	p9
+#define p_y		p11
+#define p_n		p12
+#define p_yy		p13
+#define p_nn		p14
+
+#define movi0		mov
+
+#define MIN1		15
+#define MIN1P1HALF	8
+#define LINE_SIZE	128
+#define LSIZE_SH        7			/* shift amount */
+#define PREF_AHEAD	8
+
+#define USE_FLP
+#if defined(USE_INT)
+#define store		st8
+#define myval		r0
+#elif defined(USE_FLP)
+#define store		stf8
+#define myval		f0
+#endif
+
+.align	64
+ENTRY(bzero)
+{ .mmi
+	.prologue
+	alloc	tmp = ar.pfs, 2, 0, 0, 0
+	lfetch.nt1 [dest]
+	.save   ar.lc, save_lc
+	movi0	save_lc = ar.lc
+} { .mmi
+	.body
+	mov	ret0 = dest		/* return value */
+	nop.m	0
+	cmp.eq	p_scr, p0 = cnt, r0
+;; }
+{ .mmi
+	and	ptr2 = -(MIN1+1), dest	/* aligned address */
+	and	tmp = MIN1, dest	/* prepare to check for alignment */
+	tbit.nz p_y, p_n = dest, 0	/* Do we have an odd address? (M_B_U) */
+} { .mib
+	mov	ptr1 = dest
+	nop.i	0
+(p_scr)	br.ret.dpnt.many rp		/* return immediately if count = 0 */
+;; }
+{ .mib
+	cmp.ne	p_unalgn, p0 = tmp, r0
+} { .mib					/* NB: # of bytes to move is 1 */
+	sub	bytecnt = (MIN1+1), tmp		/*     higher than loopcnt */
+	cmp.gt	p_scr, p0 = 16, cnt		/* is it a minimalistic task? */
+(p_scr)	br.cond.dptk.many .move_bytes_unaligned	/* go move just a few (M_B_U) */
+;; }
+{ .mmi
+(p_unalgn) add	ptr1 = (MIN1+1), ptr2		/* after alignment */
+(p_unalgn) add	ptr2 = MIN1P1HALF, ptr2		/* after alignment */
+(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3	/* should we do a st8 ? */
+;; }
+{ .mib
+(p_y)	add	cnt = -8, cnt
+(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2	/* should we do a st4 ? */
+} { .mib
+(p_y)	st8	[ptr2] = r0,-4
+(p_n)	add	ptr2 = 4, ptr2
+;; }
+{ .mib
+(p_yy)	add	cnt = -4, cnt
+(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1	/* should we do a st2 ? */
+} { .mib
+(p_yy)	st4	[ptr2] = r0,-2
+(p_nn)	add	ptr2 = 2, ptr2
+;; }
+{ .mmi
+	mov	tmp = LINE_SIZE+1		/* for compare */
+(p_y)	add	cnt = -2, cnt
+(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0	/* should we do a st1 ? */
+} { .mmi
+	nop.m	0
+(p_y)	st2	[ptr2] = r0,-1
+(p_n)	add	ptr2 = 1, ptr2
+;; }
+
+{ .mmi
+(p_yy)	st1	[ptr2] = r0
+	cmp.gt	p_scr, p0 = tmp, cnt		/* is it a minimalistic task? */
+} { .mbb
+(p_yy)	add	cnt = -1, cnt
+(p_scr)	br.cond.dpnt.many .fraction_of_line	/* go move just a few */
+;; }
+{ .mib
+	nop.m	0
+	shr.u	linecnt = cnt, LSIZE_SH
+	nop.b	0
+;; }
+
+	.align 32
+.l1b:	/* ------------------  L1B: store ahead into cache lines; fill later */
+{ .mmi
+	and	tmp = -(LINE_SIZE), cnt		/* compute end of range */
+	mov	ptr9 = ptr1			/* used for prefetching */
+	and	cnt = (LINE_SIZE-1), cnt	/* remainder */
+} { .mmi
+	mov	loopcnt = PREF_AHEAD-1		/* default prefetch loop */
+	cmp.gt	p_scr, p0 = PREF_AHEAD, linecnt	/* check against actual value */
+;; }
+{ .mmi
+(p_scr)	add	loopcnt = -1, linecnt
+	add	ptr2 = 16, ptr1	/* start of stores (beyond prefetch stores) */
+	add	ptr1 = tmp, ptr1	/* first address beyond total range */
+;; }
+{ .mmi
+	add	tmp = -1, linecnt	/* next loop count */
+	movi0	ar.lc = loopcnt
+;; }
+.pref_l1b:
+{ .mib
+	stf.spill [ptr9] = f0, 128	/* Do stores one cache line apart */
+	nop.i   0
+	br.cloop.dptk.few .pref_l1b
+;; }
+{ .mmi
+	add	ptr0 = 16, ptr2		/* Two stores in parallel */
+	movi0	ar.lc = tmp
+;; }
+.l1bx:
+ { .mmi
+	stf.spill [ptr2] = f0, 32
+	stf.spill [ptr0] = f0, 32
+ ;; }
+ { .mmi
+	stf.spill [ptr2] = f0, 32
+	stf.spill [ptr0] = f0, 32
+ ;; }
+ { .mmi
+	stf.spill [ptr2] = f0, 32
+	stf.spill [ptr0] = f0, 64
+	cmp.lt	p_scr, p0 = ptr9, ptr1	/* do we need more prefetching? */
+ ;; }
+{ .mmb
+	stf.spill [ptr2] = f0, 32
+(p_scr)	stf.spill [ptr9] = f0, 128
+	br.cloop.dptk.few .l1bx
+;; }
+{ .mib
+	cmp.gt  p_scr, p0 = 8, cnt	/* just a few bytes left ? */
+(p_scr)	br.cond.dpnt.many  .move_bytes_from_alignment
+;; }
+
+.fraction_of_line:
+{ .mib
+	add	ptr2 = 16, ptr1
+	shr.u	loopcnt = cnt, 5	/* loopcnt = cnt / 32 */
+;; }
+{ .mib
+	cmp.eq	p_scr, p0 = loopcnt, r0
+	add	loopcnt = -1, loopcnt
+(p_scr)	br.cond.dpnt.many .store_words
+;; }
+{ .mib
+	and	cnt = 0x1f, cnt		/* compute the remaining cnt */
+	movi0   ar.lc = loopcnt
+;; }
+	.align 32
+.l2:	/* -----------------------------  L2A:  store 32B in 2 cycles */
+{ .mmb
+	store	[ptr1] = myval, 8
+	store	[ptr2] = myval, 8
+;; } { .mmb
+	store	[ptr1] = myval, 24
+	store	[ptr2] = myval, 24
+	br.cloop.dptk.many .l2
+;; }
+.store_words:
+{ .mib
+	cmp.gt	p_scr, p0 = 8, cnt	/* just a few bytes left ? */
+(p_scr)	br.cond.dpnt.many .move_bytes_from_alignment	/* Branch */
+;; }
+
+{ .mmi
+	store	[ptr1] = myval, 8	/* store */
+	cmp.le	p_y, p_n = 16, cnt	/* */
+	add	cnt = -8, cnt		/* subtract */
+;; }
+{ .mmi
+(p_y)	store	[ptr1] = myval, 8	/* store */
+(p_y)	cmp.le.unc p_yy, p_nn = 16, cnt
+(p_y)	add	cnt = -8, cnt		/* subtract */
+;; }
+{ .mmi					/* store */
+(p_yy)	store	[ptr1] = myval, 8
+(p_yy)	add	cnt = -8, cnt		/* subtract */
+;; }
+
+.move_bytes_from_alignment:
+{ .mib
+	cmp.eq	p_scr, p0 = cnt, r0
+	tbit.nz.unc p_y, p0 = cnt, 2	/* should we terminate with a st4 ? */
+(p_scr)	br.cond.dpnt.few .restore_and_exit
+;; }
+{ .mib
+(p_y)	st4	[ptr1] = r0,4
+	tbit.nz.unc p_yy, p0 = cnt, 1	/* should we terminate with a st2 ? */
+;; }
+{ .mib
+(p_yy)	st2	[ptr1] = r0,2
+	tbit.nz.unc p_y, p0 = cnt, 0	/* should we terminate with a st1 ? */
+;; }
+
+{ .mib
+(p_y)	st1	[ptr1] = r0
+;; }
+.restore_and_exit:
+{ .mib
+	nop.m	0
+	movi0	ar.lc = save_lc
+	br.ret.sptk.many rp
+;; }
+
+.move_bytes_unaligned:
+{ .mmi
+       .pred.rel "mutex",p_y, p_n
+       .pred.rel "mutex",p_yy, p_nn
+(p_n)	cmp.le  p_yy, p_nn = 4, cnt
+(p_y)	cmp.le  p_yy, p_nn = 5, cnt
+(p_n)	add	ptr2 = 2, ptr1
+} { .mmi
+(p_y)	add	ptr2 = 3, ptr1
+(p_y)	st1	[ptr1] = r0, 1		/* fill 1 (odd-aligned) byte */
+(p_y)	add	cnt = -1, cnt		/* [15, 14 (or less) left] */
+;; }
+{ .mmi
+(p_yy)	cmp.le.unc p_y, p0 = 8, cnt
+	add	ptr3 = ptr1, cnt	/* prepare last store */
+	movi0	ar.lc = save_lc
+} { .mmi
+(p_yy)	st2	[ptr1] = r0, 4		/* fill 2 (aligned) bytes */
+(p_yy)	st2	[ptr2] = r0, 4		/* fill 2 (aligned) bytes */
+(p_yy)	add	cnt = -4, cnt		/* [11, 10 (or less) left] */
+;; }
+{ .mmi
+(p_y)	cmp.le.unc p_yy, p0 = 8, cnt
+	add	ptr3 = -1, ptr3		/* last store */
+	tbit.nz p_scr, p0 = cnt, 1	/* will there be a st2 at the end ? */
+} { .mmi
+(p_y)	st2	[ptr1] = r0, 4		/* fill 2 (aligned) bytes */
+(p_y)	st2	[ptr2] = r0, 4		/* fill 2 (aligned) bytes */
+(p_y)	add	cnt = -4, cnt		/* [7, 6 (or less) left] */
+;; }
+{ .mmi
+(p_yy)	st2	[ptr1] = r0, 4		/* fill 2 (aligned) bytes */
+(p_yy)	st2	[ptr2] = r0, 4		/* fill 2 (aligned) bytes */
+					/* [3, 2 (or less) left] */
+	tbit.nz p_y, p0 = cnt, 0	/* will there be a st1 at the end ? */
+} { .mmi
+(p_yy)	add	cnt = -4, cnt
+;; }
+{ .mmb
+(p_scr)	st2	[ptr1] = r0		/* fill 2 (aligned) bytes */
+(p_y)	st1	[ptr3] = r0		/* fill last byte (using ptr3) */
+	br.ret.sptk.many rp
+;; }
+END(bzero)
+
+#endif
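
[Editorial note, not part of the patch: a rough C illustration of the staging the
bzero header comment describes.  The name bzero_sketch is hypothetical, and the
8-byte memcpy stores stand in for the 16-byte stf.spill f0 the assembly uses.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void bzero_sketch(void *s, size_t n)
{
    unsigned char *p = s;
    uint64_t zero = 0;

    while (n && ((uintptr_t)p & 15)) {   /* byte stores up to 16B alignment */
        *p++ = 0;
        n--;
    }
    while (n >= 128) {                   /* 128B chunks (asm: stf.spill, 16B each) */
        for (int i = 0; i < 128; i += 8)
            memcpy(p + i, &zero, 8);
        p += 128;
        n -= 128;
    }
    while (n >= 32) {                    /* 32B chunks */
        for (int i = 0; i < 32; i += 8)
            memcpy(p + i, &zero, 8);
        p += 32;
        n -= 32;
    }
    while (n >= 8) {                     /* remaining 8-byte words */
        memcpy(p, &zero, 8);
        p += 8;
        n -= 8;
    }
    while (n--)                          /* trailing bytes */
        *p++ = 0;
}
]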
diff --git a/ap/build/uClibc/libc/string/ia64/memccpy.S b/ap/build/uClibc/libc/string/ia64/memccpy.S
new file mode 100644
index 0000000..259d680
--- /dev/null
+++ b/ap/build/uClibc/libc/string/ia64/memccpy.S
@@ -0,0 +1,213 @@
+/* Optimized version of the memccpy() function.
+   This file is part of the GNU C Library.
+   Copyright (C) 2000, 2001, 2003 Free Software Foundation, Inc.
+   Contributed by Dan Pop <Dan.Pop@cern.ch>.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* Return: a pointer to the next byte after char in dest or NULL
+
+   Inputs:
+        in0:    dest
+        in1:    src
+	in2:	char
+        in3:    byte count
+
+   This implementation assumes little endian mode (UM.be = 0).
+
+   This implementation assumes that it is safe to do read ahead
+   in the src block, without getting beyond its limit.  */
+
+#include "sysdep.h"
+#undef ret
+
+#define OP_T_THRES 	16
+#define OPSIZ 		8
+
+#define saved_pr	r17
+#define saved_lc	r18
+#define dest		r19
+#define src		r20
+#define len		r21
+#define asrc		r22
+#define tmp		r23
+#define char		r24
+#define charx8		r25
+#define saved_ec	r26
+#define sh2		r28
+#define	sh1		r29
+#define loopcnt		r30
+#define	value		r31
+
+#ifdef GAS_ALIGN_BREAKS_UNWIND_INFO
+/* Manually force proper loop-alignment.  Note: be sure to
+   double-check the code-layout after making any changes to
+   this routine! */
+# define ALIGN(n)	{ nop 0 }
+#else
+# define ALIGN(n)	.align n
+#endif
+
+ENTRY(memccpy)
+	.prologue
+	alloc 	r2 = ar.pfs, 4, 40 - 4, 0, 40
+
+#include "softpipe.h"
+	.rotr	r[MEMLAT + 7], tmp1[4], tmp2[4], val[4], tmp3[2], pos0[2]
+	.rotp	p[MEMLAT + 6 + 1]
+
+	mov	ret0 = r0		/* return NULL if no match */
+	.save pr, saved_pr
+	mov	saved_pr = pr		/* save the predicate registers */
+	mov 	dest = in0		/* dest */
+	.save ar.lc, saved_lc
+        mov 	saved_lc = ar.lc	/* save the loop counter */
+        mov 	saved_ec = ar.ec	/* save the epilog counter */
+	.body
+	mov 	src = in1		/* src */
+	extr.u	char = in2, 0, 8	/* char */
+	mov	len = in3		/* len */
+	sub	tmp = r0, in0		/* tmp = -dest */
+	cmp.ne	p7, p0 = r0, r0		/* clear p7 */
+	;;
+	and	loopcnt = 7, tmp	/* loopcnt = -dest % 8 */
+	cmp.ge	p6, p0 = OP_T_THRES, len	/* is len <= OP_T_THRES */
+	mov	ar.ec = 0		/* ec not guaranteed zero on entry */
+(p6)	br.cond.spnt	.cpyfew		/* copy byte by byte */
+	;;
+	cmp.eq	p6, p0 = loopcnt, r0
+	mux1	charx8 = char, @brcst
+(p6)	br.cond.sptk .dest_aligned
+	sub	len = len, loopcnt	/* len -= -dest % 8 */
+	adds	loopcnt = -1, loopcnt	/* --loopcnt */
+	;;
+	mov	ar.lc = loopcnt
+.l1:					/* copy -dest % 8 bytes */
+	ld1	value = [src], 1	/* value = *src++ */
+	;;
+	st1	[dest] = value, 1	/* *dest++ = value */
+	cmp.eq	p6, p0 = value, char
+(p6)	br.cond.spnt .foundit
+	br.cloop.dptk .l1
+.dest_aligned:
+	and	sh1 = 7, src 		/* sh1 = src % 8 */
+	and	tmp = -8, len   	/* tmp = len & -OPSIZ */
+	and	asrc = -8, src		/* asrc = src & -OPSIZ  -- align src */
+	shr.u	loopcnt = len, 3	/* loopcnt = len / 8 */
+	and	len = 7, len ;;		/* len = len % 8 */
+	shl	sh1 = sh1, 3		/* sh1 = 8 * (src % 8) */
+	adds	loopcnt = -1, loopcnt	/* --loopcnt */
+	mov     pr.rot = 1 << 16 ;;	/* set rotating predicates */
+	sub	sh2 = 64, sh1		/* sh2 = 64 - sh1 */
+	mov	ar.lc = loopcnt		/* set LC */
+	cmp.eq  p6, p0 = sh1, r0 	/* is the src aligned? */
+(p6)    br.cond.sptk .src_aligned ;;
+	add	src = src, tmp		/* src += len & -OPSIZ */
+	mov	ar.ec = MEMLAT + 6 + 1 	/* six more passes needed */
+	ld8	r[1] = [asrc], 8 	/* r[1] = w0 */
+	cmp.ne	p6, p0 = r0, r0	;;	/* clear p6 */
+	ALIGN(32)
+.l2:
+(p[0])		ld8.s	r[0] = [asrc], 8		/* r[0] = w1 */
+(p[MEMLAT])	shr.u	tmp1[0] = r[1 + MEMLAT], sh1	/* tmp1 = w0 >> sh1 */
+(p[MEMLAT])	shl	tmp2[0] = r[0 + MEMLAT], sh2  	/* tmp2 = w1 << sh2 */
+(p[MEMLAT+4])	xor	tmp3[0] = val[1], charx8
+(p[MEMLAT+5])	czx1.r	pos0[0] = tmp3[1]
+(p[MEMLAT+6])	chk.s	r[6 + MEMLAT], .recovery1	/* our data isn't */
+							/* valid - rollback! */
+(p[MEMLAT+6])	cmp.ne	p6, p0 = 8, pos0[1]
+(p6)		br.cond.spnt	.gotit
+(p[MEMLAT+6])	st8	[dest] = val[3], 8		/* store val to dest */
+(p[MEMLAT+3])	or	val[0] = tmp1[3], tmp2[3] 	/* val = tmp1 | tmp2 */
+		br.ctop.sptk    .l2
+		br.cond.sptk .cpyfew
+
+.src_aligned:
+		cmp.ne  p6, p0 = r0, r0			/* clear p6 */
+		mov     ar.ec = MEMLAT + 2 + 1 ;;	/* set EC */
+.l3:
+(p[0])		ld8.s	r[0] = [src], 8
+(p[MEMLAT])	xor	tmp3[0] = r[MEMLAT], charx8
+(p[MEMLAT+1])	czx1.r	pos0[0] = tmp3[1]
+(p[MEMLAT+2])	cmp.ne	p7, p0 = 8, pos0[1]
+(p[MEMLAT+2])	chk.s	r[MEMLAT+2], .recovery2
+(p7)		br.cond.spnt	.gotit
+.back2:
+(p[MEMLAT+2])	st8	[dest] = r[MEMLAT+2], 8
+		br.ctop.dptk .l3
+.cpyfew:
+	cmp.eq	p6, p0 = len, r0	/* is len == 0 ? */
+	adds	len = -1, len		/* --len; */
+(p6)	br.cond.spnt	.restore_and_exit ;;
+	mov	ar.lc = len
+.l4:
+	ld1	value = [src], 1
+	;;
+	st1	[dest] = value, 1
+	cmp.eq	p6, p0 = value, char
+(p6)	br.cond.spnt .foundit
+	br.cloop.dptk	.l4 ;;
+.foundit:
+(p6)	mov	ret0 = dest
+.restore_and_exit:
+	mov     pr = saved_pr, -1    	/* restore the predicate registers */
+	mov 	ar.lc = saved_lc	/* restore the loop counter */
+	mov 	ar.ec = saved_ec ;;	/* restore the epilog counter */
+	br.ret.sptk.many b0
+.gotit:
+	.pred.rel "mutex" p6, p7
+(p6)	mov	value = val[3]		/* if coming from l2 */
+(p7)	mov	value = r[MEMLAT+2]	/* if coming from l3 */
+	mov	ar.lc = pos0[1] ;;
+.l5:
+	extr.u	tmp = value, 0, 8 ;;
+	st1	[dest] = tmp, 1
+	shr.u	value = value, 8
+	br.cloop.sptk .l5 ;;
+	mov 	ret0 = dest
+	mov	pr = saved_pr, -1
+	mov	ar.lc = saved_lc
+	br.ret.sptk.many b0
+
+.recovery1:
+	adds	src = -(MEMLAT + 6 + 1) * 8, asrc
+	mov	loopcnt = ar.lc
+	mov	tmp = ar.ec ;;
+	sub	sh1 = (MEMLAT + 6 + 1), tmp
+	shr.u	sh2 = sh2, 3
+	;; 
+	shl	loopcnt = loopcnt, 3
+	sub	src = src, sh2
+	shl	sh1 = sh1, 3
+	shl	tmp = tmp, 3
+	;;
+	add	len = len, loopcnt
+	add	src = sh1, src ;;
+	add	len = tmp, len
+.back1:
+	br.cond.sptk .cpyfew
+
+.recovery2:
+	add	tmp = -(MEMLAT + 3) * 8, src
+(p7)	br.cond.spnt .gotit
+	;;
+	ld8	r[MEMLAT+2] = [tmp] ;;
+	xor	pos0[1] = r[MEMLAT+2], charx8 ;;
+	czx1.r	pos0[1] = pos0[1] ;;
+	cmp.ne	p7, p6 = 8, pos0[1]
+(p7)	br.cond.spnt .gotit
+	br.cond.sptk .back2
+END(memccpy)
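
[Editorial note, not part of the patch: the contract implemented above, stated as a
plain C reference; memccpy_ref is an illustrative name.

#include <stddef.h>

/* Copy at most n bytes from src to dest, stopping after the first byte
 * equal to (unsigned char)c; return the position in dest just past that
 * byte, or NULL if c was not found among the first n bytes. */
static void *memccpy_ref(void *dest, const void *src, int c, size_t n)
{
    unsigned char *d = dest;
    const unsigned char *s = src;
    unsigned char ch = (unsigned char)c;

    while (n--) {
        unsigned char b = *s++;
        *d++ = b;
        if (b == ch)
            return d;
    }
    return NULL;
}
]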
diff --git a/ap/build/uClibc/libc/string/ia64/memchr.S b/ap/build/uClibc/libc/string/ia64/memchr.S
new file mode 100644
index 0000000..f25e803
--- /dev/null
+++ b/ap/build/uClibc/libc/string/ia64/memchr.S
@@ -0,0 +1,131 @@
+/* Optimized version of the standard memchr() function.
+   This file is part of the GNU C Library.
+   Copyright (C) 2000, 2001, 2003 Free Software Foundation, Inc.
+   Contributed by Dan Pop <Dan.Pop@cern.ch>.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* Return: the address of the first occurrence of chr in str or NULL
+
+   Inputs:
+  	in0:	str
+  	in1:	chr
+  	in2:	byte count
+
+   This implementation assumes little endian mode.  For big endian mode,
+   the instruction czx1.r should be replaced by czx1.l.
+
+   The algorithm is fairly straightforward: search byte by byte until
+   we get to a word-aligned address, then search word by word as much as
+   possible; the remaining few bytes are searched one at a time.
+
+   The word by word search is performed by xor-ing the word with a word
+   containing chr in every byte.  If there is a hit, the result will
+   contain a zero byte in the corresponding position.  The presence and
+   position of that zero byte is detected with a czx instruction.
+
+   All the loops in this function could have had the internal branch removed
+   if br.ctop and br.cloop could be predicated :-(.  */
+
+#include "sysdep.h"
+#undef ret
+
+#define saved_pr	r15
+#define saved_lc	r16
+#define	chr		r17
+#define len		r18
+#define pos0		r20
+#define val		r21
+#define tmp		r24
+#define chrx8		r25
+#define loopcnt		r30
+
+#define str		in0
+
+ENTRY(__memchr)
+	.prologue
+	alloc r2 = ar.pfs, 3, 0, 29, 32
+#include "softpipe.h"
+	.rotr	value[MEMLAT+1], addr[MEMLAT+3], aux[2], poschr[2]
+	.rotp	p[MEMLAT+3]
+	.save ar.lc, saved_lc
+        mov 	saved_lc = ar.lc 	/* save the loop counter */
+	.save pr, saved_pr
+	mov	saved_pr = pr		/* save the predicates */
+	.body
+	mov 	ret0 = str
+	and 	tmp = 7, str		/* tmp = str % 8 */
+	cmp.ne	p7, p0 = r0, r0		/* clear p7 */
+	extr.u	chr = in1, 0, 8		/* chr = (unsigned char) in1 */
+	mov	len = in2
+	cmp.gtu	p6, p0 = 16, in2	/* use a simple loop for short */
+(p6)	br.cond.spnt .srchfew ;;	/* searches */
+	sub	loopcnt = 8, tmp	/* loopcnt = 8 - tmp */
+	cmp.eq	p6, p0 = tmp, r0
+(p6)	br.cond.sptk	.str_aligned;;
+	sub	len = len, loopcnt
+	adds	loopcnt = -1, loopcnt;;
+	mov	ar.lc = loopcnt
+.l1:
+	ld1	val = [ret0], 1
+	;;
+	cmp.eq	p6, p0 = val, chr
+(p6)	br.cond.spnt	.foundit
+	br.cloop.sptk	.l1 ;;
+.str_aligned:
+	cmp.ne	p6, p0 = r0, r0		/* clear p6 */
+	shr.u	loopcnt = len, 3	/* loopcnt = len / 8 */
+	and 	len = 7, len ;;		/* remaining len = len & 7 */
+	adds	loopcnt = -1, loopcnt
+	mov	ar.ec = MEMLAT + 3
+	mux1	chrx8 = chr, @brcst ;;	/* get a word full of chr */
+	mov	ar.lc = loopcnt
+	mov	pr.rot = 1 << 16 ;;
+.l2:
+(p[0])		mov	addr[0] = ret0
+(p[0])		ld8	value[0] = [ret0], 8
+(p[MEMLAT])	xor	aux[0] = value[MEMLAT], chrx8
+(p[MEMLAT+1])	czx1.r	poschr[0] = aux[1]
+(p[MEMLAT+2])	cmp.ne	p7, p0 = 8, poschr[1]
+(p7)		br.cond.dpnt .foundit
+		br.ctop.dptk .l2
+.srchfew:
+	adds	loopcnt = -1, len
+	cmp.eq	p6, p0 = len, r0
+(p6)	br.cond.spnt .notfound ;;
+	mov	ar.lc = loopcnt
+.l3:
+	ld1	val = [ret0], 1
+	;;
+	cmp.eq	p6, p0 = val, chr
+(p6)	br.cond.dpnt	.foundit
+	br.cloop.sptk	.l3 ;;
+.notfound:
+	cmp.ne	p6, p0 = r0, r0	/* clear p6 (p7 was already 0 when we got here) */
+	mov	ret0 = r0 ;;	/* return NULL */
+.foundit:
+	.pred.rel "mutex" p6, p7
+(p6)	adds	ret0 = -1, ret0 		   /* if we got here from l1 or l3 */
+(p7)	add	ret0 = addr[MEMLAT+2], poschr[1]   /* if we got here from l2 */
+	mov	pr = saved_pr, -1
+	mov	ar.lc = saved_lc
+	br.ret.sptk.many b0
+
+END(__memchr)
+
+weak_alias(__memchr, memchr)
+weak_alias(__memchr, __ubp_memchr)
+libc_hidden_def(memchr)
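
[Editorial note, not part of the patch: a portable C sketch of the word-at-a-time
scan described in the memchr header comment.  The czx1.r zero-byte detection is
approximated with the usual bit trick; memchr_sketch is an illustrative name.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void *memchr_sketch(const void *s, int c, size_t n)
{
    const unsigned char *p = s;
    unsigned char ch = (unsigned char)c;

    while (n && ((uintptr_t)p & 7)) {    /* head: bytes until 8B aligned */
        if (*p == ch)
            return (void *)p;
        p++;
        n--;
    }
    uint64_t pat = ch * 0x0101010101010101ULL;  /* mux1 chrx8 = chr, @brcst */
    while (n >= 8) {
        uint64_t w;
        memcpy(&w, p, 8);
        w ^= pat;                        /* a matching byte becomes zero */
        if ((w - 0x0101010101010101ULL) & ~w & 0x8080808080808080ULL)
            break;                       /* zero byte present: hit in this word */
        p += 8;
        n -= 8;
    }
    while (n--) {                        /* tail, plus the word holding the hit */
        if (*p == ch)
            return (void *)p;
        p++;
    }
    return NULL;
}
]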
diff --git a/ap/build/uClibc/libc/string/ia64/memcmp.S b/ap/build/uClibc/libc/string/ia64/memcmp.S
new file mode 100644
index 0000000..adb1a20
--- /dev/null
+++ b/ap/build/uClibc/libc/string/ia64/memcmp.S
@@ -0,0 +1,166 @@
+/* Optimized version of the standard memcmp() function.
+   This file is part of the GNU C Library.
+   Copyright (C) 2000, 2001, 2004 Free Software Foundation, Inc.
+   Contributed by Dan Pop <Dan.Pop@cern.ch>.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* Return: the result of the comparison
+
+   Inputs:
+        in0:    dest (aka s1)
+        in1:    src  (aka s2)
+        in2:    byte count
+
+   In this form, it assumes little endian mode.  For big endian mode,
+   the two shifts in .l2 must be inverted:
+
+	shl	tmp1[0] = r[1 + MEMLAT], sh1   // tmp1 = w0 << sh1
+	shr.u   tmp2[0] = r[0 + MEMLAT], sh2   // tmp2 = w1 >> sh2
+
+   and all the mux1 instructions should be replaced by plain mov's.  */
+
+#include "sysdep.h"
+#undef ret
+
+#define OP_T_THRES	16
+#define OPSIZ		8
+#define MEMLAT		2
+
+#define start		r15
+#define saved_pr	r17
+#define saved_lc	r18
+#define dest		r19
+#define src		r20
+#define len		r21
+#define asrc		r22
+#define tmp		r23
+#define value1		r24
+#define value2		r25
+#define sh2		r28
+#define	sh1		r29
+#define loopcnt		r30
+
+ENTRY(memcmp)
+	.prologue
+	alloc	r2 = ar.pfs, 3, 37, 0, 40
+
+	.rotr	r[MEMLAT + 2], q[MEMLAT + 5], tmp1[4], tmp2[4], val[2]
+	.rotp	p[MEMLAT + 4 + 1]
+
+	mov	ret0 = r0		/* by default return value = 0 */
+	.save pr, saved_pr
+	mov	saved_pr = pr		/* save the predicate registers */
+	.save ar.lc, saved_lc
+        mov	saved_lc = ar.lc	/* save the loop counter */
+	.body
+	mov	dest = in0		/* dest */
+	mov	src = in1		/* src */
+	mov	len = in2		/* len */
+	sub	tmp = r0, in0		/* tmp = -dest */
+	;;
+	and	loopcnt = 7, tmp		/* loopcnt = -dest % 8 */
+	cmp.ge	p6, p0 = OP_T_THRES, len	/* is len <= OP_T_THRES */
+(p6)	br.cond.spnt	.cmpfew			/* compare byte by byte */
+	;;
+	cmp.eq	p6, p0 = loopcnt, r0
+(p6)	br.cond.sptk .dest_aligned
+	sub	len = len, loopcnt	/* len -= -dest % 8 */
+	adds	loopcnt = -1, loopcnt	/* --loopcnt */
+	;;
+	mov	ar.lc = loopcnt
+.l1:					/* copy -dest % 8 bytes */
+	ld1	value1 = [src], 1	/* value = *src++ */
+	ld1	value2 = [dest], 1
+	;;
+	cmp.ne	p6, p0 = value1, value2
+(p6)	br.cond.spnt .done
+	br.cloop.dptk .l1
+.dest_aligned:
+	and	sh1 = 7, src		/* sh1 = src % 8 */
+	and	tmp = -8, len		/* tmp = len & -OPSIZ */
+	and	asrc = -8, src		/* asrc = src & -OPSIZ  -- align src */
+	shr.u	loopcnt = len, 3	/* loopcnt = len / 8 */
+	and	len = 7, len ;;		/* len = len % 8 */
+	shl	sh1 = sh1, 3		/* sh1 = 8 * (src % 8) */
+	adds	loopcnt = -1, loopcnt	/* --loopcnt */
+	mov     pr.rot = 1 << 16 ;;	/* set rotating predicates */
+	sub	sh2 = 64, sh1		/* sh2 = 64 - sh1 */
+	mov	ar.lc = loopcnt		/* set LC */
+	cmp.eq  p6, p0 = sh1, r0	/* is the src aligned? */
+(p6)    br.cond.sptk .src_aligned
+	add	src = src, tmp		/* src += len & -OPSIZ */
+	mov	ar.ec = MEMLAT + 4 + 1	/* four more passes needed */
+	ld8	r[1] = [asrc], 8 ;;	/* r[1] = w0 */
+	.align	32
+
+/* We enter this loop with p6 cleared by the above comparison */
+
+.l2:
+(p[0])		ld8	r[0] = [asrc], 8		/* r[0] = w1 */
+(p[0])		ld8	q[0] = [dest], 8
+(p[MEMLAT])	shr.u	tmp1[0] = r[1 + MEMLAT], sh1	/* tmp1 = w0 >> sh1 */
+(p[MEMLAT])	shl	tmp2[0] = r[0 + MEMLAT], sh2	/* tmp2 = w1 << sh2 */
+(p[MEMLAT+4])	cmp.ne	p6, p0 = q[MEMLAT + 4], val[1]
+(p[MEMLAT+3])	or	val[0] = tmp1[3], tmp2[3]	/* val = tmp1 | tmp2 */
+(p6)		br.cond.spnt .l2exit
+		br.ctop.sptk    .l2
+		br.cond.sptk .cmpfew
+.l3exit:
+	mux1	value1 = r[MEMLAT], @rev
+	mux1	value2 = q[MEMLAT], @rev
+	cmp.ne	p6, p0 = r0, r0	;;	/* clear p6 */
+.l2exit:
+(p6)	mux1	value1 = val[1], @rev
+(p6)	mux1	value2 = q[MEMLAT + 4], @rev ;;
+	cmp.ltu	p6, p7 = value2, value1 ;;
+(p6)	mov	ret0 = -1
+(p7)	mov	ret0 = 1
+	mov     pr = saved_pr, -1	/* restore the predicate registers */
+	mov	ar.lc = saved_lc	/* restore the loop counter */
+	br.ret.sptk.many b0
+.src_aligned:
+	cmp.ne	p6, p0 = r0, r0		/* clear p6 */
+	mov     ar.ec = MEMLAT + 1 ;;	/* set EC */
+.l3:
+(p[0])		ld8	r[0] = [src], 8
+(p[0])		ld8	q[0] = [dest], 8
+(p[MEMLAT])	cmp.ne	p6, p0 = r[MEMLAT], q[MEMLAT]
+(p6)		br.cond.spnt .l3exit
+		br.ctop.dptk .l3 ;;
+.cmpfew:
+	cmp.eq	p6, p0 = len, r0	/* is len == 0 ? */
+	adds	len = -1, len		/* --len; */
+(p6)	br.cond.spnt	.restore_and_exit ;;
+	mov	ar.lc = len
+.l4:
+	ld1	value1 = [src], 1
+	ld1	value2 = [dest], 1
+	;;
+	cmp.ne	p6, p0 = value1, value2
+(p6)	br.cond.spnt	.done
+	br.cloop.dptk	.l4 ;;
+.done:
+(p6)	sub	ret0 = value2, value1	/* don't execute it if falling thru */
+.restore_and_exit:
+	mov     pr = saved_pr, -1	/* restore the predicate registers */
+	mov	ar.lc = saved_lc	/* restore the loop counter */
+	br.ret.sptk.many b0
+END(memcmp)
+libc_hidden_def (memcmp)
+#ifdef __UCLIBC_SUSV3_LEGACY__
+strong_alias (memcmp, bcmp)
+#endif
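
[Editorial note, not part of the patch: to make the endianness note in the memcmp
header concrete, this is the little-endian word reconstruction performed in .l2,
sketched in C; merge_le is an illustrative name.

#include <stdint.h>

/* sh1 = 8 * (src % 8), with 0 < sh1 < 64.  On little endian the source
 * word is rebuilt as (w0 >> sh1) | (w1 << (64 - sh1)); on big endian the
 * two shifts swap, as the comment above notes. */
static uint64_t merge_le(uint64_t w0, uint64_t w1, unsigned sh1)
{
    return (w0 >> sh1) | (w1 << (64 - sh1));
}
]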
diff --git a/ap/build/uClibc/libc/string/ia64/memcpy.S b/ap/build/uClibc/libc/string/ia64/memcpy.S
new file mode 100644
index 0000000..6c48a72
--- /dev/null
+++ b/ap/build/uClibc/libc/string/ia64/memcpy.S
@@ -0,0 +1,436 @@
+/* Optimized version of the standard memcpy() function.
+   This file is part of the GNU C Library.
+   Copyright (C) 2000, 2001, 2003 Free Software Foundation, Inc.
+   Contributed by Dan Pop for Itanium <Dan.Pop@cern.ch>.
+   Rewritten for McKinley by Sverre Jarp, HP Labs/CERN <Sverre.Jarp@cern.ch>
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* Return: dest
+
+   Inputs:
+        in0:    dest
+        in1:    src
+        in2:    byte count
+
+   An assembly implementation of the algorithm used by the generic C
+   version from glibc.  The case when source and dest are aligned is
+   treated separately, for extra performance.
+
+   In this form, memcpy assumes little endian mode.  For big endian mode,
+   sh1 must be computed using an extra instruction: sub sh1 = 64, sh1
+   and the order of r[MEMLAT] and r[MEMLAT+1] must be reversed in the
+   shrp instruction.  */
+
+#define USE_LFETCH
+#define USE_FLP
+#include "sysdep.h"
+#undef ret
+
+#define LFETCH_DIST     500
+
+#define ALIGN_UNROLL_no   4 /* no. of elements */
+#define ALIGN_UNROLL_sh	  2 /* (shift amount) */
+
+#define MEMLAT	8
+#define Nrot	((4*(MEMLAT+2) + 7) & ~7)
+
+#define OP_T_THRES 	16
+#define OPSIZ 		8
+
+#define loopcnt		r14
+#define elemcnt		r15
+#define saved_pr	r16
+#define saved_lc	r17
+#define adest		r18
+#define dest		r19
+#define asrc		r20
+#define src		r21
+#define len		r22
+#define tmp2		r23
+#define tmp3		r24
+#define	tmp4		r25
+#define ptable		r26
+#define ploop56		r27
+#define	loopaddr	r28
+#define	sh1		r29
+#define ptr1		r30
+#define ptr2		r31
+
+#define movi0 		mov
+
+#define p_scr		p6
+#define p_xtr		p7
+#define p_nxtr		p8
+#define p_few		p9
+
+#if defined(USE_FLP)
+#define load		ldf8
+#define store		stf8
+#define tempreg		f6
+#define the_r		fr
+#define the_s		fs
+#define the_t		ft
+#define the_q		fq
+#define the_w		fw
+#define the_x		fx
+#define the_y		fy
+#define the_z		fz
+#elif defined(USE_INT)
+#define load		ld8
+#define store		st8
+#define tempreg		tmp2
+#define the_r		r
+#define the_s		s
+#define the_t		t
+#define the_q		q
+#define the_w		w
+#define the_x		x
+#define the_y		y
+#define the_z		z
+#endif
+
+#ifdef GAS_ALIGN_BREAKS_UNWIND_INFO
+/* Manually force proper loop-alignment.  Note: be sure to
+   double-check the code-layout after making any changes to
+   this routine! */
+# define ALIGN(n)	{ nop 0 }
+#else
+# define ALIGN(n)	.align n
+#endif
+
+#if defined(USE_LFETCH)
+#define LOOP(shift)						\
+		ALIGN(32);					\
+.loop##shift :							\
+{ .mmb								\
+(p[0])	ld8.nt1	r[0] = [asrc], 8 ;				\
+(p[0])	lfetch.nt1 [ptr1], 16 ;					\
+	nop.b 0 ;						\
+} { .mib							\
+(p[MEMLAT+1]) st8 [dest] = tmp3, 8 ;				\
+(p[MEMLAT]) shrp tmp3 = r[MEMLAT], s[MEMLAT+1], shift ;		\
+ 	nop.b 0 ;;						\
+ } { .mmb							\
+(p[0])	ld8.nt1	s[0] = [asrc], 8 ;				\
+(p[0])	lfetch.nt1	[ptr2], 16 ;				\
+	nop.b 0 ;						\
+} { .mib							\
+(p[MEMLAT+1]) st8 [dest] = tmp4, 8 ;				\
+(p[MEMLAT]) shrp tmp4 = s[MEMLAT], r[MEMLAT], shift ;		\
+	br.ctop.sptk.many .loop##shift 				\
+;; }								\
+{ .mib								\
+	br.cond.sptk.many .copy_bytes ; /* deal with the remaining bytes */  \
+}
+#else
+#define LOOP(shift)						\
+		ALIGN(32);					\
+.loop##shift :							\
+{ .mmb								\
+(p[0])	ld8.nt1	r[0] = [asrc], 8 ;				\
+	nop.b 0 ;						\
+} { .mib							\
+(p[MEMLAT+1]) st8 [dest] = tmp3, 8 ;				\
+(p[MEMLAT]) shrp tmp3 = r[MEMLAT], s[MEMLAT+1], shift ;		\
+ 	nop.b 0 ;;						\
+ } { .mmb							\
+(p[0])	ld8.nt1	s[0] = [asrc], 8 ;				\
+	nop.b 0 ;						\
+} { .mib							\
+(p[MEMLAT+1]) st8 [dest] = tmp4, 8 ;				\
+(p[MEMLAT]) shrp tmp4 = s[MEMLAT], r[MEMLAT], shift ;		\
+	br.ctop.sptk.many .loop##shift 				\
+;; }								\
+{ .mib								\
+	br.cond.sptk.many .copy_bytes ; /* deal with the remaining bytes */  \
+}
+#endif
+
+
+ENTRY(memcpy)
+{ .mmi
+	.prologue
+	alloc 	r2 = ar.pfs, 3, Nrot - 3, 0, Nrot
+	.rotr	r[MEMLAT+1], s[MEMLAT+2], q[MEMLAT+1], t[MEMLAT+1]
+	.rotp	p[MEMLAT+2]
+	.rotf	fr[MEMLAT+1], fq[MEMLAT+1], fs[MEMLAT+1], ft[MEMLAT+1]
+	mov	ret0 = in0		/* return tmp2 = dest */
+	.save   pr, saved_pr
+	movi0	saved_pr = pr		/* save the predicate registers */
+} { .mmi
+	and	tmp4 = 7, in0 		/* check if destination is aligned */
+	mov 	dest = in0		/* dest */
+	mov 	src = in1		/* src */
+;; }
+{ .mii
+	cmp.eq	p_scr, p0 = in2, r0	/* if (len == 0) */
+	.save   ar.lc, saved_lc
+        movi0 	saved_lc = ar.lc	/* save the loop counter */
+	.body
+	cmp.ge	p_few, p0 = OP_T_THRES, in2 /* is len <= OP_T_THRESH */
+} { .mbb
+	mov	len = in2		/* len */
+(p_scr)	br.cond.dpnt.few .restore_and_exit /* 	Branch no. 1: return dest */
+(p_few) br.cond.dpnt.many .copy_bytes	/* Branch no. 2: copy byte by byte */
+;; }
+{ .mmi
+#if defined(USE_LFETCH)
+	lfetch.nt1 [dest]		/* */
+	lfetch.nt1 [src]		/* */
+#endif
+	shr.u	elemcnt = len, 3	/* elemcnt = len / 8 */
+} { .mib
+	cmp.eq	p_scr, p0 = tmp4, r0	/* is destination aligned? */
+	sub	loopcnt = 7, tmp4	/* */
+(p_scr) br.cond.dptk.many .dest_aligned
+;; }
+{ .mmi
+	ld1	tmp2 = [src], 1		/* */
+	sub	len = len, loopcnt, 1	/* reduce len */
+	movi0	ar.lc = loopcnt		/* */
+} { .mib
+	cmp.ne  p_scr, p0 = 0, loopcnt	/* avoid loading beyond end-point */
+;; }
+
+.l0:	/* ---------------------------- L0: Align src on 8-byte boundary */
+{ .mmi
+	st1	[dest] = tmp2, 1	/* */
+(p_scr)	ld1	tmp2 = [src], 1		/* */
+} { .mib
+	cmp.lt	p_scr, p0 = 1, loopcnt	/* avoid load beyond end-point */
+	add	loopcnt = -1, loopcnt
+	br.cloop.dptk.few .l0		/* */
+;; }
+
+.dest_aligned:
+{ .mmi
+	and	tmp4 = 7, src		/* ready for alignment check */
+	shr.u	elemcnt = len, 3	/* elemcnt = len / 8 */
+;; }
+{ .mib
+	cmp.ne	p_scr, p0 = tmp4, r0	/* is source also aligned */
+	tbit.nz p_xtr, p_nxtr = src, 3	/* prepare a separate move if src */
+} { .mib				/* is not 16B aligned */
+	add	ptr2 = LFETCH_DIST, dest	/* prefetch address */
+	add	ptr1 = LFETCH_DIST, src
+(p_scr) br.cond.dptk.many .src_not_aligned
+;; }
+
+/* The optimal case, when dest, and src are aligned */
+
+.both_aligned:
+{ .mmi
+	.pred.rel "mutex",p_xtr,p_nxtr
+(p_xtr)	cmp.gt  p_scr, p0 = ALIGN_UNROLL_no+1, elemcnt /* Need N + 1 to qualify */
+(p_nxtr) cmp.gt p_scr, p0 = ALIGN_UNROLL_no, elemcnt  /* Need only N to qualify */
+	movi0	pr.rot = 1 << 16	/* set rotating predicates */
+} { .mib
+(p_scr) br.cond.dpnt.many .copy_full_words
+;; }
+
+{ .mmi
+(p_xtr)	load	tempreg = [src], 8
+(p_xtr) add 	elemcnt = -1, elemcnt
+	movi0	ar.ec = MEMLAT + 1	/* set the epilog counter */
+;; }
+{ .mmi
+(p_xtr) add	len = -8, len		/* */
+	add 	asrc = 16, src 		/* one bank apart (for USE_INT) */
+	shr.u	loopcnt = elemcnt, ALIGN_UNROLL_sh  /* cater for unrolling */
+;;}
+{ .mmi
+	add	loopcnt = -1, loopcnt
+(p_xtr)	store	[dest] = tempreg, 8	/* copy the "extra" word */
+	nop.i	0
+;; }
+{ .mib
+	add	adest = 16, dest
+	movi0	ar.lc = loopcnt 	/* set the loop counter */
+;; }
+
+#ifdef  GAS_ALIGN_BREAKS_UNWIND_INFO
+	{ nop 0 }
+#else
+	.align	32
+#endif
+#if defined(USE_FLP)
+.l1: /* ------------------------------- L1: Everything a multiple of 8 */
+{ .mmi
+#if defined(USE_LFETCH)
+(p[0])	lfetch.nt1 [ptr2],32
+#endif
+(p[0])	ldfp8	the_r[0],the_q[0] = [src], 16
+(p[0])	add	len = -32, len
+} {.mmb
+(p[MEMLAT]) store [dest] = the_r[MEMLAT], 8
+(p[MEMLAT]) store [adest] = the_s[MEMLAT], 8
+;; }
+{ .mmi
+#if defined(USE_LFETCH)
+(p[0])	lfetch.nt1 [ptr1],32
+#endif
+(p[0])	ldfp8	the_s[0], the_t[0] = [src], 16
+} {.mmb
+(p[MEMLAT]) store [dest] = the_q[MEMLAT], 24
+(p[MEMLAT]) store [adest] = the_t[MEMLAT], 24
+	br.ctop.dptk.many .l1
+;; }
+#elif defined(USE_INT)
+.l1: /* ------------------------------- L1: Everything a multiple of 8 */
+{ .mmi
+(p[0])	load	the_r[0] = [src], 8
+(p[0])	load	the_q[0] = [asrc], 8
+(p[0])	add	len = -32, len
+} {.mmb
+(p[MEMLAT]) store [dest] = the_r[MEMLAT], 8
+(p[MEMLAT]) store [adest] = the_q[MEMLAT], 8
+;; }
+{ .mmi
+(p[0])	load	the_s[0]  = [src], 24
+(p[0])	load	the_t[0] = [asrc], 24
+} {.mmb
+(p[MEMLAT]) store [dest] = the_s[MEMLAT], 24
+(p[MEMLAT]) store [adest] = the_t[MEMLAT], 24
+#if defined(USE_LFETCH)
+;; }
+{ .mmb
+(p[0])	lfetch.nt1 [ptr2],32
+(p[0])	lfetch.nt1 [ptr1],32
+#endif
+	br.ctop.dptk.many .l1
+;; }
+#endif
+
+.copy_full_words:
+{ .mib
+	cmp.gt	p_scr, p0 = 8, len	/* */
+	shr.u	elemcnt = len, 3	/* */
+(p_scr) br.cond.dpnt.many .copy_bytes
+;; }
+{ .mii
+	load	tempreg = [src], 8
+	add	loopcnt = -1, elemcnt	/* */
+;; }
+{ .mii
+	cmp.ne	p_scr, p0 = 0, loopcnt	/* */
+	mov	ar.lc = loopcnt		/* */
+;; }
+
+.l2: /* ------------------------------- L2: Max 4 words copied separately */
+{ .mmi
+	store	[dest] = tempreg, 8
+(p_scr)	load	tempreg = [src], 8	/* */
+	add	len = -8, len
+} { .mib
+	cmp.lt	p_scr, p0 = 1, loopcnt	/* avoid load beyond end-point */
+	add	loopcnt = -1, loopcnt
+	br.cloop.dptk.few  .l2
+;; }
+
+.copy_bytes:
+{ .mib
+	cmp.eq	p_scr, p0 = len, r0	/* is len == 0 ? */
+	add	loopcnt = -1, len	/* len--; */
+(p_scr)	br.cond.spnt	.restore_and_exit
+;; }
+{ .mii
+	ld1	tmp2 = [src], 1
+	movi0	ar.lc = loopcnt
+	cmp.ne	p_scr, p0 = 0, loopcnt	/* avoid load beyond end-point */
+;; }
+
+.l3: /* ------------------------------- L3: Final byte move */
+{ .mmi
+	st1	[dest] = tmp2, 1
+(p_scr)	ld1	tmp2 = [src], 1
+} { .mib
+	cmp.lt	p_scr, p0 = 1, loopcnt	/* avoid load beyond end-point */
+	add	loopcnt = -1, loopcnt
+	br.cloop.dptk.few  .l3
+;; }
+
+.restore_and_exit:
+{ .mmi
+	movi0	pr = saved_pr, -1	/* restore the predicate registers */
+;; }
+{ .mib
+	movi0	ar.lc = saved_lc	/* restore the loop counter */
+	br.ret.sptk.many b0
+;; }
+
+
+.src_not_aligned:
+{ .mmi
+	cmp.gt	p_scr, p0 = 16, len
+	and	sh1 = 7, src 		/* sh1 = src % 8 */
+	shr.u	loopcnt = len, 4	/* element-cnt = len / 16 */
+} { .mib
+	add	tmp4 = @ltoff(.table), gp
+	add 	tmp3 = @ltoff(.loop56), gp
+(p_scr)	br.cond.dpnt.many .copy_bytes	/* do byte by byte if too few */
+;; }
+{ .mmi
+	and	asrc = -8, src		/* asrc = src & -8 -- align src for loop */
+	add 	loopcnt = -1, loopcnt	/* loopcnt-- */
+	shl	sh1 = sh1, 3		/* sh1 = 8 * (src % 8) */
+} { .mmi
+	ld8	ptable = [tmp4]		/* ptable = &table */
+	ld8	ploop56 = [tmp3]	/* ploop56 = &loop56 */
+	and	tmp2 = -16, len		/* tmp2 = len & -OPSIZ */
+;; }
+{ .mmi
+	add	tmp3 = ptable, sh1	/* tmp3 = &table + sh1 */
+	add	src = src, tmp2		/* src += len & (-16) */
+	movi0	ar.lc = loopcnt		/* set LC */
+;; }
+{ .mmi
+	ld8	tmp4 = [tmp3]		/* tmp4 = loop offset */
+	sub	len = len, tmp2		/* len -= len & (-16) */
+	movi0	ar.ec = MEMLAT + 2 	/* one more pass needed */
+;; }
+{ .mmi
+	ld8	s[1] = [asrc], 8	/* preload */
+	sub	loopaddr = ploop56,tmp4	/* loopadd = &loop56 - loop offset */
+	movi0   pr.rot = 1 << 16	/* set rotating predicates */
+;; }
+{ .mib
+	nop.m	0
+	movi0	b6 = loopaddr
+	br	b6			/* jump to the appropriate loop */
+;; }
+
+	LOOP(8)
+	LOOP(16)
+	LOOP(24)
+	LOOP(32)
+	LOOP(40)
+	LOOP(48)
+	LOOP(56)
+END(memcpy)
+libc_hidden_def (memcpy)
+
+	.rodata
+	.align 8
+.table:
+	data8	0			/* dummy entry */
+	data8 	.loop56 - .loop8
+	data8 	.loop56 - .loop16
+	data8 	.loop56 - .loop24
+	data8	.loop56 - .loop32
+	data8	.loop56 - .loop40
+	data8	.loop56 - .loop48
+	data8	.loop56 - .loop56
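
[Editorial note, not part of the patch: a minimal C skeleton of the copy strategy
described in the memcpy header comment; memcpy_sketch is an illustrative name.  It
covers only the aligned fast path; the assembly additionally merges misaligned
source words with shrp and dispatches into one of seven unrolled loops through the
.table jump table.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void *memcpy_sketch(void *dest, const void *src, size_t n)
{
    unsigned char *d = dest;
    const unsigned char *s = src;

    while (n && ((uintptr_t)d & 7)) {    /* align the destination */
        *d++ = *s++;
        n--;
    }
    if (((uintptr_t)s & 7) == 0) {       /* both aligned: copy 8B words */
        while (n >= 8) {
            memcpy(d, s, 8);
            d += 8;
            s += 8;
            n -= 8;
        }
    }
    while (n--)                          /* tail, or unaligned source */
        *d++ = *s++;
    return dest;
}
]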
diff --git a/ap/build/uClibc/libc/string/ia64/memmove.S b/ap/build/uClibc/libc/string/ia64/memmove.S
new file mode 100644
index 0000000..beaada6
--- /dev/null
+++ b/ap/build/uClibc/libc/string/ia64/memmove.S
@@ -0,0 +1,251 @@
+/* Optimized version of the standard memmove() function.
+   This file is part of the GNU C Library.
+   Copyright (C) 2000, 2001, 2003 Free Software Foundation, Inc.
+   Contributed by Dan Pop <Dan.Pop@cern.ch>.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* Return: dest
+
+   Inputs:
+        in0:    dest
+        in1:    src
+        in2:    byte count
+
+   The core of the function is the memcpy implementation used in memcpy.S.
+   When bytes have to be copied backwards, only the easy case, when
+   all arguments are multiples of 8, is optimised.
+
+   In this form, it assumes little endian mode.  For big endian mode,
+   sh1 must be computed using an extra instruction: sub sh1 = 64, sh1
+   or the UM.be bit should be cleared at the beginning and set at the end.  */
+
+#include "sysdep.h"
+#undef ret
+
+#define OP_T_THRES 	16
+#define OPSIZ 		 8
+
+#define adest		r15
+#define saved_pr	r17
+#define saved_lc	r18
+#define dest		r19
+#define src		r20
+#define len		r21
+#define asrc		r22
+#define tmp2		r23
+#define tmp3		r24
+#define	tmp4		r25
+#define ptable		r26
+#define ploop56		r27
+#define	loopaddr	r28
+#define	sh1		r29
+#define loopcnt		r30
+#define	value		r31
+
+#ifdef GAS_ALIGN_BREAKS_UNWIND_INFO
+# define ALIGN(n)	{ nop 0 }
+#else
+# define ALIGN(n)	.align n
+#endif
+
+#define LOOP(shift)							\
+		ALIGN(32);						\
+.loop##shift :								\
+(p[0])		ld8	r[0] = [asrc], 8 ;	/* w1 */		\
+(p[MEMLAT+1])	st8	[dest] = value, 8 ;				\
+(p[MEMLAT])	shrp	value = r[MEMLAT], r[MEMLAT+1], shift ;		\
+		nop.b	0 ;						\
+		nop.b	0 ;						\
+		br.ctop.sptk .loop##shift ;				\
+		br.cond.sptk .cpyfew ; /* deal with the remaining bytes */
+
+#define MEMLAT	21
+#define Nrot	(((2*MEMLAT+3) + 7) & ~7)
+
+ENTRY(memmove)
+	.prologue
+	alloc 	r2 = ar.pfs, 3, Nrot - 3, 0, Nrot
+	.rotr	r[MEMLAT + 2], q[MEMLAT + 1]
+	.rotp	p[MEMLAT + 2]
+	mov	ret0 = in0		/* return value = dest */
+	.save pr, saved_pr
+	mov	saved_pr = pr		/* save the predicate registers */
+	.save ar.lc, saved_lc
+        mov 	saved_lc = ar.lc	/* save the loop counter */
+	.body
+	or	tmp3 = in0, in1 ;;	/* tmp3 = dest | src */
+	or	tmp3 = tmp3, in2	/* tmp3 = dest | src | len */
+	mov 	dest = in0		/* dest */
+	mov 	src = in1		/* src */
+	mov	len = in2		/* len */
+	sub	tmp2 = r0, in0		/* tmp2 = -dest */
+	cmp.eq	p6, p0 = in2, r0	/* if (len == 0) */
+(p6)	br.cond.spnt .restore_and_exit;;/* 	return dest; */
+	and	tmp4 = 7, tmp3 		/* tmp4 = (dest | src | len) & 7 */
+	cmp.le	p6, p0 = dest, src	/* if dest <= src it's always safe */
+(p6)	br.cond.spnt .forward		/* to copy forward */
+	add	tmp3 = src, len;;
+	cmp.lt	p6, p0 = dest, tmp3	/* if dest > src && dest < src + len */
+(p6)	br.cond.spnt .backward		/* we have to copy backward */
+
+.forward:
+	shr.u	loopcnt = len, 4 ;;	/* loopcnt = len / 16 */
+	cmp.ne	p6, p0 = tmp4, r0	/* if ((dest | src | len) & 7 != 0) */
+(p6)	br.cond.sptk .next		/*	goto next; */
+
+/* The optimal case, when dest, src and len are all multiples of 8 */
+
+	and	tmp3 = 0xf, len
+	mov	pr.rot = 1 << 16	/* set rotating predicates */
+	mov	ar.ec = MEMLAT + 1 ;;	/* set the epilog counter */
+	cmp.ne	p6, p0 = tmp3, r0	/* do we have to copy an extra word? */
+	adds	loopcnt = -1, loopcnt;;	/* --loopcnt */
+(p6)	ld8	value = [src], 8;;
+(p6)	st8	[dest] = value, 8	/* copy the "odd" word */
+	mov	ar.lc = loopcnt 	/* set the loop counter */
+	cmp.eq	p6, p0 = 8, len
+(p6)	br.cond.spnt .restore_and_exit;;/* the one-word special case */
+	adds	adest = 8, dest		/* set adest one word ahead of dest */
+	adds	asrc = 8, src ;;	/* set asrc one word ahead of src */
+	nop.b	0			/* get the "golden" alignment for */
+	nop.b	0			/* the next loop */
+.l0:
+(p[0])		ld8	r[0] = [src], 16
+(p[0])		ld8	q[0] = [asrc], 16
+(p[MEMLAT])	st8	[dest] = r[MEMLAT], 16
+(p[MEMLAT])	st8	[adest] = q[MEMLAT], 16
+		br.ctop.dptk .l0 ;;
+
+	mov	pr = saved_pr, -1	/* restore the predicate registers */
+	mov	ar.lc = saved_lc	/* restore the loop counter */
+	br.ret.sptk.many b0
+.next:
+	cmp.ge	p6, p0 = OP_T_THRES, len	/* is len <= OP_T_THRES */
+	and	loopcnt = 7, tmp2 		/* loopcnt = -dest % 8 */
+(p6)	br.cond.spnt	.cpyfew			/* copy byte by byte */
+	;;
+	cmp.eq	p6, p0 = loopcnt, r0
+(p6)	br.cond.sptk	.dest_aligned
+	sub	len = len, loopcnt	/* len -= -dest % 8 */
+	adds	loopcnt = -1, loopcnt	/* --loopcnt */
+	;;
+	mov	ar.lc = loopcnt
+.l1:					/* copy -dest % 8 bytes */
+	ld1	value = [src], 1	/* value = *src++ */
+	;;
+	st1	[dest] = value, 1	/* *dest++ = value */
+	br.cloop.dptk .l1
+.dest_aligned:
+	and	sh1 = 7, src 		/* sh1 = src % 8 */
+	and	tmp2 = -8, len   	/* tmp2 = len & -OPSIZ */
+	and	asrc = -8, src		/* asrc = src & -OPSIZ  -- align src */
+	shr.u	loopcnt = len, 3	/* loopcnt = len / 8 */
+	and	len = 7, len;;		/* len = len % 8 */
+	adds	loopcnt = -1, loopcnt	/* --loopcnt */
+	addl	tmp4 = @ltoff(.table), gp
+	addl	tmp3 = @ltoff(.loop56), gp
+	mov     ar.ec = MEMLAT + 1	/* set EC */
+	mov     pr.rot = 1 << 16;;	/* set rotating predicates */
+	mov	ar.lc = loopcnt		/* set LC */
+	cmp.eq  p6, p0 = sh1, r0 	/* is the src aligned? */
+(p6)    br.cond.sptk .src_aligned
+	add	src = src, tmp2		/* src += len & -OPSIZ */
+	shl	sh1 = sh1, 3		/* sh1 = 8 * (src % 8) */
+	ld8	ploop56 = [tmp3]	/* ploop56 = &loop56 */
+	ld8	ptable = [tmp4];;	/* ptable = &table */
+	add	tmp3 = ptable, sh1;;	/* tmp3 = &table + sh1 */
+	mov	ar.ec = MEMLAT + 1 + 1 /* one more pass needed */
+	ld8	tmp4 = [tmp3];;		/* tmp4 = loop offset */
+	sub	loopaddr = ploop56,tmp4	/* loopadd = &loop56 - loop offset */
+	ld8	r[1] = [asrc], 8;;	/* w0 */
+	mov	b6 = loopaddr;;
+	br	b6			/* jump to the appropriate loop */
+
+	LOOP(8)
+	LOOP(16)
+	LOOP(24)
+	LOOP(32)
+	LOOP(40)
+	LOOP(48)
+	LOOP(56)
+
+.src_aligned:
+.l3:
+(p[0])		ld8	r[0] = [src], 8
+(p[MEMLAT])	st8	[dest] = r[MEMLAT], 8
+		br.ctop.dptk .l3
+.cpyfew:
+	cmp.eq	p6, p0 = len, r0	/* is len == 0 ? */
+	adds	len = -1, len		/* --len; */
+(p6)	br.cond.spnt	.restore_and_exit ;;
+	mov	ar.lc = len
+.l4:
+	ld1	value = [src], 1
+	;;
+	st1	[dest] = value, 1
+	br.cloop.dptk	.l4 ;;
+.restore_and_exit:
+	mov     pr = saved_pr, -1    	/* restore the predicate registers */
+	mov 	ar.lc = saved_lc	/* restore the loop counter */
+	br.ret.sptk.many b0
+
+/* In the case of a backward copy, optimise only the case when everything
+   is a multiple of 8, otherwise copy byte by byte.  The backward copy is
+   used only when the blocks are overlapping and dest > src.
+*/
+.backward:
+	shr.u	loopcnt = len, 3	/* loopcnt = len / 8 */
+	add	src = src, len		/* src points one byte past the end */
+	add	dest = dest, len ;; 	/* dest points one byte past the end */
+	mov	ar.ec = MEMLAT + 1	/* set the epilog counter */
+	mov	pr.rot = 1 << 16	/* set rotating predicates */
+	adds	loopcnt = -1, loopcnt	/* --loopcnt */
+	cmp.ne	p6, p0 = tmp4, r0	/* if ((dest | src | len) & 7 != 0) */
+(p6)	br.cond.sptk .bytecopy ;;	/* copy byte by byte backward */
+	adds	src = -8, src		/* src points to the last word */
+	adds	dest = -8, dest 	/* dest points to the last word */
+	mov	ar.lc = loopcnt;;	/* set the loop counter */
+.l5:
+(p[0])		ld8	r[0] = [src], -8
+(p[MEMLAT])	st8	[dest] = r[MEMLAT], -8
+		br.ctop.dptk .l5
+		br.cond.sptk .restore_and_exit
+.bytecopy:
+	adds	src = -1, src		/* src points to the last byte */
+	adds	dest = -1, dest		/* dest points to the last byte */
+	adds	loopcnt = -1, len;;	/* loopcnt = len - 1 */
+	mov	ar.lc = loopcnt;;	/* set the loop counter */
+.l6:
+(p[0])		ld1	r[0] = [src], -1
+(p[MEMLAT])	st1	[dest] = r[MEMLAT], -1
+		br.ctop.dptk .l6
+		br.cond.sptk .restore_and_exit
+END(memmove)
+
+	.rodata
+	.align 8
+.table:
+	data8	0			/* dummy entry */
+	data8 	.loop56 - .loop8
+	data8 	.loop56 - .loop16
+	data8 	.loop56 - .loop24
+	data8	.loop56 - .loop32
+	data8	.loop56 - .loop40
+	data8	.loop56 - .loop48
+	data8	.loop56 - .loop56
+
+libc_hidden_def (memmove)
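
[Editorial note, not part of the patch: the direction decision made at the top of
memmove, in plain C; memmove_sketch is an illustrative name and the pointer
comparison is the usual informal overlap test.  The assembly copies forward
whenever that is safe and falls back to a backward copy only for overlapping
blocks with dest > src.

#include <stddef.h>

static void *memmove_sketch(void *dest, const void *src, size_t n)
{
    unsigned char *d = dest;
    const unsigned char *s = src;

    if (d <= s || d >= s + n) {          /* no harmful overlap: copy forward */
        while (n--)
            *d++ = *s++;
    } else {                             /* dest inside [src, src+n): copy backward */
        d += n;
        s += n;
        while (n--)
            *--d = *--s;
    }
    return dest;
}
]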
diff --git a/ap/build/uClibc/libc/string/ia64/memset.S b/ap/build/uClibc/libc/string/ia64/memset.S
new file mode 100644
index 0000000..45df583
--- /dev/null
+++ b/ap/build/uClibc/libc/string/ia64/memset.S
@@ -0,0 +1,400 @@
+/* Optimized version of the standard memset() function.
+   This file is part of the GNU C Library.
+   Copyright (C) 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+   Contributed by Dan Pop for Itanium <Dan.Pop@cern.ch>.
+   Rewritten for McKinley by Sverre Jarp, HP Labs/CERN <Sverre.Jarp@cern.ch>
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* Return: dest
+
+   Inputs:
+        in0:    dest
+        in1:    value
+        in2:    count
+
+   The algorithm is fairly straightforward: set byte by byte until we
+   get to a 16B-aligned address, then loop on 128B chunks using an
+   early store as prefetching, then loop on 32B chunks, then clear remaining
+   words, finally clear remaining bytes.
+   Since a stf.spill f0 can store 16B in one go, we use this instruction
+   to get peak speed when value = 0.  */
+
+#include "sysdep.h"
+#undef ret
+
+#define dest		in0
+#define value		in1
+#define	cnt		in2
+
+#define tmp		r31
+#define save_lc		r30
+#define ptr0		r29
+#define ptr1		r28
+#define ptr2		r27
+#define ptr3		r26
+#define ptr9		r24
+#define	loopcnt		r23
+#define linecnt		r22
+#define bytecnt		r21
+
+#define fvalue		f6
+
+/* This routine uses only scratch predicate registers (p6 - p15) */
+#define p_scr		p6	/* default register for same-cycle branches */
+#define p_nz		p7
+#define p_zr		p8
+#define p_unalgn	p9
+#define p_y		p11
+#define p_n		p12
+#define p_yy		p13
+#define p_nn		p14
+
+#define movi0		mov
+
+#define MIN1		15
+#define MIN1P1HALF	8
+#define LINE_SIZE	128
+#define LSIZE_SH        7			/* shift amount */
+#define PREF_AHEAD	8
+
+#define USE_FLP
+#if defined(USE_INT)
+#define store		st8
+#define myval           value
+#elif defined(USE_FLP)
+#define store		stf8
+#define myval		fvalue
+#endif
+
+.align	64
+ENTRY(memset)
+{ .mmi
+	.prologue
+	alloc	tmp = ar.pfs, 3, 0, 0, 0
+	lfetch.nt1 [dest]
+	.save   ar.lc, save_lc
+	movi0	save_lc = ar.lc
+} { .mmi
+	.body
+	mov	ret0 = dest		/* return value */
+	cmp.ne	p_nz, p_zr = value, r0	/* use stf.spill if value is zero */
+	cmp.eq	p_scr, p0 = cnt, r0
+;; }
+{ .mmi
+	and	ptr2 = -(MIN1+1), dest	/* aligned address */
+	and	tmp = MIN1, dest	/* prepare to check for alignment */
+	tbit.nz p_y, p_n = dest, 0	/* Do we have an odd address? (M_B_U) */
+} { .mib
+	mov	ptr1 = dest
+	mux1	value = value, @brcst	/* create 8 identical bytes in word */
+(p_scr)	br.ret.dpnt.many rp		/* return immediately if count = 0 */
+;; }
+{ .mib
+	cmp.ne	p_unalgn, p0 = tmp, r0
+} { .mib				/* NB: # of bytes to move is 1 higher */
+	sub	bytecnt = (MIN1+1), tmp	/*     than loopcnt */
+	cmp.gt	p_scr, p0 = 16, cnt		/* is it a minimalistic task? */
+(p_scr)	br.cond.dptk.many .move_bytes_unaligned	/* go move just a few (M_B_U) */
+;; }
+{ .mmi
+(p_unalgn) add	ptr1 = (MIN1+1), ptr2		/* after alignment */
+(p_unalgn) add	ptr2 = MIN1P1HALF, ptr2		/* after alignment */
+(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3	/* should we do a st8 ? */
+;; }
+{ .mib
+(p_y)	add	cnt = -8, cnt
+(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2	/* should we do a st4 ? */
+} { .mib
+(p_y)	st8	[ptr2] = value, -4
+(p_n)	add	ptr2 = 4, ptr2
+;; }
+{ .mib
+(p_yy)	add	cnt = -4, cnt
+(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1	/* should we do a st2 ? */
+} { .mib
+(p_yy)	st4	[ptr2] = value, -2
+(p_nn)	add	ptr2 = 2, ptr2
+;; }
+{ .mmi
+	mov	tmp = LINE_SIZE+1		/* for compare */
+(p_y)	add	cnt = -2, cnt
+(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0	/* should we do a st1 ? */
+} { .mmi
+	setf.sig fvalue=value			/* transfer value to FLP side */
+(p_y)	st2	[ptr2] = value, -1
+(p_n)	add	ptr2 = 1, ptr2
+;; }
+
+{ .mmi
+(p_yy)	st1	[ptr2] = value
+	cmp.gt	p_scr, p0 = tmp, cnt		/* is it a minimalistic task? */
+} { .mbb
+(p_yy)	add	cnt = -1, cnt
+(p_scr)	br.cond.dpnt.many .fraction_of_line	/* go move just a few */
+;; }
+
+{ .mib
+	nop.m 0
+	shr.u	linecnt = cnt, LSIZE_SH
+(p_zr)	br.cond.dptk.many .l1b			/* Jump to use stf.spill */
+;; }
+
+#ifndef GAS_ALIGN_BREAKS_UNWIND_INFO
+	.align 32 /* --------  L1A: store ahead into cache lines; fill later */
+#endif
+{ .mmi
+	and	tmp = -(LINE_SIZE), cnt		/* compute end of range */
+	mov	ptr9 = ptr1			/* used for prefetching */
+	and	cnt = (LINE_SIZE-1), cnt	/* remainder */
+} { .mmi
+	mov	loopcnt = PREF_AHEAD-1		/* default prefetch loop */
+	cmp.gt	p_scr, p0 = PREF_AHEAD, linecnt	/* check against actual value */
+;; }
+{ .mmi
+(p_scr)	add	loopcnt = -1, linecnt		/* start of stores */
+	add	ptr2 = 8, ptr1			/* (beyond prefetch stores) */
+	add	ptr1 = tmp, ptr1		/* first address beyond total */
+;; }						/* range */
+{ .mmi
+	add	tmp = -1, linecnt		/* next loop count */
+	movi0	ar.lc = loopcnt
+;; }
+.pref_l1a:
+{ .mib
+	store [ptr9] = myval, 128	/* Do stores one cache line apart */
+	nop.i	0
+	br.cloop.dptk.few .pref_l1a
+;; }
+{ .mmi
+	add	ptr0 = 16, ptr2		/* Two stores in parallel */
+	movi0	ar.lc = tmp
+;; }
+.l1ax:
+ { .mmi
+	store [ptr2] = myval, 8
+	store [ptr0] = myval, 8
+ ;; }
+ { .mmi
+	store [ptr2] = myval, 24
+	store [ptr0] = myval, 24
+ ;; }
+ { .mmi
+	store [ptr2] = myval, 8
+	store [ptr0] = myval, 8
+ ;; }
+ { .mmi
+	store [ptr2] = myval, 24
+	store [ptr0] = myval, 24
+ ;; }
+ { .mmi
+	store [ptr2] = myval, 8
+	store [ptr0] = myval, 8
+ ;; }
+ { .mmi
+	store [ptr2] = myval, 24
+	store [ptr0] = myval, 24
+ ;; }
+ { .mmi
+	store [ptr2] = myval, 8
+	store [ptr0] = myval, 32
+	cmp.lt	p_scr, p0 = ptr9, ptr1	/* do we need more prefetching? */
+ ;; }
+{ .mmb
+	store [ptr2] = myval, 24
+(p_scr)	store [ptr9] = myval, 128
+	br.cloop.dptk.few .l1ax
+;; }
+{ .mbb
+	cmp.le  p_scr, p0 = 8, cnt		/* just a few bytes left ? */
+(p_scr) br.cond.dpnt.many  .fraction_of_line	/* Branch no. 2 */
+	br.cond.dpnt.many  .move_bytes_from_alignment	/* Branch no. 3 */
+;; }
+
+#ifdef GAS_ALIGN_BREAKS_UNWIND_INFO
+	{ nop 0 }
+#else
+	.align 32
+#endif
+.l1b:	/* ------------------  L1B: store ahead into cache lines; fill later */
+{ .mmi
+	and	tmp = -(LINE_SIZE), cnt		/* compute end of range */
+	mov	ptr9 = ptr1			/* used for prefetching */
+	and	cnt = (LINE_SIZE-1), cnt	/* remainder */
+} { .mmi
+	mov	loopcnt = PREF_AHEAD-1		/* default prefetch loop */
+	cmp.gt	p_scr, p0 = PREF_AHEAD, linecnt	/* check against actual value */
+;; }
+{ .mmi
+(p_scr)	add	loopcnt = -1, linecnt
+	add	ptr2 = 16, ptr1	/* start of stores (beyond prefetch stores) */
+	add	ptr1 = tmp, ptr1	/* first address beyond total range */
+;; }
+{ .mmi
+	add	tmp = -1, linecnt	/* next loop count */
+	movi0	ar.lc = loopcnt
+;; }
+.pref_l1b:
+{ .mib
+	stf.spill [ptr9] = f0, 128	/* Do stores one cache line apart */
+	nop.i   0
+	br.cloop.dptk.few .pref_l1b
+;; }
+{ .mmi
+	add	ptr0 = 16, ptr2		/* Two stores in parallel */
+	movi0	ar.lc = tmp
+;; }
+.l1bx:
+ { .mmi
+	stf.spill [ptr2] = f0, 32
+	stf.spill [ptr0] = f0, 32
+ ;; }
+ { .mmi
+	stf.spill [ptr2] = f0, 32
+	stf.spill [ptr0] = f0, 32
+ ;; }
+ { .mmi
+	stf.spill [ptr2] = f0, 32
+	stf.spill [ptr0] = f0, 64
+	cmp.lt	p_scr, p0 = ptr9, ptr1	/* do we need more prefetching? */
+ ;; }
+{ .mmb
+	stf.spill [ptr2] = f0, 32
+(p_scr)	stf.spill [ptr9] = f0, 128
+	br.cloop.dptk.few .l1bx
+;; }
+{ .mib
+	cmp.gt  p_scr, p0 = 8, cnt	/* just a few bytes left ? */
+(p_scr)	br.cond.dpnt.many  .move_bytes_from_alignment
+;; }
+
+.fraction_of_line:
+{ .mib
+	add	ptr2 = 16, ptr1
+	shr.u	loopcnt = cnt, 5	/* loopcnt = cnt / 32 */
+;; }
+{ .mib
+	cmp.eq	p_scr, p0 = loopcnt, r0
+	add	loopcnt = -1, loopcnt
+(p_scr)	br.cond.dpnt.many store_words
+;; }
+{ .mib
+	and	cnt = 0x1f, cnt		/* compute the remaining cnt */
+	movi0   ar.lc = loopcnt
+;; }
+#ifndef GAS_ALIGN_BREAKS_UNWIND_INFO
+	.align 32
+#endif
+.l2:	/* ----------------------------  L2A:  store 32B in 2 cycles */
+{ .mmb
+	store	[ptr1] = myval, 8
+	store	[ptr2] = myval, 8
+;; } { .mmb
+	store	[ptr1] = myval, 24
+	store	[ptr2] = myval, 24
+	br.cloop.dptk.many .l2
+;; }
+store_words:
+{ .mib
+	cmp.gt	p_scr, p0 = 8, cnt		/* just a few bytes left ? */
+(p_scr)	br.cond.dpnt.many .move_bytes_from_alignment	/* Branch */
+;; }
+
+{ .mmi
+	store	[ptr1] = myval, 8		/* store */
+	cmp.le	p_y, p_n = 16, cnt		/* */
+	add	cnt = -8, cnt			/* subtract */
+;; }
+{ .mmi
+(p_y)	store	[ptr1] = myval, 8		/* store */
+(p_y)	cmp.le.unc p_yy, p_nn = 16, cnt		/* */
+(p_y)	add	cnt = -8, cnt			/* subtract */
+;; }
+{ .mmi						/* store */
+(p_yy)	store	[ptr1] = myval, 8		/* */
+(p_yy)	add	cnt = -8, cnt			/* subtract */
+;; }
+
+.move_bytes_from_alignment:
+{ .mib
+	cmp.eq	p_scr, p0 = cnt, r0
+	tbit.nz.unc p_y, p0 = cnt, 2	/* should we terminate with a st4 ? */
+(p_scr)	br.cond.dpnt.few .restore_and_exit
+;; }
+{ .mib
+(p_y)	st4	[ptr1] = value, 4
+	tbit.nz.unc p_yy, p0 = cnt, 1	/* should we terminate with a st2 ? */
+;; }
+{ .mib
+(p_yy)	st2	[ptr1] = value, 2
+	tbit.nz.unc p_y, p0 = cnt, 0
+;; }
+
+{ .mib
+(p_y)	st1	[ptr1] = value
+;; }
+.restore_and_exit:
+{ .mib
+	nop.m	0
+	movi0	ar.lc = save_lc
+	br.ret.sptk.many rp
+;; }
+
+.move_bytes_unaligned:
+{ .mmi
+       .pred.rel "mutex",p_y, p_n
+       .pred.rel "mutex",p_yy, p_nn
+(p_n)	cmp.le  p_yy, p_nn = 4, cnt
+(p_y)	cmp.le  p_yy, p_nn = 5, cnt
+(p_n)	add	ptr2 = 2, ptr1
+} { .mmi
+(p_y)	add	ptr2 = 3, ptr1
+(p_y)	st1	[ptr1] = value, 1	/* fill 1 (odd-aligned) byte */
+(p_y)	add	cnt = -1, cnt		/* [15, 14 (or less) left] */
+;; }
+{ .mmi
+(p_yy)	cmp.le.unc p_y, p0 = 8, cnt
+	add	ptr3 = ptr1, cnt	/* prepare last store */
+	movi0	ar.lc = save_lc
+} { .mmi
+(p_yy)	st2	[ptr1] = value, 4	/* fill 2 (aligned) bytes */
+(p_yy)	st2	[ptr2] = value, 4	/* fill 2 (aligned) bytes */
+(p_yy)	add	cnt = -4, cnt		/* [11, 10 (or less) left] */
+;; }
+{ .mmi
+(p_y)	cmp.le.unc p_yy, p0 = 8, cnt
+	add	ptr3 = -1, ptr3		/* last store */
+	tbit.nz p_scr, p0 = cnt, 1	/* will there be a st2 at the end ? */
+} { .mmi
+(p_y)	st2	[ptr1] = value, 4	/* fill 2 (aligned) bytes */
+(p_y)	st2	[ptr2] = value, 4	/* fill 2 (aligned) bytes */
+(p_y)	add	cnt = -4, cnt		/* [7, 6 (or less) left] */
+;; }
+{ .mmi
+(p_yy)	st2	[ptr1] = value, 4	/* fill 2 (aligned) bytes */
+(p_yy)	st2	[ptr2] = value, 4	/* fill 2 (aligned) bytes */
+					/* [3, 2 (or less) left] */
+	tbit.nz p_y, p0 = cnt, 0	/* will there be a st1 at the end ? */
+} { .mmi
+(p_yy)	add	cnt = -4, cnt
+;; }
+{ .mmb
+(p_scr)	st2	[ptr1] = value		/* fill 2 (aligned) bytes */
+(p_y)	st1	[ptr3] = value		/* fill last byte (using ptr3) */
+	br.ret.sptk.many rp
+;; }
+END(memset)
+libc_hidden_def (memset)
diff --git a/ap/build/uClibc/libc/string/ia64/softpipe.h b/ap/build/uClibc/libc/string/ia64/softpipe.h
new file mode 100644
index 0000000..d71af73
--- /dev/null
+++ b/ap/build/uClibc/libc/string/ia64/softpipe.h
@@ -0,0 +1,29 @@
+/* This file is part of the GNU C Library.
+   Copyright (C) 2000 Free Software Foundation, Inc.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* The latency of a memory load assumed by the assembly implementation
+   of the mem and str functions.  Since we don't have any clue about
+   where the data might be, let's assume it's in the L2 cache.
+   Assuming L3 would be too pessimistic :-)
+
+   Some functions define MEMLAT as 2, because they expect their data
+   to be in the L1D cache.  */
+
+#ifndef MEMLAT
+# define MEMLAT 6
+#endif
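softpipe.h only supplies a default load latency: a translation unit that expects its data in the L1D cache can define MEMLAT itself before including the header, and the definition in the header is then skipped (the strcpy.S and strncpy.S files below define MEMLAT 2 directly rather than through this header). A hypothetical user of the header would look like this; illustrative only, not part of the patch:

    /* Illustrative sketch: override the default latency of 6. */
    #define MEMLAT 2          /* data expected to be in the L1D cache */
    #include "softpipe.h"     /* #ifndef guard keeps the value 2 */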
diff --git a/ap/build/uClibc/libc/string/ia64/strchr.S b/ap/build/uClibc/libc/string/ia64/strchr.S
new file mode 100644
index 0000000..66703f2
--- /dev/null
+++ b/ap/build/uClibc/libc/string/ia64/strchr.S
@@ -0,0 +1,113 @@
+/* Optimized version of the standard strchr() function.
+   This file is part of the GNU C Library.
+   Copyright (C) 2000, 2001, 2003 Free Software Foundation, Inc.
+   Contributed by Dan Pop <Dan.Pop@cern.ch>.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* Return: the address of the first occurrence of chr in str or NULL
+
+   Inputs:
+        in0:    str
+        in1:    chr
+
+   A modified version of memchr.S; the search ends when the character is
+   found or the terminating null character is encountered.
+
+   This implementation assumes little endian mode.  For big endian mode,
+   the instruction czx1.r should be replaced by czx1.l.  */
+
+#include "sysdep.h"
+#undef ret
+
+#define saved_lc	r18
+#define poschr		r19
+#define pos0		r20
+#define val1		r21
+#define val2		r22
+#define tmp		r24
+#define chrx8		r25
+#define loopcnt		r30
+
+#define str		in0
+#define chr		in1
+
+ENTRY(strchr)
+	.prologue
+	alloc r2 = ar.pfs, 2, 0, 0, 0
+	.save ar.lc, saved_lc
+        mov 	saved_lc = ar.lc 	/* save the loop counter */
+	.body
+	mov 	ret0 = str	
+	and 	tmp = 7, str		/* tmp = str % 8 */
+	mux1	chrx8 = chr, @brcst
+	extr.u	chr = chr, 0, 8		/* retain only the last byte */
+	cmp.ne	p8, p0 = r0, r0		/* clear p8 */
+	;;
+	sub	loopcnt = 8, tmp	/* loopcnt = 8 - tmp */
+	cmp.eq	p6, p0 = tmp, r0
+(p6)	br.cond.sptk	.str_aligned;;
+	adds	loopcnt = -1, loopcnt;;
+	mov	ar.lc = loopcnt
+.l1:
+	ld1	val2 = [ret0], 1
+	;;
+	cmp.eq	p6, p0 = val2, chr
+	cmp.eq	p7, p0 = val2, r0
+(p6)	br.cond.spnt	.restore_and_exit
+(p7)	br.cond.spnt	.notfound
+	br.cloop.sptk	.l1
+.str_aligned:
+	ld8	val1 = [ret0], 8;;
+	nop.b	0
+	nop.b 	0
+.l2:	
+	ld8.s	val2 = [ret0], 8	/* don't bomb out here */
+	czx1.r	pos0 = val1	
+	xor	tmp = val1, chrx8	/* if val1 contains chr, tmp will */
+	;;				/* contain a zero in its position */
+	czx1.r	poschr = tmp
+	cmp.ne	p6, p0 = 8, pos0
+	;;
+	cmp.ne	p7, p0 = 8, poschr
+(p7)	br.cond.spnt .foundit
+(p6)	br.cond.spnt .notfound
+	chk.s	val2, .recovery
+.back:
+	mov	val1 = val2	
+	br.cond.dptk .l2
+.foundit:
+(p6)	cmp.lt	p8, p0 = pos0, poschr	/* we found chr and null in the word */
+(p8)	br.cond.spnt .notfound		/* null was found before chr */
+	add	ret0 = ret0, poschr ;;
+	adds	ret0 = -15, ret0 ;;	/* should be -16, but we decrement */
+.restore_and_exit:			/* ret0 in the next instruction */
+	adds	ret0 = -1, ret0		/* ret0 was pointing 1 char too far */
+	mov 	ar.lc = saved_lc	/* restore the loop counter */
+	br.ret.sptk.many b0
+.notfound:
+	mov	ret0 = r0		/* return NULL if null was found */
+	mov 	ar.lc = saved_lc
+	br.ret.sptk.many b0
+.recovery:
+	adds	ret0 = -8, ret0;;
+	ld8	val2 = [ret0], 8	/* bomb out here */
+	br.cond.sptk	.back
+END(strchr)
+libc_hidden_def (strchr)
+#ifdef __UCLIBC_SUSV3_LEGACY__
+strong_alias (strchr, index)
+#endif
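The scan strategy described in the strchr.S header comment can be sketched at the C level: advance byte by byte until the pointer is 8-byte aligned, then examine whole 8-byte words, stopping at the first word that contains either the target byte or the terminating NUL. The HASZERO bit trick below stands in for the czx1 instruction, and the function name is illustrative; this is a sketch of the idea, not the uClibc source.

    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative sketch only; HASZERO plays the role of czx1. */
    #define ONES   0x0101010101010101ULL
    #define HIGHS  0x8080808080808080ULL
    #define HASZERO(w) (((w) - ONES) & ~(w) & HIGHS)

    char *strchr_sketch(const char *s, int c)
    {
        unsigned char ch = (unsigned char)c;
        uint64_t mask = ch * ONES;          /* ch replicated into all 8 bytes */

        /* Head: byte by byte until the address is 8-byte aligned. */
        for (; (uintptr_t)s % 8 != 0; s++) {
            if (*s == (char)ch)
                return (char *)s;
            if (*s == '\0')
                return NULL;
        }

        /* Body: whole words; stop at the first word holding ch or NUL. */
        const uint64_t *w = (const uint64_t *)s;
        while (!HASZERO(*w) && !HASZERO(*w ^ mask))
            w++;

        /* Tail: pinpoint the byte inside the stopping word. */
        for (s = (const char *)w; *s != (char)ch; s++)
            if (*s == '\0')
                return NULL;
        return (char *)s;
    }

The assembly reads one word ahead of the current one, which is why it uses ld8.s plus chk.s: a read past the string's last word must not fault if the next page is unmapped.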
diff --git a/ap/build/uClibc/libc/string/ia64/strcmp.S b/ap/build/uClibc/libc/string/ia64/strcmp.S
new file mode 100644
index 0000000..4da72fa
--- /dev/null
+++ b/ap/build/uClibc/libc/string/ia64/strcmp.S
@@ -0,0 +1,59 @@
+/* Optimized version of the standard strcmp() function.
+   This file is part of the GNU C Library.
+   Copyright (C) 2000, 2001, 2003 Free Software Foundation, Inc.
+   Contributed by Dan Pop <Dan.Pop@cern.ch>.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* Return: the result of the comparison
+
+   Inputs:
+        in0:    s1
+        in1:    s2
+
+   Unlike memcmp(), this function is optimized for mismatches within the
+   first few characters.  */
+
+#include "sysdep.h"
+#undef ret
+
+#define s1		in0
+#define s2		in1
+
+#define val1		r15
+#define val2		r16
+
+
+ENTRY(strcmp)
+	alloc	r2 = ar.pfs, 2, 0, 0, 0
+.loop:
+	ld1	val1 = [s1], 1
+	ld1	val2 = [s2], 1
+	cmp.eq	p6, p0 = r0, r0		/* set p6 */
+	;;
+	cmp.ne.and p6, p0 = val1, r0
+	cmp.ne.and p6, p0 = val2, r0
+	cmp.eq.and p6, p0 = val1, val2
+(p6)	br.cond.sptk .loop
+	sub	ret0 = val1, val2
+	br.ret.sptk.many b0
+END(strcmp)
+libc_hidden_def (strcmp)
+
+#ifndef __UCLIBC_HAS_LOCALE__
+strong_alias(strcmp,strcoll)
+libc_hidden_def(strcoll)
+#endif
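The loop in strcmp.S keeps iterating only while both current bytes are non-zero and equal, so a mismatch in the first few characters falls out of the loop almost immediately, which is what the header comment means by being optimized for early mismatches. A minimal C-level equivalent of that exit condition (the function name is illustrative, not part of uClibc):

    /* Illustrative sketch only; mirrors the three and-ed compares in .loop. */
    int strcmp_sketch(const char *s1, const char *s2)
    {
        unsigned char c1, c2;

        do {
            c1 = (unsigned char)*s1++;
            c2 = (unsigned char)*s2++;
        } while (c1 != 0 && c2 != 0 && c1 == c2);

        return c1 - c2;     /* same result as "sub ret0 = val1, val2" */
    }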
diff --git a/ap/build/uClibc/libc/string/ia64/strcpy.S b/ap/build/uClibc/libc/string/ia64/strcpy.S
new file mode 100644
index 0000000..7b002f6
--- /dev/null
+++ b/ap/build/uClibc/libc/string/ia64/strcpy.S
@@ -0,0 +1,145 @@
+/* Optimized version of the standard strcpy() function.
+   This file is part of the GNU C Library.
+   Copyright (C) 2000, 2001, 2003 Free Software Foundation, Inc.
+   Contributed by Dan Pop <Dan.Pop@cern.ch>.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* Return: dest
+
+   Inputs:
+        in0:    dest
+        in1:    src
+
+   In this form, it assumes little endian mode.  For big endian mode,
+   the two shifts in .l2 must be inverted:
+
+	shl	value = r[1], sh1	// value = w0 << sh1
+	shr.u   tmp = r[0], sh2		// tmp = w1 >> sh2
+ */
+
+#include "sysdep.h"
+#undef ret
+
+#define saved_lc	r15
+#define saved_pr	r16
+#define thresh		r17
+#define dest		r19
+#define src		r20
+#define len		r21
+#define asrc		r22
+#define tmp		r23
+#define pos		r24
+#define w0		r25
+#define w1		r26
+#define c		r27
+#define sh2		r28
+#define	sh1		r29
+#define loopcnt		r30
+#define	value		r31
+
+ENTRY(strcpy)
+	.prologue
+	alloc	r2 = ar.pfs, 2, 0, 30, 32
+
+#define MEMLAT 2
+	.rotr	r[MEMLAT + 2]
+	.rotp	p[MEMLAT + 1]
+
+	mov	ret0 = in0		/* return value = dest */
+	.save pr, saved_pr
+	mov	saved_pr = pr           /* save the predicate registers */
+	.save ar.lc, saved_lc
+        mov	saved_lc = ar.lc	/* save the loop counter */
+	.body
+	sub	tmp = r0, in0 ;;	/* tmp = -dest */
+	mov	dest = in0		/* dest */
+	mov	src = in1		/* src */
+	and	loopcnt = 7, tmp ;;	/* loopcnt = -dest % 8 */
+	cmp.eq	p6, p0 = loopcnt, r0
+	adds	loopcnt = -1, loopcnt	/* --loopcnt */
+(p6)	br.cond.sptk .dest_aligned ;;
+	mov	ar.lc = loopcnt
+.l1:					/* copy -dest % 8 bytes */
+	ld1	c = [src], 1		/* c = *src++ */
+	;;
+	st1	[dest] = c, 1		/* *dest++ = c */
+	cmp.eq	p6, p0 = c, r0
+(p6)	br.cond.dpnt .restore_and_exit
+	br.cloop.dptk .l1 ;;
+.dest_aligned:
+	and	sh1 = 7, src		/* sh1 = src % 8 */
+	mov	ar.lc = -1		/* "infinite" loop */
+	and	asrc = -8, src ;;	/* asrc = src & -OPSIZ  -- align src */
+	sub	thresh = 8, sh1
+	mov	pr.rot = 1 << 16	/* set rotating predicates */
+	cmp.ne	p7, p0 = r0, r0		/* clear p7 */
+	shl	sh1 = sh1, 3 ;;		/* sh1 = 8 * (src % 8) */
+	sub	sh2 = 64, sh1		/* sh2 = 64 - sh1 */
+	cmp.eq  p6, p0 = sh1, r0	/* is the src aligned? */
+(p6)    br.cond.sptk .src_aligned ;;
+	ld8	r[1] = [asrc],8 ;;
+
+	.align	32
+.l2:
+	ld8.s	r[0] = [asrc], 8
+	shr.u	value = r[1], sh1 ;;	/* value = w0 >> sh1 */
+	czx1.r	pos = value ;;		/* do we have an "early" zero */
+	cmp.lt	p7, p0 = pos, thresh	/* in w0 >> sh1? */
+(p7)	br.cond.dpnt .found0
+	chk.s	r[0], .recovery2	/* it is safe to do that only */
+.back2:					/* after the previous test */
+	shl	tmp = r[0], sh2		/* tmp = w1 << sh2 */
+	;;
+	or	value = value, tmp ;;	/* value |= tmp */
+	czx1.r	pos = value ;;
+	cmp.ne	p7, p0 = 8, pos
+(p7)	br.cond.dpnt .found0
+	st8	[dest] = value, 8	/* store val to dest */
+	br.ctop.dptk    .l2 ;;
+.src_aligned:
+.l3:
+(p[0])		ld8.s	r[0] = [src], 8
+(p[MEMLAT])	chk.s	r[MEMLAT], .recovery3
+.back3:
+(p[MEMLAT])	mov	value = r[MEMLAT]
+(p[MEMLAT])	czx1.r	pos = r[MEMLAT] ;;
+(p[MEMLAT])	cmp.ne	p7, p0 = 8, pos
+(p7)		br.cond.dpnt .found0
+(p[MEMLAT])	st8	[dest] = r[MEMLAT], 8
+		br.ctop.dptk .l3 ;;
+.found0:
+	mov	ar.lc = pos
+.l4:
+	extr.u	c = value, 0, 8		/* c = value & 0xff */
+	shr.u	value = value, 8
+	;;
+	st1	[dest] = c, 1
+	br.cloop.dptk	.l4 ;;
+.restore_and_exit:
+	mov	ar.lc = saved_lc	/* restore the loop counter */
+	mov	pr = saved_pr, -1	/* restore the predicate registers */
+	br.ret.sptk.many b0
+.recovery2:
+	add	tmp = -8, asrc ;;
+	ld8	r[0] = [tmp]
+	br.cond.sptk .back2
+.recovery3:
+	add	tmp = -(MEMLAT + 1) * 8, src ;;
+	ld8	r[MEMLAT] = [tmp]
+	br.cond.sptk .back3
+END(strcpy)
+libc_hidden_def (strcpy)
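The interesting step in strcpy.S is the .l2 loop for a misaligned source: every output word is assembled from two consecutive aligned source words, shifted by sh1 = 8 * (src % 8) and sh2 = 64 - sh1 and or-ed together, which is the pair of shifts the header comment says must be swapped for big endian. A C-level sketch of that merge, assuming little endian order and a non-zero sh1 (the aligned case takes the .src_aligned path instead); the helper name is illustrative:

    #include <stdint.h>

    /* Illustrative sketch of the shift-and-merge in .l2; not uClibc code. */
    uint64_t merge_words(uint64_t w0, uint64_t w1, unsigned sh1)
    {
        unsigned sh2 = 64 - sh1;            /* sh1 is 8, 16, ..., 56 here */
        return (w0 >> sh1) | (w1 << sh2);   /* big endian swaps >> and << */
    }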
diff --git a/ap/build/uClibc/libc/string/ia64/strlen.S b/ap/build/uClibc/libc/string/ia64/strlen.S
new file mode 100644
index 0000000..edbe843
--- /dev/null
+++ b/ap/build/uClibc/libc/string/ia64/strlen.S
@@ -0,0 +1,98 @@
+/* Optimized version of the standard strlen() function.
+   This file is part of the GNU C Library.
+   Copyright (C) 2000, 2001, 2003, 2005 Free Software Foundation, Inc.
+   Contributed by Dan Pop <Dan.Pop@cern.ch>.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* Return: the length of the input string
+
+   Input:
+        in0:    str
+
+   Look for the null character byte by byte until we reach a word-aligned
+   address, then search word by word using the czx instruction.  We're
+   also doing one word of read-ahead, which could cause problems if the
+   null character is on the last word of a page and the next page is not
+   mapped in the process address space.  Hence the use of the speculative
+   load.
+
+   This implementation assumes little endian mode.  For big endian mode,
+   the instruction czx1.r should be replaced by czx1.l.  */
+
+#include "sysdep.h"
+#undef ret
+
+#define saved_lc	r18
+#define str		r19
+#define pos0		r20
+#define val1		r21
+#define val2		r22
+#define origadd		r23
+#define tmp		r24
+#define loopcnt		r30
+#define len		ret0
+
+ENTRY(strlen)
+	.prologue
+	alloc r2 = ar.pfs, 1, 0, 0, 0
+	.save ar.lc, saved_lc
+        mov 	saved_lc = ar.lc 	/* save the loop counter */
+	.body
+	mov 	str = in0	
+	mov 	len = r0		/* len = 0 */
+	and 	tmp = 7, in0		/* tmp = str % 8 */
+	;;
+	sub	loopcnt = 8, tmp	/* loopcnt = 8 - tmp */
+	cmp.eq	p6, p0 = tmp, r0
+(p6)	br.cond.sptk	.str_aligned;;
+	adds	loopcnt = -1, loopcnt;;
+	mov	ar.lc = loopcnt
+.l1:
+	ld1	val2 = [str], 1
+	;;
+	cmp.eq	p6, p0 = val2, r0
+(p6)	br.cond.spnt	.restore_and_exit
+	adds	len = 1, len
+	br.cloop.dptk	.l1
+.str_aligned:
+	mov	origadd = str		/* origadd = orig */
+	ld8	val1 = [str], 8;;
+	nop.b	0
+	nop.b 	0
+.l2:	ld8.s	val2 = [str], 8		/* don't bomb out here */
+	czx1.r	pos0 = val1	
+	;;
+	cmp.ne	p6, p0 = 8, pos0
+(p6)	br.cond.spnt .foundit
+	chk.s	val2, .recovery
+.back:
+	mov	val1 = val2	
+	br.cond.dptk	.l2
+.foundit:
+	sub	tmp = str, origadd	/* tmp = current address - orig */
+	add	len = len, pos0;;
+	add	len = len, tmp;;
+	adds	len = -16, len
+.restore_and_exit:
+	mov ar.lc = saved_lc		/* restore the loop counter */
+	br.ret.sptk.many b0
+.recovery:
+	adds	str = -8, str;;
+	ld8	val2 = [str], 8		/* bomb out here */
+	br.cond.sptk	.back
+END(strlen)
+libc_hidden_def (strlen)
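The strlen.S header comment describes the usual two-phase scan: count byte by byte up to an 8-byte boundary, then walk aligned words and locate the first zero byte inside the stopping word. A C-level sketch of that idea, where the bit trick again stands in for czx1.r and the function name is illustrative:

    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative sketch only; not the uClibc sources. */
    size_t strlen_sketch(const char *str)
    {
        const char *s = str;

        while ((uintptr_t)s % 8 != 0) {     /* head: byte by byte */
            if (*s == '\0')
                return (size_t)(s - str);
            s++;
        }

        const uint64_t *w = (const uint64_t *)s;
        const uint64_t ones  = 0x0101010101010101ULL;
        const uint64_t highs = 0x8080808080808080ULL;

        while (!((*w - ones) & ~*w & highs))    /* word free of zero bytes? */
            w++;

        for (s = (const char *)w; *s != '\0'; s++)  /* find the zero byte */
            ;
        return (size_t)(s - str);
    }

As in strchr.S, the one-word read-ahead is done with ld8.s and validated with chk.s so that touching the word after the terminator cannot fault.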
diff --git a/ap/build/uClibc/libc/string/ia64/strncmp.S b/ap/build/uClibc/libc/string/ia64/strncmp.S
new file mode 100644
index 0000000..e31f8fb
--- /dev/null
+++ b/ap/build/uClibc/libc/string/ia64/strncmp.S
@@ -0,0 +1,62 @@
+/* Optimized version of the standard strncmp() function.
+   This file is part of the GNU C Library.
+   Copyright (C) 2000, 2001, 2003 Free Software Foundation, Inc.
+   Contributed by Dan Pop <Dan.Pop@cern.ch>.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* Return: the result of the comparison
+
+   Inputs:
+        in0:    s1
+        in1:    s2
+	in2:	n
+
+   Unlike memcmp(), this function is optimized for mismatches within the
+   first few characters.  */
+
+#include "sysdep.h"
+#undef ret
+
+#define s1		in0
+#define s2		in1
+#define n		in2
+
+#define val1		r15
+#define val2		r16
+
+
+ENTRY(strncmp)
+	alloc	r2 = ar.pfs, 3, 0, 0, 0
+	mov	ret0 = r0
+	cmp.eq  p6, p0 = r0, r0		/* set p6 */
+	cmp.eq	p7, p0 = n, r0		/* return immediately if n == 0 */
+(p7)	br.cond.spnt .restore_and_exit ;;
+.loop:
+	ld1	val1 = [s1], 1
+	ld1	val2 = [s2], 1
+	adds	n = -1, n		/* n-- */
+	;;
+	cmp.ne.and p6, p0 = val1, r0
+	cmp.ne.and p6, p0 = val2, r0
+	cmp.ne.and p6, p0 = n, r0
+	cmp.eq.and p6, p0 = val1, val2
+(p6)	br.cond.sptk .loop
+	sub	ret0 = val1, val2
+.restore_and_exit:
+	br.ret.sptk.many b0
+END(strncmp)
+libc_hidden_weak (strncmp)
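strncmp.S extends the same early-exit loop with a length bound: the four and-ed compares translate to "both bytes non-zero, more characters allowed, and bytes equal", with n == 0 handled up front. A C-level equivalent (the function name is illustrative, not part of uClibc):

    #include <stddef.h>

    /* Illustrative sketch only; mirrors the predicate chain in .loop. */
    int strncmp_sketch(const char *s1, const char *s2, size_t n)
    {
        unsigned char c1, c2;

        if (n == 0)             /* return immediately if n == 0 */
            return 0;

        do {
            c1 = (unsigned char)*s1++;
            c2 = (unsigned char)*s2++;
            n--;
        } while (c1 != 0 && c2 != 0 && n != 0 && c1 == c2);

        return c1 - c2;
    }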
diff --git a/ap/build/uClibc/libc/string/ia64/strncpy.S b/ap/build/uClibc/libc/string/ia64/strncpy.S
new file mode 100644
index 0000000..3f29bbd
--- /dev/null
+++ b/ap/build/uClibc/libc/string/ia64/strncpy.S
@@ -0,0 +1,232 @@
+/* Optimized version of the standard strncpy() function.
+   This file is part of the GNU C Library.
+   Copyright (C) 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+   Contributed by Dan Pop <Dan.Pop@cern.ch>
+	      and Jakub Jelinek <jakub@redhat.com>.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* Return: dest
+
+   Inputs:
+	in0:    dest
+	in1:    src
+	in2:	len
+
+   In this form, it assumes little endian mode.
+ */
+
+#include "sysdep.h"
+#undef ret
+
+#define saved_lc	r15
+#define saved_pr	r16
+#define thresh		r17
+#define dest		r18
+#define dest2		r19
+#define src		r20
+#define len		r21
+#define asrc		r22
+#define tmp		r23
+#define pos		r24
+#define w0		r25
+#define w1		r26
+#define c		r27
+#define sh2		r28
+#define	sh1		r29
+#define loopcnt		r30
+#define	value		r31
+
+ENTRY(strncpy)
+	.prologue
+	alloc 	r2 = ar.pfs, 3, 0, 29, 32
+
+#define MEMLAT 2
+	.rotr	r[MEMLAT + 2]
+	.rotp	p[MEMLAT + 1]
+
+	mov	ret0 = in0		/* return value = dest */
+	.save pr, saved_pr
+	mov	saved_pr = pr           /* save the predicate registers */
+	.save ar.lc, saved_lc
+	mov 	saved_lc = ar.lc	/* save the loop counter */
+	mov	ar.ec = 0		/* ec is not guaranteed to */
+					/* be zero upon function entry */
+	.body
+	cmp.geu p6, p5 = 24, in2
+(p6)	br.cond.spnt .short_len
+	sub	tmp = r0, in0 ;;	/* tmp = -dest */
+	mov	len = in2		/* len */
+	mov 	dest = in0		/* dest */
+	mov 	src = in1		/* src */
+	and	tmp = 7, tmp ;;		/* loopcnt = -dest % 8 */
+	cmp.eq	p6, p7 = tmp, r0
+	adds	loopcnt = -1, tmp	/* --loopcnt */
+(p6)	br.cond.sptk .dest_aligned ;;
+	sub	len = len, tmp		/* len -= -dest % 8 */
+	mov	ar.lc = loopcnt
+.l1:					/* copy -dest % 8 bytes */
+(p5)	ld1	c = [src], 1		/* c = *src++ */
+	;;
+	st1	[dest] = c, 1		/* *dest++ = c */
+	cmp.ne	p5, p7 = c, r0
+	br.cloop.dptk .l1 ;;
+(p7)	br.cond.dpnt	.found0_align
+
+.dest_aligned:				/* p7 should be cleared here */
+	shr.u	c = len, 3		/* c = len / 8 */
+	and	sh1 = 7, src 		/* sh1 = src % 8 */
+	and	asrc = -8, src ;;	/* asrc = src & -OPSIZ  -- align src */
+	adds	c = (MEMLAT-1), c	/* c = (len / 8) + MEMLAT - 1 */
+	sub	thresh = 8, sh1
+	mov	pr.rot = 1 << 16	/* set rotating predicates */
+	shl	sh1 = sh1, 3 ;;		/* sh1 = 8 * (src % 8) */
+	mov	ar.lc = c		/* "infinite" loop */
+	sub	sh2 = 64, sh1		/* sh2 = 64 - sh1 */
+	cmp.eq  p6, p0 = sh1, r0 	/* is the src aligned? */
+(p6)    br.cond.sptk .src_aligned
+	adds	c = -(MEMLAT-1), c ;;	/* c = (len / 8) */
+	ld8	r[1] = [asrc],8
+	mov	ar.lc = c ;;
+
+	.align	32
+.l2:
+(p6)	st8	[dest] = value, 8	/* store val to dest */
+	ld8.s	r[0] = [asrc], 8
+	shr.u	value = r[1], sh1 ;; 	/* value = w0 >> sh1 */
+	czx1.r	pos = value ;;		/* do we have an "early" zero */
+	cmp.lt	p7, p0 = pos, thresh	/* in w0 >> sh1? */
+	adds	len = -8, len		/* len -= 8 */
+(p7)	br.cond.dpnt .nonalign_found0
+	chk.s	r[0], .recovery2	/* it is safe to do that only */
+.back2:					/* after the previous test */
+	shl	tmp = r[0], sh2  	/* tmp = w1 << sh2 */
+	;;
+	or	value = value, tmp ;;	/* value |= tmp */
+	czx1.r	pos = value ;;
+	cmp.ne	p7, p6 = 8, pos
+(p7)	br.cond.dpnt .nonalign_found0
+	br.ctop.dptk    .l2 ;;
+	adds	len = 8, len
+	br.cond.sptk	.not_found0 ;;
+.nonalign_found0:
+	cmp.gtu	p6, p0 = -8, len
+(p6)	br.cond.dptk .found0
+	adds	len = 8, len
+	br.cond.sptk	.not_found0 ;;
+
+	.align	32
+.src_aligned:
+.l3:
+(p[0])		ld8.s	r[0] = [src], 8
+(p[MEMLAT])	chk.s	r[MEMLAT], .recovery3
+.back3:
+(p[MEMLAT])	mov	value = r[MEMLAT]
+(p[MEMLAT])	czx1.r	pos = r[MEMLAT] ;;
+(p[MEMLAT])	cmp.ne	p7, p0 = 8, pos
+(p[MEMLAT])	adds	len = -8, len	/* len -= 8 */
+(p7)		br.cond.dpnt .found0
+(p[MEMLAT])	st8	[dest] = r[MEMLAT], 8
+		br.ctop.dptk .l3 ;;
+
+	chk.s	r[MEMLAT-1], .recovery4
+.back4:
+	mov	value = r[MEMLAT-1]
+
+.not_found0:
+	cmp.eq	p5, p6 = len, r0
+	adds	len = -1, len
+(p5)	br.cond.dptk	.restore_and_exit ;;
+	mov	ar.lc = len
+.l4:
+(p6)	extr.u	c = value, 0, 8		/* c = value & 0xff */
+(p6)	shr.u	value = value, 8 ;;
+	st1	[dest] = c, 1
+	cmp.ne	p6, p0 = c, r0
+	br.cloop.dptk	.l4
+	br.cond.sptk	.restore_and_exit
+
+.found0_align:
+	mov	pos = 0
+	adds	len = -8, len
+	mov	value = 0 ;;
+.found0:
+	shl	tmp = pos, 3
+	shr.u	loopcnt = len, 4	/* loopcnt = len / 16 */
+	mov	c = -1 ;;
+	cmp.eq	p6, p0 = loopcnt, r0
+	adds	loopcnt = -1, loopcnt
+	shl	c = c, tmp ;;
+	and	len = 0xf, len
+	andcm	value = value, c
+	mov	ar.lc = loopcnt ;;
+	cmp.le	p7, p0 = 8, len
+	adds	dest2 = 16, dest
+	st8	[dest] = value, 8
+	and	len = 0x7, len
+(p6)	br.cond.dpnt	.l6 ;;
+.l5:
+	st8	[dest] = r0, 16
+	st8	[dest2] = r0, 16
+	br.cloop.dptk	.l5 ;;
+.l6:
+(p7)	st8	[dest] = r0, 8
+	cmp.eq	p5, p0 = len, r0
+	adds	len = -1, len
+(p5)	br.cond.dptk .restore_and_exit ;;
+	mov	ar.lc = len ;;
+.l7:
+	st1	[dest] = r0, 1
+	br.cloop.dptk	.l7 ;;
+.restore_and_exit:
+	mov 	ar.lc = saved_lc	/* restore the loop counter */
+	mov	pr = saved_pr, -1	/* restore the predicate registers */
+	br.ret.sptk.many b0
+
+.short_len:
+	cmp.eq	p5, p0 = in2, r0
+	adds	loopcnt = -1, in2
+(p5)	br.cond.spnt .restore_and_exit ;;
+	mov	ar.lc = loopcnt		/* p6 should be set when we get here */
+.l8:
+(p6)	ld1	c = [in1], 1		/* c = *src++ */
+	;;
+	st1	[in0] = c, 1		/* *dest++ = c */
+(p6)	cmp.ne	p6, p0 = c, r0
+	br.cloop.dptk .l8
+	;;
+	mov 	ar.lc = saved_lc	/* restore the loop counter */
+	mov	pr = saved_pr, -1	/* restore the predicate registers */
+	br.ret.sptk.many b0
+.recovery2:
+	add	c = 8, len
+	add	tmp = -8, asrc ;;
+	cmp.gtu	p8, p5 = c, thresh ;;
+(p8)	ld8	r[0] = [tmp]
+(p5)	mov	r[0] = r0
+	br.cond.sptk .back2
+.recovery3:
+	add	tmp = -(MEMLAT + 1) * 8, src ;;
+	ld8	r[MEMLAT] = [tmp]
+	br.cond.sptk .back3
+.recovery4:
+	cmp.eq	p5, p6 = len, r0
+	add	tmp = -MEMLAT * 8, src ;;
+(p6)	ld8	r[MEMLAT - 1] = [tmp]
+(p5)	mov	r[MEMLAT - 1] = r0
+	br.cond.sptk .back4
+END(strncpy)
+libc_hidden_def (strncpy)
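strncpy.S splits the work into a copy phase and an explicit zero-fill phase (the .l5/.l6/.l7 loops), which matches the ISO C strncpy contract: copy at most len bytes of src, then pad the remainder of dest with zero bytes. A minimal C-level restatement of that contract (illustrative only; the assembly above additionally special-cases short lengths and misaligned sources):

    #include <stddef.h>

    /* Illustrative sketch of the strncpy contract; not the uClibc code. */
    char *strncpy_sketch(char *dest, const char *src, size_t len)
    {
        size_t i = 0;

        for (; i < len && src[i] != '\0'; i++)  /* copy up to len bytes */
            dest[i] = src[i];
        for (; i < len; i++)                    /* zero-fill the rest */
            dest[i] = '\0';

        return dest;                            /* return value = dest */
    }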
diff --git a/ap/build/uClibc/libc/string/ia64/sysdep.h b/ap/build/uClibc/libc/string/ia64/sysdep.h
new file mode 100644
index 0000000..d10020a
--- /dev/null
+++ b/ap/build/uClibc/libc/string/ia64/sysdep.h
@@ -0,0 +1,168 @@
+/* Copyright (C) 1999, 2000, 2002, 2003, 2004 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Written by Jes Sorensen, <Jes.Sorensen@cern.ch>, April 1999.
+   Based on code originally written by David Mosberger-Tang
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#ifndef _LINUX_IA64_SYSDEP_H
+#define _LINUX_IA64_SYSDEP_H 1
+
+#include <features.h>
+#include <asm/unistd.h>
+
+#ifdef __ASSEMBLER__
+
+/* Macros to help writing .prologue directives in assembly code.  */
+#define ASM_UNW_PRLG_RP			0x8
+#define ASM_UNW_PRLG_PFS		0x4
+#define ASM_UNW_PRLG_PSP		0x2
+#define ASM_UNW_PRLG_PR			0x1
+#define ASM_UNW_PRLG_GRSAVE(ninputs)	(32+(ninputs))
+
+#ifdef	__STDC__
+#define C_LABEL(name)		name :
+#else
+#define C_LABEL(name)		name/**/:
+#endif
+
+#define CALL_MCOUNT
+
+#define ENTRY(name)				\
+	.text;					\
+	.align 32;				\
+	.proc C_SYMBOL_NAME(name);		\
+	.global C_SYMBOL_NAME(name);		\
+	C_LABEL(name)				\
+	CALL_MCOUNT
+
+#define LEAF(name)				\
+  .text;					\
+  .align 32;					\
+  .proc C_SYMBOL_NAME(name);			\
+  .global name;					\
+  C_LABEL(name)
+
+/* Mark the end of function SYM.  */
+#undef END
+#define END(sym)	.endp C_SYMBOL_NAME(sym)
+
+/* For Linux we can use the system call table in the header file
+	/usr/include/asm/unistd.h
+   of the kernel.  But these symbols do not follow the SYS_* syntax
+   so we have to redefine the `SYS_ify' macro here.  */
+#undef SYS_ify
+#ifdef __STDC__
+# define SYS_ify(syscall_name)	__NR_##syscall_name
+#else
+# define SYS_ify(syscall_name)	__NR_/**/syscall_name
+#endif
+
+/* Linux uses a negative return value to indicate syscall errors, unlike
+   most Unices, which use the condition codes' carry flag.
+
+   Since version 2.1 the return value of a system call might be negative
+   even if the call succeeded.  E.g., the `lseek' system call might return
+   a large offset.  Therefore we must no longer test for < 0, but instead
+   check that the return value is a genuine error number.  Linus said he
+   will make sure that no syscall returns a value in -1 .. -4095 as a
+   valid result, so we can safely test against -4095.  */
+
+/* We don't want the label for the error handler to be visible in the symbol
+   table when we define it here.  */
+#define SYSCALL_ERROR_LABEL __syscall_error
+
+#undef PSEUDO
+#define	PSEUDO(name, syscall_name, args)	\
+  ENTRY(name)					\
+    DO_CALL (SYS_ify(syscall_name));		\
+	cmp.eq p6,p0=-1,r10;			\
+(p6)	br.cond.spnt.few __syscall_error;
+
+#define DO_CALL_VIA_BREAK(num)			\
+	mov r15=num;				\
+	break __BREAK_SYSCALL
+
+#ifdef IA64_USE_NEW_STUB
+# ifdef SHARED
+#  define DO_CALL(num)				\
+	.prologue;				\
+	adds r2 = SYSINFO_OFFSET, r13;;		\
+	ld8 r2 = [r2];				\
+	.save ar.pfs, r11;			\
+	mov r11 = ar.pfs;;			\
+	.body;					\
+	mov r15 = num;				\
+	mov b7 = r2;				\
+	br.call.sptk.many b6 = b7;;		\
+	.restore sp;				\
+	mov ar.pfs = r11;			\
+	.prologue;				\
+	.body
+# else /* !SHARED */
+#  define DO_CALL(num)				\
+	.prologue;				\
+	mov r15 = num;				\
+	movl r2 = _dl_sysinfo;;			\
+	ld8 r2 = [r2];				\
+	.save ar.pfs, r11;			\
+	mov r11 = ar.pfs;;			\
+	.body;					\
+	mov b7 = r2;				\
+	br.call.sptk.many b6 = b7;;		\
+	.restore sp;				\
+	mov ar.pfs = r11;			\
+	.prologue;				\
+	.body
+# endif
+#else
+# define DO_CALL(num)				DO_CALL_VIA_BREAK(num)
+#endif
+
+#undef PSEUDO_END
+#define PSEUDO_END(name)	.endp C_SYMBOL_NAME(name);
+
+#undef PSEUDO_NOERRNO
+#define	PSEUDO_NOERRNO(name, syscall_name, args)	\
+  ENTRY(name)						\
+    DO_CALL (SYS_ify(syscall_name));
+
+#undef PSEUDO_END_NOERRNO
+#define PSEUDO_END_NOERRNO(name)	.endp C_SYMBOL_NAME(name);
+
+#undef PSEUDO_ERRVAL
+#define	PSEUDO_ERRVAL(name, syscall_name, args)	\
+  ENTRY(name)					\
+    DO_CALL (SYS_ify(syscall_name));		\
+	cmp.eq p6,p0=-1,r10;			\
+(p6)	mov r10=r8;
+
+
+#undef PSEUDO_END_ERRVAL
+#define PSEUDO_END_ERRVAL(name)	.endp C_SYMBOL_NAME(name);
+
+#undef END
+#define END(name)						\
+	.size	C_SYMBOL_NAME(name), . - C_SYMBOL_NAME(name) ;	\
+	.endp	C_SYMBOL_NAME(name)
+
+#define ret			br.ret.sptk.few b0
+#define ret_NOERRNO		ret
+#define ret_ERRVAL		ret
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* linux/ia64/sysdep.h */
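The error-convention comment in sysdep.h is worth restating in C: a raw Linux syscall result denotes an error only if it falls in -4095 .. -1, so a plain "< 0" test would misclassify legitimate large results such as lseek offsets. A sketch of that check follows; the helper name is illustrative, and the ia64 PSEUDO macro above instead tests r10 == -1.

    #include <errno.h>

    /* Illustrative sketch only; not part of uClibc. */
    long syscall_ret_sketch(unsigned long raw)
    {
        if (raw > -4096UL) {            /* raw lies in -4095 .. -1 */
            errno = (int)-(long)raw;    /* recover the error number */
            return -1;
        }
        return (long)raw;               /* success, possibly a huge value */
    }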