author		Ulrich Drepper <drepper@redhat.com>	2003-11-19 06:12:51 +0000
committer	Ulrich Drepper <drepper@redhat.com>	2003-11-19 06:12:51 +0000
commit		c9002c1b7a648fe49b8472d6dc6724c78fa1329f (patch)
tree		b9a61d0ea66ac08f3bc064d1ddd8823a3e65ebba /sysdeps/ia64
parent		ad7f28c29d06ddb4506d0d75e089732740b5bd2b (diff)
Update.
2003-11-14 David Mosberger   <davidm@hpl.hp.com>

	* sysdeps/unix/sysv/linux/ia64/sysdep.h
	(GAS_ALIGN_BREAKS_UNWIND_INFO): Define this macro to indicate
	that all existing GAS versions have a problem with .align inside
	a function.
	* sysdeps/ia64/memccpy.S: Work around GAS_ALIGN_BREAKS_UNWIND_INFO bug.
	* sysdeps/ia64/memcpy.S: Likewise.
	* sysdeps/ia64/memset.S: Likewise.
	* sysdeps/ia64/memmove.S: Likewise.  Also move the jump-table
	out of .text into .rodata, where it belongs.
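
	The sysdep.h hunk referenced above is not shown here, since this
	diff is limited to 'sysdeps/ia64'.  A minimal sketch of what that
	definition amounts to (the comment wording and exact placement in
	sysdep.h are assumptions, not copied from the tree):

		/* Sketch only: all current GAS versions emit broken unwind
		   info when .align is used inside a function, so define the
		   macro unconditionally rather than keying it to a
		   particular assembler version.  */
		#define GAS_ALIGN_BREAKS_UNWIND_INFO

	The .S files below then test it with a plain #ifdef and substitute
	a one-bundle "{ nop 0 }" for the .align directive.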

	* sysdeps/unix/sysv/linux/ia64/pipe.S: There is no need to
	save/restore input-arguments, because they're necessarily
	preserved by the kernel to support syscall-restart.
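
	For the pipe.S point, a hypothetical fragment (register names and
	the surrounding syscall sequence are illustrative only, not the
	actual sysdeps/unix/sysv/linux/ia64/pipe.S source): because the
	kernel must leave the stacked input registers intact in order to
	restart an interrupted syscall, an argument such as the fd-array
	pointer in in0 is still valid after the syscall returns and no
	longer needs to be parked in a register the stub saves itself.

		// Sketch only -- not the actual pipe.S.  Previously the
		// pointer argument would have been copied aside first,
		//	mov	loc0 = in0	// stash pointer in a local
		// and the result stores done through loc0.  Since the
		// kernel preserves the inputs across the syscall, the stub
		// can store straight through the input register instead:
		st4	[in0] = r8, 4	// first fd returned by the kernel
		st4	[in0] = r9	// second fd
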
Diffstat (limited to 'sysdeps/ia64')
-rw-r--r--	sysdeps/ia64/memccpy.S	11
-rw-r--r--	sysdeps/ia64/memcpy.S	20
-rw-r--r--	sysdeps/ia64/memmove.S	15
-rw-r--r--	sysdeps/ia64/memset.S	12
4 files changed, 48 insertions(+), 10 deletions(-)
diff --git a/sysdeps/ia64/memccpy.S b/sysdeps/ia64/memccpy.S
index 6bccb96b70..53c43c512b 100644
--- a/sysdeps/ia64/memccpy.S
+++ b/sysdeps/ia64/memccpy.S
@@ -52,6 +52,15 @@
 #define loopcnt		r30
 #define	value		r31
 
+#ifdef GAS_ALIGN_BREAKS_UNWIND_INFO
+/* Manually force proper loop-alignment.  Note: be sure to
+   double-check the code-layout after making any changes to
+   this routine! */
+# define ALIGN(n)	{ nop 0 }
+#else
+# define ALIGN(n)	.align n
+#endif
+
 ENTRY(memccpy)
 	.prologue
 	alloc 	r2 = ar.pfs, 4, 40 - 4, 0, 40
@@ -110,7 +119,7 @@ ENTRY(memccpy)
 	mov	ar.ec = MEMLAT + 6 + 1 	// six more passes needed
 	ld8	r[1] = [asrc], 8 	// r[1] = w0
 	cmp.ne	p6, p0 = r0, r0	;;	// clear p6
-	.align	32
+	ALIGN(32)
 .l2:
 (p[0])		ld8.s	r[0] = [asrc], 8		// r[0] = w1
 (p[MEMLAT])	shr.u	tmp1[0] = r[1 + MEMLAT], sh1	// tmp1 = w0 >> sh1
diff --git a/sysdeps/ia64/memcpy.S b/sysdeps/ia64/memcpy.S
index 7471bdbc12..a2aeea00fd 100644
--- a/sysdeps/ia64/memcpy.S
+++ b/sysdeps/ia64/memcpy.S
@@ -103,14 +103,22 @@
 #define the_z		z
 #endif
 
+#ifdef GAS_ALIGN_BREAKS_UNWIND_INFO
+/* Manually force proper loop-alignment.  Note: be sure to
+   double-check the code-layout after making any changes to
+   this routine! */
+# define ALIGN(n)	{ nop 0 }
+#else
+# define ALIGN(n)	.align n
+#endif
 
 #if defined(USE_LFETCH)
 #define LOOP(shift)						\
-		.align	32 ;					\
+		ALIGN(32);					\
 .loop##shift##:							\
 { .mmb								\
 (p[0])	ld8.nt1	r[0] = [asrc], 8 ;				\
-(p[0])	lfetch.nt1 [ptr1], 16 ;				\
+(p[0])	lfetch.nt1 [ptr1], 16 ;					\
 	nop.b 0 ;						\
 } { .mib							\
 (p[MEMLAT+1]) st8 [dest] = tmp3, 8 ;				\
@@ -118,7 +126,7 @@
  	nop.b 0 ;;						\
  } { .mmb							\
 (p[0])	ld8.nt1	s[0] = [asrc], 8 ;				\
-(p[0])	lfetch.nt1	[ptr2], 16 ;			\
+(p[0])	lfetch.nt1	[ptr2], 16 ;				\
 	nop.b 0 ;						\
 } { .mib							\
 (p[MEMLAT+1]) st8 [dest] = tmp4, 8 ;				\
@@ -130,7 +138,7 @@
 }
 #else
 #define LOOP(shift)						\
-		.align	32 ;					\
+		ALIGN(32);					\
 .loop##shift##:							\
 { .mmb								\
 (p[0])	ld8.nt1	r[0] = [asrc], 8 ;				\
@@ -254,7 +262,11 @@ ENTRY(memcpy)
 	movi0	ar.lc = loopcnt 	// set the loop counter
 ;; }
 
+#ifdef  GAS_ALIGN_BREAKS_UNWIND_INFO
+	{ nop 0 }
+#else
 	.align	32
+#endif
 #if defined(USE_FLP)
 .l1: // ------------------------------- // L1: Everything a multiple of 8
 { .mmi
diff --git a/sysdeps/ia64/memmove.S b/sysdeps/ia64/memmove.S
index af0f3924ed..7b8c86b324 100644
--- a/sysdeps/ia64/memmove.S
+++ b/sysdeps/ia64/memmove.S
@@ -56,12 +56,18 @@
 #define loopcnt		r30
 #define	value		r31
 
+#ifdef GAS_ALIGN_BREAKS_UNWIND_INFO
+# define ALIGN(n)	{ nop 0 }
+#else
+# define ALIGN(n)	.align n
+#endif
+
 #define LOOP(shift)							\
-		.align	32 ;						\
+		ALIGN(32);						\
 .loop##shift##:								\
 (p[0])		ld8	r[0] = [asrc], 8 ;	/* w1 */		\
 (p[MEMLAT+1])	st8	[dest] = value, 8 ;				\
-(p[MEMLAT])	shrp	value = r[MEMLAT], r[MEMLAT+1], shift ;	\
+(p[MEMLAT])	shrp	value = r[MEMLAT], r[MEMLAT+1], shift ;		\
 		nop.b	0 ;						\
 		nop.b	0 ;						\
 		br.ctop.sptk .loop##shift ;				\
@@ -228,6 +234,10 @@ ENTRY(memmove)
 (p[MEMLAT])	st1	[dest] = r[MEMLAT], -1
 		br.ctop.dptk .l6
 		br.cond.sptk .restore_and_exit
+END(memmove)
+
+	.rodata
+	.align 8
 .table:
 	data8	0			// dummy entry
 	data8 	.loop56 - .loop8
@@ -238,5 +248,4 @@ ENTRY(memmove)
 	data8	.loop56 - .loop48
 	data8	.loop56 - .loop56
 
-END(memmove)
 libc_hidden_builtin_def (memmove)
diff --git a/sysdeps/ia64/memset.S b/sysdeps/ia64/memset.S
index 3353000186..84d8f0a191 100644
--- a/sysdeps/ia64/memset.S
+++ b/sysdeps/ia64/memset.S
@@ -153,7 +153,9 @@ ENTRY(memset)
 (p_zr)	br.cond.dptk.many .l1b			// Jump to use stf.spill
 ;; }
 
+#ifndef GAS_ALIGN_BREAKS_UNWIND_INFO
 	.align 32 // -------- //  L1A: store ahead into cache lines; fill later
+#endif
 { .mmi
 	and	tmp = -(LINE_SIZE), cnt		// compute end of range
 	mov	ptr9 = ptr1			// used for prefetching
@@ -222,7 +224,11 @@ ENTRY(memset)
 	br.cond.dpnt.many  .move_bytes_from_alignment	// Branch no. 3
 ;; }
 
+#ifdef GAS_ALIGN_BREAKS_UNWIND_INFO
+	{ nop 0 }
+#else
 	.align 32
+#endif
 .l1b:	// ------------------ //  L1B: store ahead into cache lines; fill later
 { .mmi
 	and	tmp = -(LINE_SIZE), cnt		// compute end of range
@@ -283,13 +289,15 @@ ENTRY(memset)
 { .mib
 	cmp.eq	p_scr, p0 = loopcnt, r0
 	add	loopcnt = -1, loopcnt
-(p_scr)	br.cond.dpnt.many .store_words
+(p_scr)	br.cond.dpnt.many store_words
 ;; }
 { .mib
 	and	cnt = 0x1f, cnt		// compute the remaining cnt
 	movi0   ar.lc = loopcnt
 ;; }
+#ifndef GAS_ALIGN_BREAKS_UNWIND_INFO
 	.align 32
+#endif
 .l2:	// ---------------------------- //  L2A:  store 32B in 2 cycles
 { .mmb
 	store	[ptr1] = myval, 8
@@ -299,7 +307,7 @@ ENTRY(memset)
 	store	[ptr2] = myval, 24
 	br.cloop.dptk.many .l2
 ;; }
-.store_words:
+store_words:
 { .mib
 	cmp.gt	p_scr, p0 = 8, cnt		// just a few bytes left ?
 (p_scr)	br.cond.dpnt.many .move_bytes_from_alignment	// Branch