author    H.J. Lu <hjl.tools@gmail.com>  2013-01-05 16:00:38 -0800
committer H.J. Lu <hjl.tools@gmail.com>  2013-01-05 16:00:38 -0800
commit    afec409af996af8efb082c109d0dd9fc211f4d8e
tree      2e1cb5c60b908d13e0333b98e16424ad81e940f3
parent    0b3986d0dcc2b7e6f18420563179a01c356e73dd
Change __x86_64 prefix in cache-size variables to __x86
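
The rename gives the i386 and x86_64 ports one arch-neutral set of cache-size variables instead of remapping the 64-bit names, as the deleted #define block in the first hunk below shows. A minimal sketch of the renamed globals as a string routine now sees them (plain extern stands in for glibc's internal attribute_hidden):

/* Sketch under the new naming: one set of globals, defined once in
   cacheinfo.c and read by i386 and x86_64 string routines alike.
   Plain extern stands in for glibc's attribute_hidden.  */
extern long int __x86_data_cache_size;          /* ~L1d, rounded    */
extern long int __x86_data_cache_size_half;     /* precomputed /2   */
extern long int __x86_shared_cache_size;        /* ~L2/L3, rounded  */
extern long int __x86_shared_cache_size_half;   /* precomputed /2   */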
Diffstat (limited to 'sysdeps')

 sysdeps/i386/i686/cacheinfo.c                 |  9
 sysdeps/x86_64/cacheinfo.c                    | 54
 sysdeps/x86_64/memcpy.S                       |  6
 sysdeps/x86_64/memset.S                       |  6
 sysdeps/x86_64/multiarch/memcmp-sse4.S        |  4
 sysdeps/x86_64/multiarch/memcpy-ssse3-back.S  | 10
 sysdeps/x86_64/multiarch/memcpy-ssse3.S       | 12

 7 files changed, 46 insertions(+), 55 deletions(-)
diff --git a/sysdeps/i386/i686/cacheinfo.c b/sysdeps/i386/i686/cacheinfo.c
index 3635961727..0f869df4d8 100644
--- a/sysdeps/i386/i686/cacheinfo.c
+++ b/sysdeps/i386/i686/cacheinfo.c
@@ -1,12 +1,3 @@
-#define __x86_64_data_cache_size __x86_data_cache_size
-#define __x86_64_raw_data_cache_size __x86_raw_data_cache_size
-#define __x86_64_data_cache_size_half __x86_data_cache_size_half
-#define __x86_64_raw_data_cache_size_half __x86_raw_data_cache_size_half
-#define __x86_64_shared_cache_size __x86_shared_cache_size
-#define __x86_64_raw_shared_cache_size __x86_raw_shared_cache_size
-#define __x86_64_shared_cache_size_half __x86_shared_cache_size_half
-#define __x86_64_raw_shared_cache_size_half __x86_raw_shared_cache_size_half
-
 #define DISABLE_PREFETCHW
 #define DISABLE_PREFERRED_MEMORY_INSTRUCTION
 
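The hunk above drops the #define shim the i386 file used to rename the shared globals at preprocessing time before including the x86_64 implementation; with the prefix change, the shared file defines the __x86_* names directly and no remapping is needed. The removed pattern, reduced to one name (the include path is an illustrative stand-in, not the real one):

/* The deleted pattern: alias a differently-prefixed global at
   preprocessing time, then include the file that defines it.
   "cacheinfo-impl.c" is a hypothetical stand-in for the real
   include target.  */
#define __x86_64_data_cache_size __x86_data_cache_size
#include "cacheinfo-impl.c"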
diff --git a/sysdeps/x86_64/cacheinfo.c b/sysdeps/x86_64/cacheinfo.c
index 60981cad37..9aed28f854 100644
--- a/sysdeps/x86_64/cacheinfo.c
+++ b/sysdeps/x86_64/cacheinfo.c
@@ -505,24 +505,24 @@ __cache_sysconf (int name)
 
 /* Data cache size for use in memory and string routines, typically
    L1 size, rounded to multiple of 256 bytes.  */
-long int __x86_64_data_cache_size_half attribute_hidden = 32 * 1024 / 2;
-long int __x86_64_data_cache_size attribute_hidden = 32 * 1024;
-/* Similar to __x86_64_data_cache_size_half, but not rounded.  */
-long int __x86_64_raw_data_cache_size_half attribute_hidden = 32 * 1024 / 2;
-/* Similar to __x86_64_data_cache_size, but not rounded.  */
-long int __x86_64_raw_data_cache_size attribute_hidden = 32 * 1024;
+long int __x86_data_cache_size_half attribute_hidden = 32 * 1024 / 2;
+long int __x86_data_cache_size attribute_hidden = 32 * 1024;
+/* Similar to __x86_data_cache_size_half, but not rounded.  */
+long int __x86_raw_data_cache_size_half attribute_hidden = 32 * 1024 / 2;
+/* Similar to __x86_data_cache_size, but not rounded.  */
+long int __x86_raw_data_cache_size attribute_hidden = 32 * 1024;
 /* Shared cache size for use in memory and string routines, typically
    L2 or L3 size, rounded to multiple of 256 bytes.  */
-long int __x86_64_shared_cache_size_half attribute_hidden = 1024 * 1024 / 2;
-long int __x86_64_shared_cache_size attribute_hidden = 1024 * 1024;
-/* Similar to __x86_64_shared_cache_size_half, but not rounded.  */
-long int __x86_64_raw_shared_cache_size_half attribute_hidden = 1024 * 1024 / 2;
-/* Similar to __x86_64_shared_cache_size, but not rounded.  */
-long int __x86_64_raw_shared_cache_size attribute_hidden = 1024 * 1024;
+long int __x86_shared_cache_size_half attribute_hidden = 1024 * 1024 / 2;
+long int __x86_shared_cache_size attribute_hidden = 1024 * 1024;
+/* Similar to __x86_shared_cache_size_half, but not rounded.  */
+long int __x86_raw_shared_cache_size_half attribute_hidden = 1024 * 1024 / 2;
+/* Similar to __x86_shared_cache_size, but not rounded.  */
+long int __x86_raw_shared_cache_size attribute_hidden = 1024 * 1024;
 
 #ifndef DISABLE_PREFETCHW
 /* PREFETCHW support flag for use in memory and string routines.  */
-int __x86_64_prefetchw attribute_hidden;
+int __x86_prefetchw attribute_hidden;
 #endif
 
 #ifndef DISABLE_PREFERRED_MEMORY_INSTRUCTION
@@ -534,7 +534,7 @@ int __x86_64_prefetchw attribute_hidden;
   3: SSSE3 instructions
 
   */
-int __x86_64_preferred_memory_instruction attribute_hidden;
+int __x86_preferred_memory_instruction attribute_hidden;
 #endif
 
 
@@ -591,9 +591,9 @@ init_cacheinfo (void)
       /* Intel prefers SSSE3 instructions for memory/string routines
 	 if they are available.  */
       if ((ecx & 0x200))
-	__x86_64_preferred_memory_instruction = 3;
+	__x86_preferred_memory_instruction = 3;
       else
-	__x86_64_preferred_memory_instruction = 2;
+	__x86_preferred_memory_instruction = 2;
 #endif
 
       /* Figure out the number of logical threads that share the
@@ -684,9 +684,9 @@ init_cacheinfo (void)
 	 if they are avaiable, otherwise it prefers integer
 	 instructions.  */
       if ((ecx & 0x200))
-	__x86_64_preferred_memory_instruction = 3;
+	__x86_preferred_memory_instruction = 3;
       else
-	__x86_64_preferred_memory_instruction = 0;
+	__x86_preferred_memory_instruction = 0;
 #endif
 
       /* Get maximum extended function. */
@@ -730,28 +730,28 @@ init_cacheinfo (void)
 	  __cpuid (0x80000001, eax, ebx, ecx, edx);
 	  /*  PREFETCHW     || 3DNow!  */
 	  if ((ecx & 0x100) || (edx & 0x80000000))
-	    __x86_64_prefetchw = -1;
+	    __x86_prefetchw = -1;
 	}
 #endif
     }
 
   if (data > 0)
     {
-      __x86_64_raw_data_cache_size_half = data / 2;
-      __x86_64_raw_data_cache_size = data;
+      __x86_raw_data_cache_size_half = data / 2;
+      __x86_raw_data_cache_size = data;
       /* Round data cache size to multiple of 256 bytes.  */
       data = data & ~255L;
-      __x86_64_data_cache_size_half = data / 2;
-      __x86_64_data_cache_size = data;
+      __x86_data_cache_size_half = data / 2;
+      __x86_data_cache_size = data;
     }
 
   if (shared > 0)
     {
-      __x86_64_raw_shared_cache_size_half = shared / 2;
-      __x86_64_raw_shared_cache_size = shared;
+      __x86_raw_shared_cache_size_half = shared / 2;
+      __x86_raw_shared_cache_size = shared;
       /* Round shared cache size to multiple of 256 bytes.  */
       shared = shared & ~255L;
-      __x86_64_shared_cache_size_half = shared / 2;
-      __x86_64_shared_cache_size = shared;
+      __x86_shared_cache_size_half = shared / 2;
+      __x86_shared_cache_size = shared;
     }
 }
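init_cacheinfo keeps both a raw and a rounded copy of each probed size: masking with ~255L rounds down to a multiple of 256 bytes so the assembly loops can assume cache-line-friendly limits, and the halves are precomputed so hot paths never divide. The same arithmetic as a standalone C program (the probed value is made up for illustration):

#include <stdio.h>

int
main (void)
{
  long int data = 24 * 1024 + 100;  /* pretend CPUID reported this */
  long int raw_half = data / 2;
  long int rounded = data & ~255L;  /* round down to multiple of 256 */
  printf ("raw=%ld raw_half=%ld rounded=%ld rounded_half=%ld\n",
          data, raw_half, rounded, rounded / 2);
  return 0;
}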
diff --git a/sysdeps/x86_64/memcpy.S b/sysdeps/x86_64/memcpy.S
index 5e4dbc7220..d6cd553a26 100644
--- a/sysdeps/x86_64/memcpy.S
+++ b/sysdeps/x86_64/memcpy.S
@@ -254,7 +254,7 @@ L(32after):
 
 L(fasttry):				/* first 1/2 L1 */
 #ifndef NOT_IN_libc			/* only up to this algorithm outside of libc.so */
-	mov	__x86_64_data_cache_size_half(%rip), %R11_LP
+	mov	__x86_data_cache_size_half(%rip), %R11_LP
 	cmpq	%rdx, %r11		/* calculate the smaller of */
 	cmovaq	%rdx, %r11		/* remaining bytes and 1/2 L1 */
 #endif
@@ -303,7 +303,7 @@ L(fastafter):
 /* Handle large blocks smaller than 1/2 L2. */
 
 L(pretry):				/* first 1/2 L2 */
-	mov	__x86_64_shared_cache_size_half (%rip), %R8_LP
+	mov	__x86_shared_cache_size_half (%rip), %R8_LP
 	cmpq	%rdx, %r8		/* calculate the lesser of */
 	cmovaq	%rdx, %r8		/* remaining bytes and 1/2 L2 */
 
@@ -322,7 +322,7 @@ L(pre):					/* 64-byte with prefetching */
 	movq	%rbx, SAVE3(%rsp)
 	cfi_rel_offset (%rbx, SAVE3)
 
-	cmpl	$0, __x86_64_prefetchw(%rip)
+	cmpl	$0, __x86_prefetchw(%rip)
 	jz	L(preloop)		/* check if PREFETCHW OK */
 
 	.p2align 4
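The cmpq/cmovaq pairs above are a branchless min: each copy pass handles at most half of the L1 (or L2) cache so its working set stays resident. The same selection in C, where a compiler will typically emit the same cmov:

/* Branchless min, as memcpy.S computes with cmpq + cmovaq: limit
   each pass to the smaller of the remaining bytes and half the
   cache size.  */
static long int
pass_limit (long int remaining, long int cache_half)
{
  return remaining < cache_half ? remaining : cache_half;
}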
diff --git a/sysdeps/x86_64/memset.S b/sysdeps/x86_64/memset.S
index e0d4dfa9c3..f3a4d448db 100644
--- a/sysdeps/x86_64/memset.S
+++ b/sysdeps/x86_64/memset.S
@@ -862,7 +862,7 @@ L(SSE15Q0): mov    %rdx,-0xf(%rdi)
 	.balign     16
 L(byte32sse2_pre):
 
-	mov    __x86_64_shared_cache_size(%rip),%r9d  # The largest cache size
+	mov    __x86_shared_cache_size(%rip),%r9d  # The largest cache size
 	cmp    %r9,%r8
 	ja     L(sse2_nt_move_pre)
 	#jmp    L(byte32sse2)
@@ -1205,7 +1205,7 @@ L(SSExDx):
 #ifndef USE_MULTIARCH
 L(aligned_now):
 
-	 cmpl   $0x1,__x86_64_preferred_memory_instruction(%rip)
+	 cmpl   $0x1,__x86_preferred_memory_instruction(%rip)
 	 jg     L(SSE_pre)
 #endif /* USE_MULTIARCH */
 
@@ -1262,7 +1262,7 @@ L(8byte_move_skip):
 
 	.balign     16
 L(8byte_stos_try):
-	mov    __x86_64_shared_cache_size(%rip),%r9d // ck largest cache size
+	mov    __x86_shared_cache_size(%rip),%r9d // ck largest cache size
 	cmpq	%r8,%r9		// calculate the lesser of remaining
 	cmovaq	%r8,%r9		// bytes and largest cache size
 	jbe	L(8byte_stos)
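In both memset hunks the fill size is compared against __x86_shared_cache_size, the largest cache: past that point cached stores would only evict live data, so the code branches to a non-temporal path. The shape of that dispatch in C (the two fill helpers are hypothetical stand-ins for the assembly paths):

extern long int __x86_shared_cache_size;   /* largest cache size */

/* Hypothetical stand-ins for the cached and movnti-based paths.  */
void fill_cached (void *dst, int c, unsigned long n);
void fill_nontemporal (void *dst, int c, unsigned long n);

void
fill (void *dst, int c, unsigned long n)
{
  if ((long int) n > __x86_shared_cache_size)
    fill_nontemporal (dst, c, n);   /* ja L(sse2_nt_move_pre) */
  else
    fill_cached (dst, c, n);
}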
diff --git a/sysdeps/x86_64/multiarch/memcmp-sse4.S b/sysdeps/x86_64/multiarch/memcmp-sse4.S
index 08eade9f54..1ed4200f4c 100644
--- a/sysdeps/x86_64/multiarch/memcmp-sse4.S
+++ b/sysdeps/x86_64/multiarch/memcmp-sse4.S
@@ -321,7 +321,7 @@ L(512bytesormore):
 # ifdef DATA_CACHE_SIZE_HALF
 	mov	$DATA_CACHE_SIZE_HALF, %R8_LP
 # else
-	mov	__x86_64_data_cache_size_half(%rip), %R8_LP
+	mov	__x86_data_cache_size_half(%rip), %R8_LP
 # endif
 	mov	%r8, %r9
 	shr	$1, %r8
@@ -637,7 +637,7 @@ L(512bytesormorein2aligned):
 # ifdef DATA_CACHE_SIZE_HALF
 	mov	$DATA_CACHE_SIZE_HALF, %R8_LP
 # else
-	mov	__x86_64_data_cache_size_half(%rip), %R8_LP
+	mov	__x86_data_cache_size_half(%rip), %R8_LP
 # endif
 	mov	%r8, %r9
 	shr	$1, %r8
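Each multiarch file guards the load with #ifdef DATA_CACHE_SIZE_HALF, so a build can pin the threshold to a constant and skip the RIP-relative load entirely; the runtime variable is consulted only when no constant was configured. The same pattern in C:

/* Build-time override falling back to the runtime probe, mirroring
   the assembly's #ifdef DATA_CACHE_SIZE_HALF blocks.  */
extern long int __x86_data_cache_size_half;

static inline long int
data_cache_size_half (void)
{
#ifdef DATA_CACHE_SIZE_HALF
  return DATA_CACHE_SIZE_HALF;        /* pinned at build time */
#else
  return __x86_data_cache_size_half;  /* set by init_cacheinfo */
#endif
}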
diff --git a/sysdeps/x86_64/multiarch/memcpy-ssse3-back.S b/sysdeps/x86_64/multiarch/memcpy-ssse3-back.S
index 03e18b3890..fc9fcef27d 100644
--- a/sysdeps/x86_64/multiarch/memcpy-ssse3-back.S
+++ b/sysdeps/x86_64/multiarch/memcpy-ssse3-back.S
@@ -108,7 +108,7 @@ L(144bytesormore):
 #ifdef DATA_CACHE_SIZE
 	mov	$DATA_CACHE_SIZE, %RCX_LP
 #else
-	mov	__x86_64_data_cache_size(%rip), %RCX_LP
+	mov	__x86_data_cache_size(%rip), %RCX_LP
 #endif
 	cmp	%rcx, %rdx
 	jae	L(gobble_mem_fwd)
@@ -124,7 +124,7 @@ L(copy_backward):
 #ifdef DATA_CACHE_SIZE
 	mov	$DATA_CACHE_SIZE, %RCX_LP
 #else
-	mov	__x86_64_data_cache_size(%rip), %RCX_LP
+	mov	__x86_data_cache_size(%rip), %RCX_LP
 #endif
 	shl	$1, %rcx
 	cmp	%rcx, %rdx
@@ -158,7 +158,7 @@ L(shl_0):
 #ifdef DATA_CACHE_SIZE
 	cmp	$DATA_CACHE_SIZE_HALF, %R9_LP
 #else
-	cmp	__x86_64_data_cache_size_half(%rip), %R9_LP
+	cmp	__x86_data_cache_size_half(%rip), %R9_LP
 #endif
 	jae	L(gobble_mem_fwd)
 	sub	$0x80, %rdx
@@ -1480,7 +1480,7 @@ L(gobble_mem_fwd):
 #ifdef SHARED_CACHE_SIZE_HALF
 	mov	$SHARED_CACHE_SIZE_HALF, %RCX_LP
 #else
-	mov	__x86_64_shared_cache_size_half(%rip), %RCX_LP
+	mov	__x86_shared_cache_size_half(%rip), %RCX_LP
 #endif
 #ifdef USE_AS_MEMMOVE
 	mov	%rsi, %r9
@@ -1587,7 +1587,7 @@ L(gobble_mem_bwd):
 #ifdef SHARED_CACHE_SIZE_HALF
 	mov	$SHARED_CACHE_SIZE_HALF, %RCX_LP
 #else
-	mov	__x86_64_shared_cache_size_half(%rip), %RCX_LP
+	mov	__x86_shared_cache_size_half(%rip), %RCX_LP
 #endif
 #ifdef USE_AS_MEMMOVE
 	mov	%rdi, %r9
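Worth noting in the hunks above: the forward path at L(144bytesormore) takes the gobble loop at one data-cache size, while L(copy_backward) first doubles the threshold with shl $1, %rcx. A C reading of that decision (an interpretation of these hunks, not authoritative):

extern long int __x86_data_cache_size;

/* Interpretation of the thresholds above: backward copies tolerate
   a working set of two data-cache sizes before switching to the
   gobble path (the shl $1, %rcx).  */
static int
use_gobble_path (unsigned long n, int backward)
{
  long int limit = __x86_data_cache_size;
  if (backward)
    limit <<= 1;
  return (long int) n >= limit;   /* jae L(gobble_mem_fwd) */
}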
diff --git a/sysdeps/x86_64/multiarch/memcpy-ssse3.S b/sysdeps/x86_64/multiarch/memcpy-ssse3.S
index 4be7e7bc1e..9642ceecd9 100644
--- a/sysdeps/x86_64/multiarch/memcpy-ssse3.S
+++ b/sysdeps/x86_64/multiarch/memcpy-ssse3.S
@@ -99,7 +99,7 @@ L(80bytesormore):
 #ifdef SHARED_CACHE_SIZE_HALF
 	mov	$SHARED_CACHE_SIZE_HALF, %RCX_LP
 #else
-	mov	__x86_64_shared_cache_size_half(%rip), %RCX_LP
+	mov	__x86_shared_cache_size_half(%rip), %RCX_LP
 #endif
 	cmp	%rcx, %rdx
 	mov	%rsi, %r9
@@ -109,7 +109,7 @@ L(80bytesormore):
 #ifdef DATA_CACHE_SIZE_HALF
 	mov	$DATA_CACHE_SIZE_HALF, %RCX_LP
 #else
-	mov	__x86_64_data_cache_size_half(%rip), %RCX_LP
+	mov	__x86_data_cache_size_half(%rip), %RCX_LP
 #endif
 	BRANCH_TO_JMPTBL_ENTRY (L(shl_table), %r9, 4)
 
@@ -129,7 +129,7 @@ L(copy_backward):
 #ifdef SHARED_CACHE_SIZE_HALF
 	mov	$SHARED_CACHE_SIZE_HALF, %RCX_LP
 #else
-	mov	__x86_64_shared_cache_size_half(%rip), %RCX_LP
+	mov	__x86_shared_cache_size_half(%rip), %RCX_LP
 #endif
 
 	cmp	%rcx, %rdx
@@ -140,7 +140,7 @@ L(copy_backward):
 #ifdef DATA_CACHE_SIZE_HALF
 	mov	$DATA_CACHE_SIZE_HALF, %RCX_LP
 #else
-	mov	__x86_64_data_cache_size_half(%rip), %RCX_LP
+	mov	__x86_data_cache_size_half(%rip), %RCX_LP
 #endif
 	BRANCH_TO_JMPTBL_ENTRY (L(shl_table_bwd), %r9, 4)
 
@@ -177,7 +177,7 @@ L(shl_0_gobble):
 #ifdef DATA_CACHE_SIZE_HALF
 	cmp	$DATA_CACHE_SIZE_HALF, %RDX_LP
 #else
-	cmp	__x86_64_data_cache_size_half(%rip), %RDX_LP
+	cmp	__x86_data_cache_size_half(%rip), %RDX_LP
 #endif
 	lea	-128(%rdx), %rdx
 	jae	L(shl_0_gobble_mem_loop)
@@ -318,7 +318,7 @@ L(shl_0_gobble_bwd):
 #ifdef DATA_CACHE_SIZE_HALF
 	cmp	$DATA_CACHE_SIZE_HALF, %RDX_LP
 #else
-	cmp	__x86_64_data_cache_size_half(%rip), %RDX_LP
+	cmp	__x86_data_cache_size_half(%rip), %RDX_LP
 #endif
 	lea	-128(%rdx), %rdx
 	jae	L(shl_0_gobble_mem_bwd_loop)
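Both cache-size checks in memcpy-ssse3.S feed BRANCH_TO_JMPTBL_ENTRY (L(shl_table), %r9, 4): an indirect jump through a 16-entry table of copy loops, each specialized for one alignment residue, so the per-iteration shift amount is baked into straight-line code. Roughly, in C (how %r9 is derived lies outside these hunks; the table entries are stand-ins for L(shl_0)..L(shl_15)):

/* Rough C rendering of BRANCH_TO_JMPTBL_ENTRY: index a table of
   shift-specialized copy loops by an alignment residue.  The entries
   stand in for L(shl_0)..L(shl_15).  */
typedef void (*copy_fn) (char *dst, const char *src, unsigned long n);

static void
dispatch (const copy_fn table[16], unsigned long residue,
          char *dst, const char *src, unsigned long n)
{
  table[residue & 15] (dst, src, n);
}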