Diffstat (limited to 'sysdeps/x86_64')
-rw-r--r--  sysdeps/x86_64/bits/atomic.h          334
-rw-r--r--  sysdeps/x86_64/bits/byteswap.h          7
-rw-r--r--  sysdeps/x86_64/dl-machine.h             8
-rw-r--r--  sysdeps/x86_64/fpu/e_log10l.S           5
-rw-r--r--  sysdeps/x86_64/fpu/e_log2l.S            5
-rw-r--r--  sysdeps/x86_64/fpu/e_logl.S            12
-rw-r--r--  sysdeps/x86_64/fpu/e_powl.S            19
-rw-r--r--  sysdeps/x86_64/fpu/math_private.h      21
-rw-r--r--  sysdeps/x86_64/fpu/s_copysign.S         5
-rw-r--r--  sysdeps/x86_64/fpu/s_copysignf.S        3
-rw-r--r--  sysdeps/x86_64/fpu/s_log1pl.S           5
-rw-r--r--  sysdeps/x86_64/ldbl2mpn.c               1
-rw-r--r--  sysdeps/x86_64/soft-fp/sfp-machine.h   51
13 files changed, 205 insertions, 271 deletions
diff --git a/sysdeps/x86_64/bits/atomic.h b/sysdeps/x86_64/bits/atomic.h
index 65d6b02008..133a68d192 100644
--- a/sysdeps/x86_64/bits/atomic.h
+++ b/sysdeps/x86_64/bits/atomic.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -18,7 +18,6 @@
    02111-1307 USA.  */
 
 #include <stdint.h>
-#include <tls.h>	/* For tcbhead_t.  */
 
 
 typedef int8_t atomic8_t;
@@ -81,54 +80,8 @@ typedef uintmax_t uatomic_max_t;
   ({ __typeof (*mem) ret;						      \
      __asm __volatile (LOCK_PREFIX "cmpxchgq %q2, %1"			      \
 		       : "=a" (ret), "=m" (*mem)			      \
-		       : "r" ((long int) (newval)), "m" (*mem),		      \
-			 "0" ((long int) (oldval)));			      \
-     ret; })
-
-
-#define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \
-  ({ __typeof (*mem) ret;						      \
-    __asm __volatile ("cmpl $0, %%fs:%P5\n\t"				      \
-		      "je 0f\n\t"					      \
-		      "lock\n"						      \
-		       "0:\tcmpxchgb %b2, %1"				      \
-		       : "=a" (ret), "=m" (*mem)			      \
-		       : "q" (newval), "m" (*mem), "0" (oldval),	      \
-			 "i" (offsetof (tcbhead_t, multiple_threads)));	      \
-     ret; })
-
-#define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \
-  ({ __typeof (*mem) ret;						      \
-    __asm __volatile ("cmpl $0, %%fs:%P5\n\t"				      \
-		      "je 0f\n\t"					      \
-		      "lock\n"						      \
-		       "0:\tcmpxchgw %w2, %1"				      \
-		       : "=a" (ret), "=m" (*mem)			      \
-		       : "q" (newval), "m" (*mem), "0" (oldval),	      \
-			 "i" (offsetof (tcbhead_t, multiple_threads)));	      \
-     ret; })
-
-#define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \
-  ({ __typeof (*mem) ret;						      \
-    __asm __volatile ("cmpl $0, %%fs:%P5\n\t"				      \
-		      "je 0f\n\t"					      \
-		      "lock\n"						      \
-		       "0:\tcmpxchgl %2, %1"				      \
-		       : "=a" (ret), "=m" (*mem)			      \
-		       : "q" (newval), "m" (*mem), "0" (oldval),	      \
-			 "i" (offsetof (tcbhead_t, multiple_threads)));	      \
-     ret; })
-
-#define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
-  ({ __typeof (*mem) ret;						      \
-     __asm __volatile ("cmpl $0, %%fs:%P5\n\t"				      \
-		       "je 0f\n\t"					      \
-		       "lock\n"						      \
-		       "0:\tcmpxchgq %q2, %1"				      \
-		       : "=a" (ret), "=m" (*mem)			      \
-		       : "q" ((long int) (newval)), "m" (*mem),		      \
-			 "0" ((long int)oldval),			      \
-			 "i" (offsetof (tcbhead_t, multiple_threads)));	      \
+		       : "r" ((long) (newval)), "m" (*mem),		      \
+			 "0" ((long) (oldval)));			      \
      ret; })
 
 
@@ -154,76 +107,49 @@ typedef uintmax_t uatomic_max_t;
      result; })
 
 
-#define __arch_exchange_and_add_body(lock, mem, value)			      \
+#define atomic_exchange_and_add(mem, value) \
   ({ __typeof (*mem) result;						      \
      if (sizeof (*mem) == 1)						      \
-       __asm __volatile (lock "xaddb %b0, %1"				      \
+       __asm __volatile (LOCK_PREFIX "xaddb %b0, %1"			      \
 			 : "=r" (result), "=m" (*mem)			      \
-			 : "0" (value), "m" (*mem),			      \
-			   "i" (offsetof (tcbhead_t, multiple_threads)));     \
+			 : "0" (value), "m" (*mem));			      \
      else if (sizeof (*mem) == 2)					      \
-       __asm __volatile (lock "xaddw %w0, %1"				      \
+       __asm __volatile (LOCK_PREFIX "xaddw %w0, %1"			      \
 			 : "=r" (result), "=m" (*mem)			      \
-			 : "0" (value), "m" (*mem),			      \
-			   "i" (offsetof (tcbhead_t, multiple_threads)));     \
+			 : "0" (value), "m" (*mem));			      \
      else if (sizeof (*mem) == 4)					      \
-       __asm __volatile (lock "xaddl %0, %1"				      \
+       __asm __volatile (LOCK_PREFIX "xaddl %0, %1"			      \
 			 : "=r" (result), "=m" (*mem)			      \
-			 : "0" (value), "m" (*mem),			      \
-			   "i" (offsetof (tcbhead_t, multiple_threads)));     \
+			 : "0" (value), "m" (*mem));			      \
      else								      \
-       __asm __volatile (lock "xaddq %q0, %1"				      \
+       __asm __volatile (LOCK_PREFIX "xaddq %q0, %1"			      \
 			 : "=r" (result), "=m" (*mem)			      \
-			 : "0" ((long) (value)), "m" (*mem),		      \
-			   "i" (offsetof (tcbhead_t, multiple_threads)));     \
+			 : "0" ((long) (value)), "m" (*mem));		      \
      result; })
 
-#define atomic_exchange_and_add(mem, value) \
-  __arch_exchange_and_add_body (LOCK_PREFIX, mem, value)
-
-#define __arch_exchange_and_add_cprefix \
-  "cmpl $0, %%fs:%P4\n\tje 0f\n\tlock\n0:\t"
-
-#define catomic_exchange_and_add(mem, value) \
-  __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, mem, value)
-
-
-#define __arch_add_body(lock, pfx, mem, value)				      \
-  do {									      \
-    if (__builtin_constant_p (value) && (value) == 1)			      \
-      pfx##_increment (mem);						      \
-    else if (__builtin_constant_p (value) && (value) == -1)		      \
-      pfx##_decrement (mem);						      \
-    else if (sizeof (*mem) == 1)					      \
-      __asm __volatile (lock "addb %b1, %0"				      \
-			: "=m" (*mem)					      \
-			: "ir" (value), "m" (*mem),			      \
-			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
-    else if (sizeof (*mem) == 2)					      \
-      __asm __volatile (lock "addw %w1, %0"				      \
-			: "=m" (*mem)					      \
-			: "ir" (value), "m" (*mem),			      \
-			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
-    else if (sizeof (*mem) == 4)					      \
-      __asm __volatile (lock "addl %1, %0"				      \
-			: "=m" (*mem)					      \
-			: "ir" (value), "m" (*mem),			      \
-			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
-    else								      \
-      __asm __volatile (lock "addq %q1, %0"				      \
-			: "=m" (*mem)					      \
-			: "ir" ((long) (value)), "m" (*mem),		      \
-			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
-  } while (0)
 
 #define atomic_add(mem, value) \
-  __arch_add_body (LOCK_PREFIX, atomic, mem, value)
-
-#define __arch_add_cprefix \
-  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"
-
-#define catomic_add(mem, value) \
-  __arch_add_body (__arch_add_cprefix, catomic, mem, value)
+  (void) ({ if (__builtin_constant_p (value) && (value) == 1)		      \
+	      atomic_increment (mem);					      \
+	    else if (__builtin_constant_p (value) && (value) == -1)	      \
+	      atomic_decrement (mem);					      \
+	    else if (sizeof (*mem) == 1)				      \
+	      __asm __volatile (LOCK_PREFIX "addb %b1, %0"		      \
+				: "=m" (*mem)				      \
+				: "ir" (value), "m" (*mem));		      \
+	    else if (sizeof (*mem) == 2)				      \
+	      __asm __volatile (LOCK_PREFIX "addw %w1, %0"		      \
+				: "=m" (*mem)				      \
+				: "ir" (value), "m" (*mem));		      \
+	    else if (sizeof (*mem) == 4)				      \
+	      __asm __volatile (LOCK_PREFIX "addl %1, %0"		      \
+				: "=m" (*mem)				      \
+				: "ir" (value), "m" (*mem));		      \
+	    else							      \
+	      __asm __volatile (LOCK_PREFIX "addq %q1, %0"		      \
+				: "=m" (*mem)				      \
+				: "ir" ((long) (value)), "m" (*mem));	      \
+	    })
 
 
 #define atomic_add_negative(mem, value) \
@@ -268,37 +194,24 @@ typedef uintmax_t uatomic_max_t;
      __result; })
 
 
-#define __arch_increment_body(lock, mem) \
-  do {									      \
-    if (sizeof (*mem) == 1)						      \
-      __asm __volatile (lock "incb %b0"					      \
-			: "=m" (*mem)					      \
-			: "m" (*mem),					      \
-			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
-    else if (sizeof (*mem) == 2)					      \
-      __asm __volatile (lock "incw %w0"					      \
-			: "=m" (*mem)					      \
-			: "m" (*mem),					      \
-			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
-    else if (sizeof (*mem) == 4)					      \
-      __asm __volatile (lock "incl %0"					      \
-			: "=m" (*mem)					      \
-			: "m" (*mem),					      \
-			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
-    else								      \
-      __asm __volatile (lock "incq %q0"					      \
-			: "=m" (*mem)					      \
-			: "m" (*mem),					      \
-			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
-  } while (0)
-
-#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, mem)
-
-#define __arch_increment_cprefix \
-  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"
-
-#define catomic_increment(mem) \
-  __arch_increment_body (__arch_increment_cprefix, mem)
+#define atomic_increment(mem) \
+  (void) ({ if (sizeof (*mem) == 1)					      \
+	      __asm __volatile (LOCK_PREFIX "incb %b0"			      \
+				: "=m" (*mem)				      \
+				: "m" (*mem));				      \
+	    else if (sizeof (*mem) == 2)				      \
+	      __asm __volatile (LOCK_PREFIX "incw %w0"			      \
+				: "=m" (*mem)				      \
+				: "m" (*mem));				      \
+	    else if (sizeof (*mem) == 4)				      \
+	      __asm __volatile (LOCK_PREFIX "incl %0"			      \
+				: "=m" (*mem)				      \
+				: "m" (*mem));				      \
+	    else							      \
+	      __asm __volatile (LOCK_PREFIX "incq %q0"			      \
+				: "=m" (*mem)				      \
+				: "m" (*mem));				      \
+	    })
 
 
 #define atomic_increment_and_test(mem) \
@@ -322,37 +235,24 @@ typedef uintmax_t uatomic_max_t;
      __result; })
 
 
-#define __arch_decrement_body(lock, mem) \
-  do {									      \
-    if (sizeof (*mem) == 1)						      \
-      __asm __volatile (lock "decb %b0"					      \
-			: "=m" (*mem)					      \
-			: "m" (*mem),					      \
-			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
-    else if (sizeof (*mem) == 2)					      \
-      __asm __volatile (lock "decw %w0"					      \
-			: "=m" (*mem)					      \
-			: "m" (*mem),					      \
-			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
-    else if (sizeof (*mem) == 4)					      \
-      __asm __volatile (lock "decl %0"					      \
-			: "=m" (*mem)					      \
-			: "m" (*mem),					      \
-			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
-    else								      \
-      __asm __volatile (lock "decq %q0"					      \
-			: "=m" (*mem)					      \
-			: "m" (*mem),					      \
-			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
-  } while (0)
-
-#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, mem)
-
-#define __arch_decrement_cprefix \
-  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"
-
-#define catomic_decrement(mem) \
-  __arch_decrement_body (__arch_decrement_cprefix, mem)
+#define atomic_decrement(mem) \
+  (void) ({ if (sizeof (*mem) == 1)					      \
+	      __asm __volatile (LOCK_PREFIX "decb %b0"			      \
+				: "=m" (*mem)				      \
+				: "m" (*mem));				      \
+	    else if (sizeof (*mem) == 2)				      \
+	      __asm __volatile (LOCK_PREFIX "decw %w0"			      \
+				: "=m" (*mem)				      \
+				: "m" (*mem));				      \
+	    else if (sizeof (*mem) == 4)				      \
+	      __asm __volatile (LOCK_PREFIX "decl %0"			      \
+				: "=m" (*mem)				      \
+				: "m" (*mem));				      \
+	    else							      \
+	      __asm __volatile (LOCK_PREFIX "decq %q0"			      \
+				: "=m" (*mem)				      \
+				: "m" (*mem));				      \
+	    })
 
 
 #define atomic_decrement_and_test(mem) \
@@ -377,28 +277,27 @@ typedef uintmax_t uatomic_max_t;
 
 
 #define atomic_bit_set(mem, bit) \
-  do {									      \
-    if (sizeof (*mem) == 1)						      \
-      __asm __volatile (LOCK_PREFIX "orb %b2, %0"			      \
-			: "=m" (*mem)					      \
-			: "m" (*mem), "ir" (1L << (bit)));		      \
-    else if (sizeof (*mem) == 2)					      \
-      __asm __volatile (LOCK_PREFIX "orw %w2, %0"			      \
-			: "=m" (*mem)					      \
-			: "m" (*mem), "ir" (1L << (bit)));		      \
-    else if (sizeof (*mem) == 4)					      \
-      __asm __volatile (LOCK_PREFIX "orl %2, %0"			      \
-			: "=m" (*mem)					      \
-			: "m" (*mem), "ir" (1L << (bit)));		      \
-    else if (__builtin_constant_p (bit) && (bit) < 32)			      \
-      __asm __volatile (LOCK_PREFIX "orq %2, %0"			      \
-			: "=m" (*mem)					      \
-			: "m" (*mem), "i" (1L << (bit)));		      \
-    else								      \
-      __asm __volatile (LOCK_PREFIX "orq %q2, %0"			      \
-			: "=m" (*mem)					      \
-			: "m" (*mem), "r" (1UL << (bit)));		      \
-  } while (0)
+  (void) ({ if (sizeof (*mem) == 1)					      \
+	      __asm __volatile (LOCK_PREFIX "orb %b2, %0"		      \
+				: "=m" (*mem)				      \
+				: "m" (*mem), "ir" (1L << (bit)));	      \
+	    else if (sizeof (*mem) == 2)				      \
+	      __asm __volatile (LOCK_PREFIX "orw %w2, %0"		      \
+				: "=m" (*mem)				      \
+				: "m" (*mem), "ir" (1L << (bit)));	      \
+	    else if (sizeof (*mem) == 4)				      \
+	      __asm __volatile (LOCK_PREFIX "orl %2, %0"		      \
+				: "=m" (*mem)				      \
+				: "m" (*mem), "ir" (1L << (bit)));	      \
+	    else if (__builtin_constant_p (bit) && (bit) < 32)		      \
+	      __asm __volatile (LOCK_PREFIX "orq %2, %0"		      \
+				: "=m" (*mem)				      \
+				: "m" (*mem), "i" (1L << (bit)));	      \
+	    else							      \
+	      __asm __volatile (LOCK_PREFIX "orq %q2, %0"		      \
+				: "=m" (*mem)				      \
+				: "m" (*mem), "r" (1UL << (bit)));	      \
+	    })
 
 
 #define atomic_bit_test_set(mem, bit) \
@@ -423,56 +322,3 @@ typedef uintmax_t uatomic_max_t;
 
 
 #define atomic_delay() asm ("rep; nop")
-
-
-#define atomic_and(mem, mask) \
-  do {									      \
-    if (sizeof (*mem) == 1)						      \
-      __asm __volatile (LOCK_PREFIX "andb %1, %b0"			      \
-			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem));			      \
-    else if (sizeof (*mem) == 2)					      \
-      __asm __volatile (LOCK_PREFIX "andw %1, %w0"			      \
-			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem));			      \
-    else if (sizeof (*mem) == 4)					      \
-      __asm __volatile (LOCK_PREFIX "andl %1, %0"			      \
-			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem));			      \
-    else								      \
-      __asm __volatile (LOCK_PREFIX "andq %1, %q0"			      \
-			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem));			      \
-  } while (0)
-
-
-#define __arch_or_body(lock, mem, mask)					      \
-  do {									      \
-    if (sizeof (*mem) == 1)						      \
-      __asm __volatile (lock "orb %1, %b0"				      \
-			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem),			      \
-			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
-    else if (sizeof (*mem) == 2)					      \
-      __asm __volatile (lock "orw %1, %w0"				      \
-			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem),			      \
-			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
-    else if (sizeof (*mem) == 4)					      \
-      __asm __volatile (lock "orl %1, %0"				      \
-			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem),			      \
-			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
-    else								      \
-      __asm __volatile (lock "orq %1, %q0"				      \
-			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem),			      \
-			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
-  } while (0)
-
-#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)
-
-#define __arch_or_cprefix \
-  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"
-
-#define catomic_or(mem, mask) __arch_or_body (__arch_or_cprefix, mem, mask)
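Note: the macros this patch keeps all expand to a single lock-prefixed instruction. A minimal standalone sketch of the cmpxchgq pattern above (hypothetical test harness, not glibc code; the LOCK prefix is hard-coded and this builds only with GNU C on x86-64):

#include <stdint.h>
#include <stdio.h>

/* Mirrors __arch_compare_and_exchange_val_64_acq: returns the old
   value; the store happens only if *mem equalled oldval.  */
static inline int64_t
cmpxchg64 (int64_t *mem, int64_t newval, int64_t oldval)
{
  int64_t ret;
  __asm __volatile ("lock; cmpxchgq %q2, %1"
		    : "=a" (ret), "=m" (*mem)
		    : "r" (newval), "m" (*mem), "0" (oldval));
  return ret;
}

int
main (void)
{
  int64_t v = 41;
  printf ("%ld\n", (long) cmpxchg64 (&v, 42, 41));	/* 41: swap succeeded */
  printf ("%ld\n", (long) v);				/* 42 */
  printf ("%ld\n", (long) cmpxchg64 (&v, 99, 41));	/* 42: swap failed */
  return 0;
}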
diff --git a/sysdeps/x86_64/bits/byteswap.h b/sysdeps/x86_64/bits/byteswap.h
index e1c861c75f..ec2b17889d 100644
--- a/sysdeps/x86_64/bits/byteswap.h
+++ b/sysdeps/x86_64/bits/byteswap.h
@@ -1,5 +1,6 @@
 /* Macros to swap the order of bytes in integer values.
-   Copyright (C) 1997, 1998, 2000, 2002, 2003 Free Software Foundation, Inc.
+   Copyright (C) 1997, 1998, 2000, 2002, 2003, 2007
+   Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -59,7 +60,9 @@
 # if __WORDSIZE == 64 || (defined __i486__ || defined __pentium__	      \
 			  || defined __pentiumpro__ || defined __pentium4__   \
 			  || defined __k8__ || defined __athlon__	      \
-			  || defined __k6__)
+			  || defined __k6__ || defined __nocona__	      \
+			  || defined __core2__ || defined __geode__	      \
+			  || defined __amdfam10__)
 /* To swap the bytes in a word the i486 processors and up provide the
    `bswap' opcode.  On i386 we have to use three instructions.  */
 #  define __bswap_32(x) \
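Note: the hunk above only widens the set of -march targets that take the single-instruction path. A hedged sketch of what that path amounts to (not the verbatim glibc macro):

/* On i486 and up, and on x86-64, one bswap reverses all four bytes.  */
static inline unsigned int
bswap32_sketch (unsigned int x)
{
  __asm__ ("bswap %0" : "+r" (x));
  return x;			/* 0x11223344 becomes 0x44332211 */
}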
diff --git a/sysdeps/x86_64/dl-machine.h b/sysdeps/x86_64/dl-machine.h
index 31a7013d50..73e271775a 100644
--- a/sysdeps/x86_64/dl-machine.h
+++ b/sysdeps/x86_64/dl-machine.h
@@ -1,5 +1,5 @@
 /* Machine-dependent ELF dynamic relocation inline functions.  x86-64 version.
-   Copyright (C) 2001-2005, 2006 Free Software Foundation, Inc.
+   Copyright (C) 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Andreas Jaeger <aj@suse.de>.
 
@@ -190,7 +190,7 @@ _dl_start_user:\n\
    define the value.
    ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one
    of the main executable's symbols, as for a COPY reloc.  */
-#if !defined RTLD_BOOTSTRAP || USE___THREAD
+#if defined USE_TLS && (!defined RTLD_BOOTSTRAP || USE___THREAD)
 # define elf_machine_type_class(type)					      \
   ((((type) == R_X86_64_JUMP_SLOT					      \
      || (type) == R_X86_64_DTPMOD64					      \
@@ -300,7 +300,7 @@ elf_machine_rela (struct link_map *map, const Elf64_Rela *reloc,
 	  *reloc_addr = value + reloc->r_addend;
 	  break;
 
-#ifndef RESOLVE_CONFLICT_FIND_MAP
+#if defined USE_TLS && !defined RESOLVE_CONFLICT_FIND_MAP
 	case R_X86_64_DTPMOD64:
 # ifdef RTLD_BOOTSTRAP
 	  /* During startup the dynamic linker is always the module
@@ -339,7 +339,7 @@ elf_machine_rela (struct link_map *map, const Elf64_Rela *reloc,
 			     - sym_map->l_tls_offset);
 	    }
 	  break;
-#endif
+#endif	/* use TLS */
 
 #ifndef RTLD_BOOTSTRAP
 	case R_X86_64_64:
diff --git a/sysdeps/x86_64/fpu/e_log10l.S b/sysdeps/x86_64/fpu/e_log10l.S
index b4343bef45..633234b744 100644
--- a/sysdeps/x86_64/fpu/e_log10l.S
+++ b/sysdeps/x86_64/fpu/e_log10l.S
@@ -42,7 +42,7 @@ ENTRY(__ieee754_log10l)
 	fxam
 	fnstsw
 	fld	%st		// x : x : log10(2)
-	andb	$1,%ah
+	testb	$1, %ah
 	jnz	3f		// in case x is NaN or ±Inf
 4:	fsubl	MO(one)		// x-1 : x : log10(2)
 	fld	%st		// x-1 : x-1 : x : log10(2)
@@ -59,7 +59,8 @@ ENTRY(__ieee754_log10l)
 	fyl2x			// log10(x)
 	ret
 
-3:	jp	4b		// in case x is ±Inf
+3:	testb	$4, %ah
+	jnz	4b		// in case x is ±Inf
 	fstp	%st(1)
 	fstp	%st(1)
 	ret
diff --git a/sysdeps/x86_64/fpu/e_log2l.S b/sysdeps/x86_64/fpu/e_log2l.S
index 7a89b94d9f..f04d30a05a 100644
--- a/sysdeps/x86_64/fpu/e_log2l.S
+++ b/sysdeps/x86_64/fpu/e_log2l.S
@@ -39,7 +39,7 @@ ENTRY(__ieee754_log2l)
 	fxam
 	fnstsw
 	fld	%st		// x : x : 1
-	andb	$1,%ah
+	testb	$1, %ah
 	jnz	3f		// in case x is NaN or ±Inf
 4:	fsub	%st(2), %st	// x-1 : x : 1
 	fld	%st		// x-1 : x-1 : x : 1
@@ -56,7 +56,8 @@ ENTRY(__ieee754_log2l)
 	fyl2x			// log(x)
 	ret
 
-3:	jp	4b		// in case x is ±Inf
+3:	testb	$4, %ah
+	jnz	4b		// in case x is ±Inf
 	fstp	%st(1)
 	fstp	%st(1)
 	ret
diff --git a/sysdeps/x86_64/fpu/e_logl.S b/sysdeps/x86_64/fpu/e_logl.S
index a0bed663c8..2ba91eedfd 100644
--- a/sysdeps/x86_64/fpu/e_logl.S
+++ b/sysdeps/x86_64/fpu/e_logl.S
@@ -38,8 +38,12 @@ limit:	.double 0.29
 ENTRY(__ieee754_logl)
 	fldln2			// log(2)
 	fldt	8(%rsp)		// x : log(2)
+	fxam
+	fnstsw
 	fld	%st		// x : x : log(2)
-	fsubl	MO(one)		// x-1 : x : log(2)
+	testb	$1, %ah
+	jnz	3f		// in case x is NaN or +-Inf
+4:	fsubl	MO(one)		// x-1 : x : log(2)
 	fld	%st		// x-1 : x-1 : x : log(2)
 	fabs			// |x-1| : x-1 : x : log(2)
 	fcompl	MO(limit)	// x-1 : x : log(2)
@@ -53,4 +57,10 @@ ENTRY(__ieee754_logl)
 2:	fstp	%st(0)		// x : log(2)
 	fyl2x			// log(x)
 	ret
+
+3:	testb	$4, %ah
+	jnz	4b		// in case x is +-Inf
+	fstp	%st(1)
+	fstp	%st(1)
+	ret
 END (__ieee754_logl)
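Note: like e_log10l.S and e_log2l.S above, this routine leans on fyl2x, which computes st(1) * log2(st(0)); preloading ln 2 with fldln2 therefore yields the natural log. The identity, as a C sketch:

#include <math.h>

/* What the fldln2/fyl2x pairing computes, for x > 0.  */
double
log_identity_sketch (double x)
{
  return M_LN2 * log2 (x);	/* equals log (x) */
}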
diff --git a/sysdeps/x86_64/fpu/e_powl.S b/sysdeps/x86_64/fpu/e_powl.S
index 85f4deb3c7..4959bea7ac 100644
--- a/sysdeps/x86_64/fpu/e_powl.S
+++ b/sysdeps/x86_64/fpu/e_powl.S
@@ -1,5 +1,6 @@
 /* ix87 specific implementation of pow function.
-   Copyright (C) 1996, 1997, 1998, 1999, 2001, 2004 Free Software Foundation, Inc.
+   Copyright (C) 1996, 1997, 1998, 1999, 2001, 2004, 2007
+   Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@cygnus.com>, 1996.
 
@@ -146,10 +147,11 @@ ENTRY(__ieee754_powl)
 2:	/* y is a real number.  */
 	fxch			// x : y
 	fldl	MO(one)		// 1.0 : x : y
-	fld	%st(1)		// x : 1.0 : x : y
-	fsub	%st(1)		// x-1 : 1.0 : x : y
-	fabs			// |x-1| : 1.0 : x : y
-	fcompl	MO(limit)	// 1.0 : x : y
+	fldl	MO(limit)	// 0.29 : 1.0 : x : y
+	fld	%st(2)		// x : 0.29 : 1.0 : x : y
+	fsub	%st(2)		// x-1 : 0.29 : 1.0 : x : y
+	fabs			// |x-1| : 0.29 : 1.0 : x : y
+	fucompp			// 1.0 : x : y
 	fnstsw
 	fxch			// x : 1.0 : y
 	test	$4500,%eax
@@ -190,9 +192,10 @@ ENTRY(__ieee754_powl)
 	// y == ±inf
 	.align ALIGNARG(4)
 12:	fstp	%st(0)		// pop y
-	fldt	8(%rsp)		// x
-	fabs
-	fcompl	MO(one)		// < 1, == 1, or > 1
+	fldl	MO(one)		// 1
+	fldt	8(%rsp)		// x : 1
+	fabs			// abs(x) : 1
+	fucompp			// < 1, == 1, or > 1
 	fnstsw
 	andb	$0x45, %ah
 	cmpb	$0x45, %ah
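Note: the switch from fcompl to fucompp matters because fcom* raises the invalid-operation exception for any NaN operand, while fucom* raises it only for signaling NaNs, so a quiet NaN x no longer triggers a spurious exception. The y == ±inf branch at label 12 reduces to comparing |x| against 1; a hedged C restatement per C99 Annex F (function name invented):

#include <math.h>

/* pow (x, y) when y is +-infinity and x is not NaN.  */
double
pow_inf_exponent_sketch (double x, double y)
{
  double ax = fabs (x);
  if (ax == 1.0)
    return 1.0;				/* pow (+-1, +-inf) == 1 */
  return (ax > 1.0) == (y > 0.0) ? INFINITY : 0.0;
}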
diff --git a/sysdeps/x86_64/fpu/math_private.h b/sysdeps/x86_64/fpu/math_private.h
new file mode 100644
index 0000000000..4febcbb5ec
--- /dev/null
+++ b/sysdeps/x86_64/fpu/math_private.h
@@ -0,0 +1,21 @@
+#ifndef _MATH_PRIVATE_H
+
+#define math_opt_barrier(x) \
+({ __typeof(x) __x;					\
+   if (sizeof (x) <= sizeof (double))			\
+     __asm ("" : "=x" (__x) : "0" (x));			\
+   else							\
+     __asm ("" : "=t" (__x) : "0" (x));			\
+   __x; })
+#define math_force_eval(x) \
+do							\
+  {							\
+    if (sizeof (x) <= sizeof (double))			\
+      __asm __volatile ("" : : "x" (x));		\
+    else						\
+      __asm __volatile ("" : : "f" (x));		\
+  }							\
+while (0)
+
+#include <math/math_private.h>
+#endif
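Note: these new barriers keep the compiler from constant-folding an operand (math_opt_barrier) or discarding a result computed only for its exception-flag side effects (math_force_eval). A hedged usage sketch, assuming the header above is in scope:

#include <float.h>

/* Make an underflowing multiply really happen at run time so the
   FE_UNDERFLOW flag gets raised.  */
void
raise_underflow_sketch (void)
{
  double tiny = math_opt_barrier (DBL_MIN);
  math_force_eval (tiny * tiny);
}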
diff --git a/sysdeps/x86_64/fpu/s_copysign.S b/sysdeps/x86_64/fpu/s_copysign.S
index f3d9b0cbb4..f1ebcf8bf1 100644
--- a/sysdeps/x86_64/fpu/s_copysign.S
+++ b/sysdeps/x86_64/fpu/s_copysign.S
@@ -1,5 +1,5 @@
 /* copy sign, double version.
-   Copyright (C) 2002, 2006 Free Software Foundation, Inc.
+   Copyright (C) 2002 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Andreas Jaeger <aj@suse.de>, 2002.
 
@@ -31,8 +31,6 @@
 signmask:
 	.byte 0, 0, 0, 0, 0, 0, 0, 0x80
 	.byte 0, 0, 0, 0, 0, 0, 0, 0
-	ASM_SIZE_DIRECTIVE(signmask)
-	ASM_TYPE_DIRECTIVE(othermask,@object)
 othermask:
 	.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f
 	.byte 0, 0, 0, 0, 0, 0, 0, 0
@@ -44,7 +42,6 @@ othermask:
 #define MO(op) op
 #endif
 
-	.text
 ENTRY(__copysign)
 	andpd MO(othermask),%xmm0
 	andpd MO(signmask),%xmm1
diff --git a/sysdeps/x86_64/fpu/s_copysignf.S b/sysdeps/x86_64/fpu/s_copysignf.S
index 0fbe1d4c96..f5dc5f78ad 100644
--- a/sysdeps/x86_64/fpu/s_copysignf.S
+++ b/sysdeps/x86_64/fpu/s_copysignf.S
@@ -1,5 +1,5 @@
 /* copy sign, double version.
-   Copyright (C) 2002, 2006 Free Software Foundation, Inc.
+   Copyright (C) 2002 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Andreas Jaeger <aj@suse.de>, 2002.
 
@@ -38,7 +38,6 @@ mask:
 #define MO(op) op
 #endif
 
-	.text
 ENTRY(__copysignf)
 	movss	MO(mask),%xmm3
 	andps	%xmm3,%xmm0
diff --git a/sysdeps/x86_64/fpu/s_log1pl.S b/sysdeps/x86_64/fpu/s_log1pl.S
index 7fbd0e5aaa..ac2bd22a4f 100644
--- a/sysdeps/x86_64/fpu/s_log1pl.S
+++ b/sysdeps/x86_64/fpu/s_log1pl.S
@@ -45,7 +45,7 @@ ENTRY(__log1pl)
 	fxam
 	fnstsw
 	fld	%st
-	andb	$1,%ah
+	testb	$1, %ah
 	jnz	3f		// in case x is NaN or ±Inf
 4:
 	fabs
@@ -62,7 +62,8 @@ ENTRY(__log1pl)
 2:	fyl2xp1
 	ret
 
-3:	jp	4b		// in case x is ±Inf
+3:	testb	$4, %ah
+	jnz	4b		// in case x is ±Inf
 	fstp	%st(1)
 	fstp	%st(1)
 	ret
diff --git a/sysdeps/x86_64/ldbl2mpn.c b/sysdeps/x86_64/ldbl2mpn.c
new file mode 100644
index 0000000000..641b789cd4
--- /dev/null
+++ b/sysdeps/x86_64/ldbl2mpn.c
@@ -0,0 +1 @@
+#include "../i386/ldbl2mpn.c"
diff --git a/sysdeps/x86_64/soft-fp/sfp-machine.h b/sysdeps/x86_64/soft-fp/sfp-machine.h
new file mode 100644
index 0000000000..77df02380c
--- /dev/null
+++ b/sysdeps/x86_64/soft-fp/sfp-machine.h
@@ -0,0 +1,51 @@
+#define _FP_W_TYPE_SIZE		64
+#define _FP_W_TYPE		unsigned long
+#define _FP_WS_TYPE		signed long
+#define _FP_I_TYPE		long
+
+#define __FP_CLZ(r, x)							\
+  do {									\
+    __asm__("bsrq %1,%0" : "=r"(r) : "g"(x) : "cc");			\
+    r ^= 63;								\
+  } while (0)
+
+#define _FP_NANFRAC_S		_FP_QNANBIT_S
+#define _FP_NANFRAC_D		_FP_QNANBIT_D, 0
+#define _FP_NANFRAC_Q		_FP_QNANBIT_Q, 0, 0, 0
+#define _FP_NANSIGN_S		1
+#define _FP_NANSIGN_D		1
+#define _FP_NANSIGN_Q		1
+
+#define _FP_KEEPNANFRACP 1
+/* Here is something Intel misdesigned: the specs don't define
+   the case where we have two NaNs with same mantissas, but
+   different sign. Different operations pick up different NaNs.
+ */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP)			\
+  do {								\
+    if (_FP_FRAC_GT_##wc(X, Y)					\
+	|| (_FP_FRAC_EQ_##wc(X,Y) && (OP == '+' || OP == '*')))	\
+      {								\
+	R##_s = X##_s;						\
+        _FP_FRAC_COPY_##wc(R,X);				\
+      }								\
+    else							\
+      {								\
+	R##_s = Y##_s;						\
+        _FP_FRAC_COPY_##wc(R,Y);				\
+      }								\
+    R##_c = FP_CLS_NAN;						\
+  } while (0)
+
+#define FP_EX_INVALID           (1 << 0)
+#define FP_EX_DENORM		(1 << 1)
+#define FP_EX_DIVZERO           (1 << 2)
+#define FP_EX_OVERFLOW          (1 << 3)
+#define FP_EX_UNDERFLOW         (1 << 4)
+#define FP_EX_INEXACT           (1 << 5)
+
+#define FP_RND_NEAREST		0
+#define FP_RND_ZERO		3
+#define FP_RND_PINF		2
+#define FP_RND_MINF		1
+