author     Andreas Schwab <schwab@redhat.com>  2010-05-31 16:40:07 +0200
committer  Andreas Schwab <schwab@redhat.com>  2010-05-31 16:40:37 +0200
commit     e4f86066d211bea24b760ea74bbc6b26f66040b6 (patch)
tree       8cb043416913681ddb3c53e9ea84fda09b431859
parent     aa3e9bcc7d94da0c6537eca4f4194898e4369fa3 (diff)
parent     eb5ad2eb0d06326846ed37addebe187a0f67c7c7 (diff)
Merge remote branch 'origin/master' into fedora/master
 ChangeLog                                       |  45
 elf/dl-runtime.c                                |  12
 sunrpc/clnt_tcp.c                               |   1
 sunrpc/clnt_udp.c                               |   1
 sunrpc/clnt_unix.c                              |   1
 sysdeps/powerpc/powerpc32/power7/memcpy.S       |   4
 sysdeps/powerpc/powerpc32/power7/memset.S       | 434
 sysdeps/powerpc/powerpc64/power7/memset.S       | 398
 sysdeps/unix/sysv/linux/Makefile                |   2
 sysdeps/unix/sysv/linux/internal_recvmmsg.S     |  14
 sysdeps/unix/sysv/linux/kernel-features.h       |   5
 sysdeps/unix/sysv/linux/recvmmsg.c              | 100
 sysdeps/unix/sysv/linux/sh/sh4/register-dump.h  |   4
 sysdeps/unix/sysv/linux/socketcall.h            |   1
 sysdeps/unix/sysv/linux/sys/timex.h             |   6
 sysdeps/unix/sysv/linux/syscalls.list           |   1
 sysdeps/x86_64/multiarch/init-arch.c            |   6
 17 files changed, 1018 insertions, 17 deletions
diff --git a/ChangeLog b/ChangeLog
index 3c7e824691..7eff3a8363 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,7 @@
+2010-05-28  Luis Machado  <luisgpm@br.ibm.com>
+
+	* sysdeps/powerpc/powerpc32/power7/memcpy.S: Exchange srdi for srwi.
+
 2010-05-27  Andreas Schwab  <schwab@redhat.com>
 
 	* elf/Makefile ($(objpfx)tst-tls10): Depend on
@@ -5,6 +9,47 @@
 	($(objpfx)tst-tls11): Depend on $(objpfx)tst-tlsmod9.so.
 	($(objpfx)tst-tls12): Depend on $(objpfx)tst-tlsmod11.so.
 
+2010-05-26  H.J. Lu  <hongjiu.lu@intel.com>
+
+	[BZ #11640]
+	* sysdeps/x86_64/multiarch/init-arch.c (__init_cpu_features):
+	Properly check family and model.
+
+2010-05-26  Takashi Yoshii  <takashi.yoshii.zj@renesas.com>
+
+	* sysdeps/unix/sysv/linux/sh/sh4/register-dump.h: Fix iov[] size.
+
+2010-05-24  Luis Machado  <luisgpm@br.ibm.com>
+
+	* sysdeps/powerpc/powerpc32/power7/memset.S: POWER7 32-bit memset fix.
+
+2010-05-21  Ulrich Drepper  <drepper@redhat.com>
+
+	* elf/dl-runtime.c (_dl_profile_fixup): Don't crash on unresolved weak
+	symbol reference.
+
+2010-05-19  Andreas Schwab  <schwab@redhat.com>
+
+	* elf/dl-runtime.c (_dl_fixup): Don't crash on unresolved weak
+	symbol reference.
+
+2010-05-21  Andreas Schwab  <schwab@redhat.com>
+
+	* sysdeps/unix/sysv/linux/Makefile (sysdep_routines): Add recvmmsg
+	and internal_recvmmsg.
+	* sysdeps/unix/sysv/linux/recvmmsg.c: New file.
+	* sysdeps/unix/sysv/linux/internal_recvmmsg.S: New file.
+	* sysdeps/unix/sysv/linux/socketcall.h (SOCKOP_recvmmsg): Define.
+	* sysdeps/unix/sysv/linux/syscalls.list (recvmmsg): Remove.
+
+	* sunrpc/clnt_tcp.c (clnttcp_control): Add missing break.
+	* sunrpc/clnt_udp.c (clntudp_control): Likewise.
+	* sunrpc/clnt_unix.c (clntunix_control): Likewise.
+
+2010-05-20  Andreas Schwab  <schwab@redhat.com>
+
+	* sysdeps/unix/sysv/linux/sys/timex.h: Use __REDIRECT_NTH.
+
 2010-05-17  Luis Machado  <luisgpm@br.ibm.com>
 
 	POWER7 optimizations.
diff --git a/elf/dl-runtime.c b/elf/dl-runtime.c
index a52120d121..6847edafc6 100644
--- a/elf/dl-runtime.c
+++ b/elf/dl-runtime.c
@@ -1,5 +1,5 @@
 /* On-demand PLT fixup for shared objects.
-   Copyright (C) 1995-2006, 2007, 2008, 2009 Free Software Foundation, Inc.
+   Copyright (C) 1995-2009, 2010 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -144,7 +144,8 @@ _dl_fixup (
   /* And now perhaps the relocation addend.  */
   value = elf_machine_plt_value (l, reloc, value);
 
-  if (__builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0))
+  if (sym != NULL
+      && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0))
     value = ((DL_FIXUP_VALUE_TYPE (*) (void)) DL_FIXUP_VALUE_ADDR (value)) ();
 
   /* Finally, fix up the plt itself.  */
@@ -231,8 +232,9 @@ _dl_profile_fixup (
 				       ? LOOKUP_VALUE_ADDRESS (result)
 					 + defsym->st_value : 0);
 
-	  if (__builtin_expect (ELFW(ST_TYPE) (defsym->st_info)
-				== STT_GNU_IFUNC, 0))
+	  if (defsym != NULL
+	      && __builtin_expect (ELFW(ST_TYPE) (defsym->st_info)
+				   == STT_GNU_IFUNC, 0))
 	    value = ((DL_FIXUP_VALUE_TYPE (*) (void))
 		     DL_FIXUP_VALUE_ADDR (value)) ();
 	}
@@ -369,7 +371,7 @@ _dl_profile_fixup (
       struct audit_ifaces *afct = GLRO(dl_audit);
       for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
 	{
- 	  if (afct->ARCH_LA_PLTENTER != NULL
+	  if (afct->ARCH_LA_PLTENTER != NULL
 	      && (reloc_result->enterexit
 		  & (LA_SYMB_NOPLTENTER << (2 * (cnt + 1)))) == 0)
 	    {
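
Both hunks add the same guard: when a weak reference stays unresolved at
run time, the symbol lookup hands _dl_fixup/_dl_profile_fixup a null
symbol pointer, and the old code dereferenced it for the STT_GNU_IFUNC
test.  A minimal sketch of the kind of reference involved (hypothetical
name, not part of the patch):

/* A weak, possibly-undefined function: if no loaded object defines it,
   the lazy PLT fixup ends up with a null defining symbol.  */
extern void optional_hook (void) __attribute__ ((weak));

void
run_hook (void)
{
  if (optional_hook != 0)   /* callers normally guard the reference */
    optional_hook ();
}
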
diff --git a/sunrpc/clnt_tcp.c b/sunrpc/clnt_tcp.c
index 1552be87ad..d26a1268ab 100644
--- a/sunrpc/clnt_tcp.c
+++ b/sunrpc/clnt_tcp.c
@@ -399,6 +399,7 @@ clnttcp_control (CLIENT *cl, int request, char *info)
       /* This will set the xid of the NEXT call */
       *(u_long *)ct->ct_mcall =  htonl (*(u_long *)info - 1);
       /* decrement by 1 as clnttcp_call() increments once */
+      break;
     case CLGET_VERS:
       /*
        * This RELIES on the information that, in the call body,
diff --git a/sunrpc/clnt_udp.c b/sunrpc/clnt_udp.c
index 62ee3a1c99..360e26a594 100644
--- a/sunrpc/clnt_udp.c
+++ b/sunrpc/clnt_udp.c
@@ -582,6 +582,7 @@ clntudp_control (CLIENT *cl, int request, char *info)
       /* This will set the xid of the NEXT call */
       *(u_long *)cu->cu_outbuf =  htonl(*(u_long *)info - 1);
       /* decrement by 1 as clntudp_call() increments once */
+      break;
     case CLGET_VERS:
       /*
        * This RELIES on the information that, in the call body,
diff --git a/sunrpc/clnt_unix.c b/sunrpc/clnt_unix.c
index db3ea312af..bca1273e22 100644
--- a/sunrpc/clnt_unix.c
+++ b/sunrpc/clnt_unix.c
@@ -376,6 +376,7 @@ clntunix_control (CLIENT *cl, int request, char *info)
       /* This will set the xid of the NEXT call */
       *(u_long *) ct->ct_mcall =  htonl (*(u_long *)info - 1);
       /* decrement by 1 as clntunix_call() increments once */
+      break;
     case CLGET_VERS:
       /*
        * This RELIES on the information that, in the call body,
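
The one-line change is the same in all three sunrpc clients: without the
break, the CLSET_XID case falls straight through into CLGET_VERS and
overwrites the caller's buffer.  A minimal sketch of the pattern
(hypothetical names, not glibc code):

enum { SET_XID, GET_VERS };

static unsigned long stored_xid;
static unsigned long stored_vers = 2;

static void
control (int request, unsigned long *info)
{
  switch (request)
    {
    case SET_XID:
      stored_xid = *info - 1;
      break;    /* the break the patch adds; without it execution falls
                   through and clobbers *info below */
    case GET_VERS:
      *info = stored_vers;
      break;
    }
}
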
diff --git a/sysdeps/powerpc/powerpc32/power7/memcpy.S b/sysdeps/powerpc/powerpc32/power7/memcpy.S
index e3dfd2ff92..f0c332f1ba 100644
--- a/sysdeps/powerpc/powerpc32/power7/memcpy.S
+++ b/sysdeps/powerpc/powerpc32/power7/memcpy.S
@@ -365,7 +365,7 @@ L(copy_GE_32_unaligned):
 	addi    3,3,8
 0:
 	clrlwi  10,12,28      /* Check alignment of SRC.  */
-	srdi    9,31,4	      /* Number of full quadwords remaining.  */
+	srwi    9,31,4	      /* Number of full quadwords remaining.  */
 
 	/* The proper alignment is present, it is OK to copy the bytes now.  */
 L(copy_GE_32_unaligned_cont):
@@ -375,7 +375,7 @@ L(copy_GE_32_unaligned_cont):
 	li      6,16	      /* Index for 16-bytes offsets.  */
 	li	7,32	      /* Index for 32-bytes offsets.  */
 	cmplwi  cr1,11,0
-	srdi    8,31,5	      /* Setup the loop counter.  */
+	srwi    8,31,5	      /* Setup the loop counter.  */
 	mr      10,3
 	mr      11,12
 	mtcrf   0x01,9
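
srdi is the doubleword (64-bit) shift-right-immediate; on powerpc32 the
byte count lives in a 32-bit register, so the word form srwi is the
correct instruction.  A rough C analogue of the two fixed instructions
(register roles taken from the comments above):

/* remaining = byte count still to copy (r31).  */
static inline unsigned int
full_quadwords (unsigned int remaining)
{
  return remaining >> 4;	/* srwi 9,31,4 */
}

static inline unsigned int
loop_iterations (unsigned int remaining)
{
  return remaining >> 5;	/* srwi 8,31,5 */
}
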
diff --git a/sysdeps/powerpc/powerpc32/power7/memset.S b/sysdeps/powerpc/powerpc32/power7/memset.S
new file mode 100644
index 0000000000..8aabb49327
--- /dev/null
+++ b/sysdeps/powerpc/powerpc32/power7/memset.S
@@ -0,0 +1,434 @@
+/* Optimized memset implementation for PowerPC32/POWER7.
+   Copyright (C) 2010 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+   02110-1301 USA.  */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]));
+   Returns 's'.  */
+
+	.machine  power7
+EALIGN (BP_SYM (memset), 5, 0)
+	CALL_MCOUNT
+
+	.align	4
+L(_memset):
+	cmplwi	cr7,5,31
+	cmplwi	cr6,5,8
+	mr	10,3		/* Save original argument for later.  */
+	mr	7,1		/* Save original r1 for later.  */
+	cfi_offset(31,-8)
+
+	/* Replicate byte to word.  */
+	rlwimi	4,4,8,16,23
+	rlwimi	4,4,16,0,15
+
+	ble	cr6,L(small)	/* If length <= 8, use short copy code.  */
+
+	neg	0,3
+	ble	cr7,L(medium)	/* If length < 32, use medium copy code.  */
+
+	/* Save our word twice to create a doubleword that we will later
+	   copy to a FPR.  */
+	stwu	1,-32(1)
+	andi.	11,10,7		/* Check alignment of DST.  */
+	mr	12,5
+	stw	4,24(1)
+	stw	4,28(1)
+	beq	L(big_aligned)
+
+	clrlwi	0,0,29
+	mtocrf	0x01,0
+	subf	5,0,5
+
+	/* Get DST aligned to 8 bytes.  */
+1:	bf	31,2f
+
+	stb	4,0(10)
+	addi	10,10,1
+2:	bf	30,4f
+
+	sth	4,0(10)
+	addi	10,10,2
+4:	bf	29,L(big_aligned)
+
+	stw	4,0(10)
+	addi	10,10,4
+
+	.align	4
+L(big_aligned):
+	cmplwi	cr5,5,255
+	li	0,32
+	cmplwi	cr1,5,160
+	dcbtst	0,10
+	cmplwi	cr6,4,0
+	srwi	9,5,3		/* Number of full doublewords remaining.  */
+	crand	27,26,21
+	mtocrf	0x01,9
+	bt	27,L(huge)
+
+	/* From this point on, we'll copy 32+ bytes and the value
+	   isn't 0 (so we can't use dcbz).  */
+
+	srwi	8,5,5
+	clrlwi	11,5,29
+	cmplwi	cr6,11,0
+	cmplwi	cr1,9,4
+	mtctr	8
+
+	/* Copy 1~3 doublewords so the main loop starts
+	at a multiple of 32 bytes.  */
+
+	bf	30,1f
+
+	stw	4,0(10)
+	stw	4,4(10)
+	stw	4,8(10)
+	stw	4,12(10)
+	addi	10,10,16
+	bf	31,L(big_loop)
+
+	stw	4,0(10)
+	stw	4,4(10)
+	addi	10,10,8
+	mr	12,10
+	blt	cr1,L(tail_bytes)
+
+	b	L(big_loop)
+
+	.align	4
+1:	/* Copy 1 doubleword.  */
+	bf	31,L(big_loop)
+
+	stw	4,0(10)
+	stw	4,4(10)
+	addi	10,10,8
+
+	/* First use a 32-bytes loop with stw's to try and avoid the LHS due
+	   to the lfd we will do next.  Also, ping-pong through r10 and r12
+	   to avoid AGEN delays.  */
+	.align	4
+L(big_loop):
+	addi	12,10,32
+	stw	4,0(10)
+	stw	4,4(10)
+	stw	4,8(10)
+	stw	4,12(10)
+	stw	4,16(10)
+	stw	4,20(10)
+	stw	4,24(10)
+	stw	4,28(10)
+	bdz	L(tail_bytes)
+
+	addi	10,10,64
+	stw	4,0(12)
+	stw	4,4(12)
+	stw	4,8(12)
+	stw	4,12(12)
+	stw	4,16(12)
+	stw	4,20(12)
+	stw	4,24(12)
+	stw	4,28(12)
+	bdnz	L(big_loop_fast_setup)
+
+	mr	12,10
+	b	L(tail_bytes)
+
+	/* Now that we're probably past the LHS window, use the VSX to
+	   speed up the loop.  */
+L(big_loop_fast_setup):
+	li	11,24
+	li	6,16
+	lxvdsx	4,1,11
+
+	.align	4
+L(big_loop_fast):
+	addi	12,10,32
+	stxvd2x	4,0,10
+	stxvd2x	4,10,6
+	bdz	L(tail_bytes)
+
+	addi	10,10,64
+	stxvd2x	4,0,12
+	stxvd2x	4,12,6
+	bdnz	L(big_loop_fast)
+
+	mr	12,10
+
+	.align	4
+L(tail_bytes):
+
+	/* Check for tail bytes.  */
+	mr	1,7		/* Restore r1.  */
+	beqlr	cr6
+
+	clrlwi	0,5,29
+	mtocrf	0x01,0
+
+	/*  At this point we have a tail of 0-7 bytes and we know that the
+	destination is doubleword-aligned.  */
+4:	/* Copy 4 bytes.  */
+	bf	29,2f
+
+	stw	4,0(12)
+	addi	12,12,4
+2:	/* Copy 2 bytes.  */
+	bf	30,1f
+
+	sth	4,0(12)
+	addi	12,12,2
+1:	/* Copy 1 byte.  */
+	bflr	31
+
+	stb	4,0(12)
+	blr
+
+
+	/* Special case when value is 0 and we have a long length to deal
+	   with.  Use dcbz to zero out 128-bytes at a time.  Before using
+	   dcbz though, we need to get the destination 128-bytes aligned.  */
+	.align	4
+L(huge):
+	lfd	4,24(1)
+	andi.	11,10,127
+	neg	0,10
+	beq	L(huge_aligned)
+
+	clrlwi	0,0,25
+	subf	5,0,5
+	srwi	0,0,3
+	mtocrf  0x01,0
+
+	/* Get DST aligned to 128 bytes.  */
+8:	bf	28,4f
+
+	stfd	4,0(10)
+	stfd	4,8(10)
+	stfd	4,16(10)
+	stfd	4,24(10)
+	stfd	4,32(10)
+	stfd	4,40(10)
+	stfd	4,48(10)
+	stfd	4,56(10)
+	addi	10,10,64
+	.align	4
+4:	bf	29,2f
+
+	stfd	4,0(10)
+	stfd	4,8(10)
+	stfd	4,16(10)
+	stfd	4,24(10)
+	addi	10,10,32
+	.align	4
+2:	bf	30,1f
+
+	stfd	4,0(10)
+	stfd	4,8(10)
+	addi	10,10,16
+	.align	4
+1:	bf	31,L(huge_aligned)
+
+	stfd	4,0(10)
+	addi	10,10,8
+
+L(huge_aligned):
+	srwi	8,5,7
+	clrlwi	11,5,25
+	cmplwi	cr6,11,0
+	mtctr	8
+
+	/* Copies 128-bytes at a time.  */
+	.align	4
+L(huge_loop):
+	dcbz	0,10
+	addi	10,10,128
+	bdnz	L(huge_loop)
+
+	/* We have a tail of 0~127 bytes to handle.  */
+	mr	1,7		/* Restore r1.  */
+	beqlr	cr6
+
+	subf	9,3,10
+	subf	5,9,12
+	srwi	8,5,3
+	cmplwi	cr6,8,0
+	mtocrf	0x01,8
+
+	/* We have a tail o 1~127 bytes. Copy up to 15 doublewords for
+	speed.  We'll handle the resulting tail bytes later.  */
+	beq	cr6,L(tail)
+
+8:	bf	28,4f
+
+	stfd	4,0(10)
+	stfd	4,8(10)
+	stfd	4,16(10)
+	stfd	4,24(10)
+	stfd	4,32(10)
+	stfd	4,40(10)
+	stfd	4,48(10)
+	stfd	4,56(10)
+	addi	10,10,64
+	.align	4
+4:	bf	29,2f
+
+	stfd	4,0(10)
+	stfd	4,8(10)
+	stfd	4,16(10)
+	stfd	4,24(10)
+	addi	10,10,32
+	.align	4
+2:	bf	30,1f
+
+	stfd	4,0(10)
+	stfd	4,8(10)
+	addi	10,10,16
+	.align	4
+1:	bf	31,L(tail)
+
+	stfd	4,0(10)
+	addi	10,10,8
+
+	/* Handle the rest of the tail bytes here.  */
+L(tail):
+	mtocrf	0x01,5
+
+	.align	4
+4:	bf	29,2f
+
+	stw	4,0(10)
+	addi	10,10,4
+	.align	4
+2:	bf	30,1f
+
+	sth	4,0(10)
+	addi	10,10,2
+	.align	4
+1:	bflr	31
+
+	stb	4,0(10)
+	blr
+
+
+	/* Expanded tree to copy tail bytes without increments.  */
+	.align	4
+L(copy_tail):
+	bf	29,L(FXX)
+
+	stw	4,0(10)
+	bf	30,L(TFX)
+
+	sth	4,4(10)
+	bflr	31
+
+	stb	4,6(10)
+	blr
+
+	.align	4
+L(FXX):	bf	30,L(FFX)
+
+	sth	4,0(10)
+	bflr	31
+
+	stb	4,2(10)
+	blr
+
+	.align	4
+L(TFX):	bflr	31
+
+	stb	4,4(10)
+	blr
+
+	.align	4
+L(FFX):	bflr	31
+
+	stb	4,0(10)
+	blr
+
+	/* Handle copies of 9~31 bytes.  */
+	.align	4
+L(medium):
+	/* At least 9 bytes to go.  */
+	andi.	11,10,3
+	clrlwi	0,0,30
+	beq	L(medium_aligned)
+
+	/* Force 4-bytes alignment for DST.  */
+	mtocrf	0x01,0
+	subf	5,0,5
+1:	/* Copy 1 byte.  */
+	bf	31,2f
+
+	stb	4,0(10)
+	addi	10,10,1
+2:	/* Copy 2 bytes.  */
+	bf	30,L(medium_aligned)
+
+	sth	4,0(10)
+	addi	10,10,2
+
+	.align	4
+L(medium_aligned):
+	/* At least 6 bytes to go, and DST is word-aligned.  */
+	cmplwi	cr1,5,16
+	mtocrf	0x01,5
+	blt	cr1,8f
+
+	/* Copy 16 bytes.  */
+	stw	4,0(10)
+	stw	4,4(10)
+	stw	4,8(10)
+	stw	4,12(10)
+	addi	10,10,16
+8:	/* Copy 8 bytes.  */
+	bf	28,4f
+
+	stw	4,0(10)
+	stw	4,4(10)
+	addi	10,10,8
+4:	/* Copy 4 bytes.  */
+	bf	29,2f
+
+	stw	4,0(10)
+	addi	10,10,4
+2:	/* Copy 2-3 bytes.  */
+	bf	30,1f
+
+	sth	4,0(10)
+	addi	10,10,2
+1:	/* Copy 1 byte.  */
+	bflr	31
+
+	stb	4,0(10)
+	blr
+
+	/* Handles copies of 0~8 bytes.  */
+	.align	4
+L(small):
+	mtocrf	0x01,5
+	bne	cr6,L(copy_tail)
+
+	stw	4,0(10)
+	stw	4,4(10)
+	blr
+
+END (BP_SYM (memset))
+libc_hidden_builtin_def (memset)
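
The new implementation picks a path by length and by whether the fill
value is zero.  A hedged C outline of that dispatch (the function name
and the naive fallback are illustrative; the real work is the assembly
above):

#include <stddef.h>

void *
memset_sketch (void *s, int c, size_t n)
{
  unsigned char *p = s;
  size_t left = n;

  if (n <= 8)
    {
      /* L(small): word stores or the expanded tail tree.  */
    }
  else if (n < 32)
    {
      /* L(medium): align DST to 4 bytes, then 16/8/4/2/1-byte stores.  */
    }
  else if (c == 0 && n > 255)
    {
      /* L(huge): align DST to 128 bytes, then dcbz one cache block per
	 iteration.  */
    }
  else
    {
      /* L(big_aligned): align DST to 8 bytes, then the 32-byte store
	 loop (stw first, stxvd2x once past the LHS window).  */
    }

  /* Naive byte loop standing in for all of the paths so the sketch is
     still a correct memset.  */
  while (left--)
    *p++ = (unsigned char) c;
  return s;
}
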
diff --git a/sysdeps/powerpc/powerpc64/power7/memset.S b/sysdeps/powerpc/powerpc64/power7/memset.S
new file mode 100644
index 0000000000..02a9eedd6b
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power7/memset.S
@@ -0,0 +1,398 @@
+/* Optimized memset implementation for PowerPC64/POWER7.
+   Copyright (C) 2010 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]));
+   Returns 's'.  */
+
+	.machine power7
+EALIGN (BP_SYM (memset), 5, 0)
+	CALL_MCOUNT 3
+
+L(_memset):
+	cmpldi	cr7,5,31
+	cmpldi	cr6,5,8
+	mr	10,3
+
+	/* Replicate byte to word.  */
+	rlwimi	4,4,8,16,23
+	rlwimi	4,4,16,0,15
+	ble	cr6,L(small)	/* If length <= 8, use short copy code.  */
+
+	neg	0,3
+	ble	cr7,L(medium)	/* If length < 32, use medium copy code.  */
+
+	andi.	11,10,7		/* Check alignment of SRC.  */
+	insrdi	4,4,32,0	/* Replicate word to double word.  */
+
+	mr	12,5
+	beq	L(big_aligned)
+
+	clrldi	0,0,61
+	mtocrf	0x01,0
+	subf	5,0,5
+
+	/* Get DST aligned to 8 bytes.  */
+1:	bf	31,2f
+
+	stb	4,0(10)
+	addi	10,10,1
+2:	bf	30,4f
+
+	sth	4,0(10)
+	addi	10,10,2
+4:	bf	29,L(big_aligned)
+
+	stw	4,0(10)
+	addi	10,10,4
+
+	.align	4
+L(big_aligned):
+
+	cmpldi	cr5,5,255
+	li	0,32
+	dcbtst	0,10
+	cmpldi	cr6,4,0
+	srdi	9,5,3	/* Number of full doublewords remaining.  */
+	crand	27,26,21
+	mtocrf	0x01,9
+	bt	27,L(huge)
+
+	/* From this point on, we'll copy 32+ bytes and the value
+	   isn't 0 (so we can't use dcbz).  */
+
+	srdi	8,5,5
+	clrldi	11,5,61
+	cmpldi	cr6,11,0
+	cmpldi	cr1,9,4
+	mtctr	8
+
+	/* Copy 1~3 doublewords so the main loop starts
+	at a multiple of 32 bytes.  */
+
+	bf	30,1f
+
+	std	4,0(10)
+	std	4,8(10)
+	addi	10,10,16
+	bf	31,L(big_loop)
+
+	std	4,0(10)
+	addi	10,10,8
+	mr	12,10
+	blt	cr1,L(tail_bytes)
+	b	L(big_loop)
+
+	.align	4
+1:	/* Copy 1 doubleword.  */
+	bf	31,L(big_loop)
+
+	std	4,0(10)
+	addi	10,10,8
+
+	/* Main aligned copy loop.  Copies 32-bytes at a time and
+	   ping-pong through r10 and r12 to avoid AGEN delays.  */
+	.align	4
+L(big_loop):
+	addi	12,10,32
+	std	4,0(10)
+	std	4,8(10)
+	std	4,16(10)
+	std	4,24(10)
+	bdz	L(tail_bytes)
+
+	addi	10,10,64
+	std	4,0(12)
+	std	4,8(12)
+	std	4,16(12)
+	std	4,24(12)
+	bdnz	L(big_loop)
+
+	mr	12,10
+	b	L(tail_bytes)
+
+	.align	4
+L(tail_bytes):
+
+	/* Check for tail bytes.  */
+	beqlr	cr6
+
+	clrldi	0,5,61
+	mtocrf	0x01,0
+
+	/*  At this point we have a tail of 0-7 bytes and we know that the
+	destination is doubleword-aligned.  */
+4:	/* Copy 4 bytes.  */
+	bf	29,2f
+
+	stw	4,0(12)
+	addi	12,12,4
+2:	/* Copy 2 bytes.  */
+	bf	30,1f
+
+	sth	4,0(12)
+	addi	12,12,2
+1:	/* Copy 1 byte.  */
+	bflr	31
+
+	stb	4,0(12)
+	blr
+
+	/* Special case when value is 0 and we have a long length to deal
+	   with.  Use dcbz to zero out 128-bytes at a time.  Before using
+	   dcbz though, we need to get the destination 128-bytes aligned.  */
+	.align	4
+L(huge):
+	andi.	11,10,127
+	neg	0,10
+	beq	L(huge_aligned)
+
+	clrldi	0,0,57
+	subf	5,0,5
+	srdi	0,0,3
+	mtocrf	0x01,0
+
+	/* Get DST aligned to 128 bytes.  */
+8:	bf	28,4f
+
+	std	4,0(10)
+	std	4,8(10)
+	std	4,16(10)
+	std	4,24(10)
+	std	4,32(10)
+	std	4,40(10)
+	std	4,48(10)
+	std	4,56(10)
+	addi	10,10,64
+	.align	4
+4:	bf	29,2f
+
+	std	4,0(10)
+	std	4,8(10)
+	std	4,16(10)
+	std	4,24(10)
+	addi	10,10,32
+	.align	4
+2:	bf	30,1f
+
+	std	4,0(10)
+	std	4,8(10)
+	addi	10,10,16
+	.align	4
+1:	bf	31,L(huge_aligned)
+
+	std	4,0(10)
+	addi	10,10,8
+
+
+L(huge_aligned):
+	srdi	8,5,7
+	clrldi	11,5,57
+	cmpldi	cr6,11,0
+	mtctr	8
+
+	.align	4
+L(huge_loop):
+	dcbz	0,10
+	addi	10,10,128
+	bdnz	L(huge_loop)
+
+	/* Check how many bytes are still left.  */
+	beqlr	cr6
+
+	subf	9,3,10
+	subf	5,9,12
+	srdi	8,5,3
+	cmpldi	cr6,8,0
+	mtocrf	0x01,8
+
+	/* We have a tail o 1~127 bytes.  Copy up to 15 doublewords for
+	speed.  We'll handle the resulting tail bytes later.  */
+	beq	cr6,L(tail)
+
+8:	bf	28,4f
+
+	std	4,0(10)
+	std	4,8(10)
+	std	4,16(10)
+	std	4,24(10)
+	std	4,32(10)
+	std	4,40(10)
+	std	4,48(10)
+	std	4,56(10)
+	addi	10,10,64
+	.align	4
+4:	bf	29,2f
+
+	std	4,0(10)
+	std	4,8(10)
+	std	4,16(10)
+	std	4,24(10)
+	addi	10,10,32
+	.align	4
+2:	bf	30,1f
+
+	std	4,0(10)
+	std	4,8(10)
+	addi	10,10,16
+	.align	4
+1:	bf	31,L(tail)
+
+	std	4,0(10)
+	addi	10,10,8
+
+	/* Handle the rest of the tail bytes here.  */
+L(tail):
+	mtocrf	0x01,5
+
+	.align	4
+4:	bf	29,2f
+
+	stw	4,0(10)
+	addi	10,10,4
+	.align	4
+2:	bf	30,1f
+
+	sth	4,0(10)
+	addi	10,10,2
+	.align	4
+1:	bflr	31
+
+	stb	4,0(10)
+	blr
+
+	/* Expanded tree to copy tail bytes without increments.  */
+	.align	4
+L(copy_tail):
+	bf	29,L(FXX)
+
+	stw	4,0(10)
+	bf	30,L(TFX)
+
+	sth	4,4(10)
+	bflr	31
+
+	stb	4,6(10)
+	blr
+
+	.align	4
+L(FXX):	bf	30,L(FFX)
+
+	sth	4,0(10)
+	bflr	31
+
+	stb	4,2(10)
+	blr
+
+	.align	4
+L(TFX):	bflr	31
+
+	stb	4,4(10)
+	blr
+
+	.align	4
+L(FFX):	bflr	31
+
+	stb	4,0(10)
+	blr
+
+	/* Handle copies of 9~31 bytes.  */
+	.align	4
+L(medium):
+	/* At least 9 bytes to go.  */
+	andi.	11,10,3
+	clrldi	0,0,62
+	beq	L(medium_aligned)
+
+	/* Force 4-bytes alignment for SRC.  */
+	mtocrf	0x01,0
+	subf	5,0,5
+1:	/* Copy 1 byte.  */
+	bf	31,2f
+
+	stb	4,0(10)
+	addi	10,10,1
+2:	/* Copy 2 bytes.  */
+	bf	30,L(medium_aligned)
+
+	sth	4,0(10)
+	addi	10,10,2
+
+	.align	4
+L(medium_aligned):
+	/* At least 6 bytes to go, and DST is word-aligned.  */
+	cmpldi	cr1,5,16
+	mtocrf	0x01,5
+	blt	cr1,8f
+
+	/* Copy 16 bytes.  */
+	stw	4,0(10)
+	stw	4,4(10)
+	stw	4,8(10)
+	stw	4,12(10)
+	addi	10,10,16
+8:	/* Copy 8 bytes.  */
+	bf	28,4f
+
+	stw	4,0(10)
+	stw	4,4(10)
+	addi	10,10,8
+4:	/* Copy 4 bytes.  */
+	bf	29,2f
+
+	stw	4,0(10)
+	addi	10,10,4
+2:	/* Copy 2-3 bytes.  */
+	bf	30,1f
+
+	sth	4,0(10)
+	addi	10,10,2
+1:	/* Copy 1 byte.  */
+	bflr	31
+
+	stb	4,0(10)
+	blr
+
+	/* Handles copies of 0~8 bytes.  */
+	.align	4
+L(small):
+	mtocrf	0x01,5
+	bne	cr6,L(copy_tail)
+
+	stw	4,0(10)
+	stw	4,4(10)
+	blr
+
+END_GEN_TB (BP_SYM (memset),TB_TOCLESS)
+libc_hidden_builtin_def (memset)
+
+/* Copied from bzero.S to prevent the linker from inserting a stub
+   between bzero and memset.  */
+ENTRY (BP_SYM (__bzero))
+	CALL_MCOUNT 3
+	mr	r5,r4
+	li	r4,0
+	b	L(_memset)
+END_GEN_TB (BP_SYM (__bzero),TB_TOCLESS)
+
+weak_alias (BP_SYM (__bzero), BP_SYM (bzero))
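
The trailing __bzero entry only reshuffles the arguments and branches
into memset; it is kept next to memset so the linker never inserts a
stub between them.  In C terms (sketch, hypothetical name):

#include <stddef.h>
#include <string.h>

/* mr r5,r4 ; li r4,0 ; b L(_memset) in the assembly above.  */
static void
bzero_sketch (void *s, size_t n)
{
  memset (s, 0, n);
}
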
diff --git a/sysdeps/unix/sysv/linux/Makefile b/sysdeps/unix/sysv/linux/Makefile
index 91c123b2b3..9c53b2d228 100644
--- a/sysdeps/unix/sysv/linux/Makefile
+++ b/sysdeps/unix/sysv/linux/Makefile
@@ -12,7 +12,7 @@ CFLAGS-malloc.c += -DMORECORE_CLEARS=2
 endif
 
 ifeq ($(subdir),socket)
-sysdep_routines += internal_accept4
+sysdep_routines += internal_accept4 recvmmsg internal_recvmmsg
 endif
 
 ifeq ($(subdir),misc)
diff --git a/sysdeps/unix/sysv/linux/internal_recvmmsg.S b/sysdeps/unix/sysv/linux/internal_recvmmsg.S
new file mode 100644
index 0000000000..66c1357940
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/internal_recvmmsg.S
@@ -0,0 +1,14 @@
+#include <kernel-features.h>
+#include <sys/syscall.h>
+#if !defined __NR_recvmmsg && defined __NR_socketcall
+# define socket	recvmmsg
+# ifdef __ASSUME_RECVMMSG
+#  define __socket recvmmsg
+# else
+#  define __socket __internal_recvmmsg
+# endif
+# define NARGS 5
+# define NEED_CANCELLATION
+# define NO_WEAK_ALIAS
+# include <socket.S>
+#endif
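
On kernels that lack a dedicated recvmmsg syscall number, this stub
routes the call through the socketcall multiplexer: a single syscall
takes an opcode plus a pointer to the argument array.  A hedged C sketch
of that mechanism (the helper name is illustrative; opcode 19 is
SOCKOP_recvmmsg from the socketcall.h hunk below):

#include <unistd.h>
#include <sys/syscall.h>

#ifdef __NR_socketcall
static long
recvmmsg_via_socketcall (long fd, long msgvec, long vlen, long flags,
			 long timeout)
{
  long args[5] = { fd, msgvec, vlen, flags, timeout };
  return syscall (__NR_socketcall, 19 /* SOCKOP_recvmmsg */, args);
}
#endif
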
diff --git a/sysdeps/unix/sysv/linux/kernel-features.h b/sysdeps/unix/sysv/linux/kernel-features.h
index 43783c1e26..b3f2456150 100644
--- a/sysdeps/unix/sysv/linux/kernel-features.h
+++ b/sysdeps/unix/sysv/linux/kernel-features.h
@@ -525,3 +525,8 @@
 #if __LINUX_KERNEL_VERSION >= 0x020620
 # define __ASSUME_F_GETOWN_EX	1
 #endif
+
+/* Support for the recvmmsg syscall was added in 2.6.33.  */
+#if __LINUX_KERNEL_VERSION >= 0x020621
+# define __ASSUME_RECVMMSG	1
+#endif
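
The threshold encodes kernel 2.6.33 as a packed version number.  A quick
check of the arithmetic (the packing macro is illustrative; glibc
computes __LINUX_KERNEL_VERSION at configure time with the same layout):

#define PACK_KERNEL_VERSION(a, b, c)  (((a) << 16) | ((b) << 8) | (c))
/* PACK_KERNEL_VERSION (2, 6, 33) == 0x020621, the value tested above.  */
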
diff --git a/sysdeps/unix/sysv/linux/recvmmsg.c b/sysdeps/unix/sysv/linux/recvmmsg.c
new file mode 100644
index 0000000000..0c08171d4c
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/recvmmsg.c
@@ -0,0 +1,100 @@
+/* Copyright (C) 2010 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Andreas Schwab <schwab@redhat.com>, 2010.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <errno.h>
+#include <sys/socket.h>
+
+#include <sysdep-cancel.h>
+#include <sys/syscall.h>
+#include <kernel-features.h>
+
+
+#ifdef __NR_recvmmsg
+int
+recvmmsg (int fd, struct mmsghdr *vmessages, unsigned int vlen, int flags,
+	  const struct timespec *tmo)
+{
+  if (SINGLE_THREAD_P)
+    return INLINE_SYSCALL (recvmmsg, 5, fd, vmessages, vlen, flags, tmo);
+
+  int oldtype = LIBC_CANCEL_ASYNC ();
+
+  int result = INLINE_SYSCALL (recvmmsg, 5, fd, vmessages, vlen, flags, tmo);
+
+  LIBC_CANCEL_RESET (oldtype);
+
+  return result;
+}
+#elif defined __NR_socketcall
+# ifndef __ASSUME_RECVMMSG
+extern int __internal_recvmmsg (int fd, struct mmsghdr *vmessages,
+				unsigned int vlen, int flags,
+				const struct timespec *tmo)
+     attribute_hidden;
+
+static int have_recvmmsg;
+
+int
+recvmmsg (int fd, struct mmsghdr *vmessages, unsigned int vlen, int flags,
+	  const struct timespec *tmo)
+{
+  if (__builtin_expect (have_recvmmsg >= 0, 1))
+    {
+      int ret = __internal_recvmmsg (fd, vmessages, vlen, flags, tmo);
+      /* The kernel returns -EINVAL for unknown socket operations.
+	 We need to convert that error to an ENOSYS error.  */
+      if (__builtin_expect (ret < 0, 0)
+	  && have_recvmmsg == 0
+	  && errno == EINVAL)
+	{
+	  /* Try another call, this time with an invalid file
+	     descriptor and all other parameters cleared.  This call
+	     will not cause any harm and it will return
+	     immediately.  */
+	  ret = __internal_recvmmsg (-1, 0, 0, 0, 0);
+	  if (errno == EINVAL)
+	    {
+	      have_recvmmsg = -1;
+	      __set_errno (ENOSYS);
+	    }
+	  else
+	    {
+	      have_recvmmsg = 1;
+	      __set_errno (EINVAL);
+	    }
+	  return -1;
+	}
+      return ret;
+    }
+  __set_errno (ENOSYS);
+  return -1;
+}
+# else
+/* When __ASSUME_RECVMMSG recvmmsg is defined in internal_recvmmsg.S.  */
+# endif
+#else
+int
+recvmmsg (int fd, struct mmsghdr *vmessages, unsigned int vlen, int flags,
+	  const struct timespec *tmo)
+{
+  __set_errno (ENOSYS);
+  return -1;
+}
+stub_warning (recvmmsg)
+#endif
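
For context, a hypothetical caller of the new wrapper, draining up to
eight datagrams per call (names and buffer sizes are illustrative):

#define _GNU_SOURCE
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>

static void
drain (int fd)
{
  enum { VLEN = 8 };
  struct mmsghdr msgs[VLEN];
  struct iovec iovs[VLEN];
  char bufs[VLEN][1500];

  memset (msgs, 0, sizeof msgs);
  for (int i = 0; i < VLEN; ++i)
    {
      iovs[i].iov_base = bufs[i];
      iovs[i].iov_len = sizeof bufs[i];
      msgs[i].msg_hdr.msg_iov = &iovs[i];
      msgs[i].msg_hdr.msg_iovlen = 1;
    }

  int n = recvmmsg (fd, msgs, VLEN, 0, NULL);	/* NULL: no timeout */
  if (n > 0)
    for (int i = 0; i < n; ++i)
      printf ("datagram %d: %u bytes\n", i, msgs[i].msg_len);
}
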
diff --git a/sysdeps/unix/sysv/linux/sh/sh4/register-dump.h b/sysdeps/unix/sysv/linux/sh/sh4/register-dump.h
index e3c9c0e639..92df0858e1 100644
--- a/sysdeps/unix/sysv/linux/sh/sh4/register-dump.h
+++ b/sysdeps/unix/sysv/linux/sh/sh4/register-dump.h
@@ -1,5 +1,5 @@
 /* Dump registers.
-   Copyright (C) 1999, 2000, 2009 Free Software Foundation, Inc.
+   Copyright (C) 1999, 2000, 2009, 2010 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -58,7 +58,7 @@ register_dump (int fd, struct sigcontext *ctx)
 {
   char regs[22][8];
   char fpregs[34][8];
-  struct iovec iov[112];
+  struct iovec iov[22 * 2 + 34 * 2 + 2];
   size_t nr = 0;
 
 #define ADD_STRING(str) \
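
The new bound makes the sizing explicit: 22 * 2 + 34 * 2 + 2 = 114 iovec
entries, two more than the 112 the array previously reserved, so the old
declaration appears to have been short by the final two entries.
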
diff --git a/sysdeps/unix/sysv/linux/socketcall.h b/sysdeps/unix/sysv/linux/socketcall.h
index adf01b6e10..bab4e4a510 100644
--- a/sysdeps/unix/sysv/linux/socketcall.h
+++ b/sysdeps/unix/sysv/linux/socketcall.h
@@ -44,5 +44,6 @@
 #define SOCKOP_sendmsg		16
 #define SOCKOP_recvmsg		17
 #define SOCKOP_accept4		18
+#define SOCKOP_recvmmsg		19
 
 #endif /* sys/socketcall.h */
diff --git a/sysdeps/unix/sysv/linux/sys/timex.h b/sysdeps/unix/sysv/linux/sys/timex.h
index e10311f70f..13b94d6f4f 100644
--- a/sysdeps/unix/sysv/linux/sys/timex.h
+++ b/sysdeps/unix/sysv/linux/sys/timex.h
@@ -140,9 +140,9 @@ __BEGIN_DECLS
 extern int __adjtimex (struct timex *__ntx) __THROW;
 extern int adjtimex (struct timex *__ntx) __THROW;
 
-#if defined __GNUC__ && __GNUC__ >= 2
-extern int ntp_gettime (struct ntptimeval *__ntv)
-     __asm__ ("ntp_gettimex") __THROW;
+#ifdef __REDIRECT_NTH
+extern int __REDIRECT_NTH (ntp_gettime, (struct ntptimeval *__ntv),
+			   ntp_gettimex);
 #else
 extern int ntp_gettimex (struct ntptimeval *__ntv) __THROW;
 # define ntp_gettime ntp_gettimex
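
__REDIRECT_NTH comes from <sys/cdefs.h>; it declares one name but binds
it to a different symbol at the assembler level, placing the no-throw
annotation where each language dialect expects it.  Roughly, in C the
new declaration amounts to (a rough expansion, not verbatim):

extern int ntp_gettime (struct ntptimeval *__ntv)
     __asm__ ("ntp_gettimex") __THROW;
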
diff --git a/sysdeps/unix/sysv/linux/syscalls.list b/sysdeps/unix/sysv/linux/syscalls.list
index a1a449eb4b..a87906a4e3 100644
--- a/sysdeps/unix/sysv/linux/syscalls.list
+++ b/sysdeps/unix/sysv/linux/syscalls.list
@@ -53,7 +53,6 @@ prctl		EXTRA	prctl		i:iiiii	__prctl		prctl
 putpmsg		-	putpmsg		i:ippii	putpmsg
 query_module	EXTRA	query_module	i:sipip	query_module
 quotactl	EXTRA	quotactl	i:isip	quotactl
-recvmmsg	EXTRA	recvmmsg	Ci:ipiip	recvmmsg
 remap_file_pages -	remap_file_pages i:piiii	__remap_file_pages remap_file_pages
 sched_getp	-	sched_getparam	i:ip	__sched_getparam	sched_getparam
 sched_gets	-	sched_getscheduler	i:i	__sched_getscheduler	sched_getscheduler
diff --git a/sysdeps/x86_64/multiarch/init-arch.c b/sysdeps/x86_64/multiarch/init-arch.c
index efb89b6c92..f13a9f4b79 100644
--- a/sysdeps/x86_64/multiarch/init-arch.c
+++ b/sysdeps/x86_64/multiarch/init-arch.c
@@ -62,15 +62,15 @@ __init_cpu_features (void)
       unsigned int eax = __cpu_features.cpuid[COMMON_CPUID_INDEX_1].eax;
       unsigned int extended_family = (eax >> 20) & 0xff;
       unsigned int extended_model = (eax >> 12) & 0xf0;
-      if (__cpu_features.family == 0x0f)
+      if (family == 0x0f)
 	{
 	  family += extended_family;
 	  model += extended_model;
 	}
-      else if (__cpu_features.family == 0x06)
+      else if (family == 0x06)
 	{
 	  model += extended_model;
-	  switch (__cpu_features.model)
+	  switch (model)
 	    {
 	    case 0x1a:
 	    case 0x1e:
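
The fix tests the local family and switches on the locally adjusted
model rather than the __cpu_features fields.  A worked example with an
illustrative leaf-1 EAX value (0x000106a5, typical of an Intel Nehalem
part):

static unsigned int
effective_model (unsigned int eax)	/* e.g. 0x000106a5 */
{
  unsigned int family = (eax >> 8) & 0x0f;		/* 0x06 */
  unsigned int model = (eax >> 4) & 0x0f;		/* 0x0a */
  unsigned int extended_model = (eax >> 12) & 0xf0;	/* 0x10 */

  if (family == 0x06)
    model += extended_model;	/* 0x0a + 0x10 == 0x1a, matching the
				   "case 0x1a" arm shown above */
  return model;
}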