path: root/sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep.h
Diffstat (limited to 'sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep.h')
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep.h  138
1 file changed, 133 insertions, 5 deletions
diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep.h b/sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep.h
index 2ee3e60229..c42efbabdd 100644
--- a/sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep.h
+++ b/sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 1992,1997-2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 1992,1997-2003,2004,2005,2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -20,6 +20,7 @@
 #define _LINUX_POWERPC_SYSDEP_H 1
 
 #include <sysdeps/unix/powerpc/sysdep.h>
+#include <tls.h>
 
 /* Some system calls got renamed over time, but retained the same semantics.
    Handle them here so they can be caught by both C and assembler stubs in
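The actual rename mappings live further down in the header and are not part of this hunk; a minimal sketch of the pattern, using the pread/pread64 pair as a hypothetical example:

    /* Older kernel headers exported the call under one name, newer ones
       under another; map the old name onto the new number so both the C
       and the assembler stubs keep working.  */
    #ifdef __NR_pread64
    # ifndef __NR_pread
    #  define __NR_pread __NR_pread64
    # endif
    #endif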
@@ -54,6 +55,109 @@
 
 # include <errno.h>
 
+# ifdef SHARED
+#  define INLINE_VSYSCALL(name, nr, args...) \
+  ({									      \
+    __label__ out;							      \
+    __label__ iserr;							      \
+    INTERNAL_SYSCALL_DECL (sc_err);					      \
+    long int sc_ret;							      \
+									      \
+    if (__vdso_##name != NULL)						      \
+      {									      \
+	sc_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, sc_err, nr, ##args);   \
+	if (!INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err))			      \
+	  goto out;							      \
+	if (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err) != ENOSYS)		      \
+	  goto iserr;							      \
+      }									      \
+									      \
+    sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, ##args);		      \
+    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err))			      \
+      {									      \
+      iserr:								      \
+        __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err));		      \
+        sc_ret = -1L;							      \
+      }									      \
+  out:									      \
+    sc_ret;								      \
+  })
+# else
+#  define INLINE_VSYSCALL(name, nr, args...) \
+  INLINE_SYSCALL (name, nr, ##args)
+# endif
+
+# ifdef SHARED
+#  define INTERNAL_VSYSCALL(name, err, nr, args...) \
+  ({									      \
+    __label__ out;							      \
+    long int v_ret;							      \
+									      \
+    if (__vdso_##name != NULL)						      \
+      {									      \
+	v_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, err, nr, ##args);	      \
+	if (!INTERNAL_SYSCALL_ERROR_P (v_ret, err)			      \
+	    || INTERNAL_SYSCALL_ERRNO (v_ret, err) != ENOSYS)		      \
+	  goto out;							      \
+      }									      \
+    v_ret = INTERNAL_SYSCALL (name, err, nr, ##args);			      \
+  out:									      \
+    v_ret;								      \
+  })
+# else
+#  define INTERNAL_VSYSCALL(name, err, nr, args...) \
+  INTERNAL_SYSCALL (name, err, nr, ##args)
+# endif
+
+# define INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK(name, err, nr, args...)	      \
+  ({									      \
+    long int sc_ret = ENOSYS;						      \
+									      \
+    if (__vdso_##name != NULL)						      \
+      sc_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, err, nr, ##args);	      \
+    else								      \
+      err = 1 << 28;							      \
+    sc_ret;								      \
+  })
+
+/* List of system calls which are supported as vsyscalls.  */
+# define HAVE_CLOCK_GETRES_VSYSCALL	1
+# define HAVE_CLOCK_GETTIME_VSYSCALL	1
+
+/* Define a macro which expands inline into the wrapper code for a VDSO
+   call.  It is intended for internal calls that do not need the normal
+   error handling; it never touches errno.
+   On powerpc a system call basically clobbers the same registers as a
+   function call does, with the exception of LR (which is needed for the
+   "sc; bnslr+" sequence) and CR (where only CR0.SO is clobbered to signal
+   an error return status).  */
+# define INTERNAL_VSYSCALL_NCS(funcptr, err, nr, args...) \
+  ({									      \
+    register void *r0  __asm__ ("r0");					      \
+    register long int r3  __asm__ ("r3");				      \
+    register long int r4  __asm__ ("r4");				      \
+    register long int r5  __asm__ ("r5");				      \
+    register long int r6  __asm__ ("r6");				      \
+    register long int r7  __asm__ ("r7");				      \
+    register long int r8  __asm__ ("r8");				      \
+    register long int r9  __asm__ ("r9");				      \
+    register long int r10 __asm__ ("r10");				      \
+    register long int r11 __asm__ ("r11");				      \
+    register long int r12 __asm__ ("r12");				      \
+    LOADARGS_##nr (funcptr, args);					      \
+    __asm__ __volatile__						      \
+      ("mtctr %0\n\t"							      \
+       "bctrl\n\t"							      \
+       "mfcr %0"							      \
+       : "=&r" (r0),							      \
+	 "=&r" (r3), "=&r" (r4), "=&r" (r5),  "=&r" (r6),  "=&r" (r7),	      \
+	 "=&r" (r8), "=&r" (r9), "=&r" (r10), "=&r" (r11), "=&r" (r12)	      \
+       : ASM_INPUT_##nr							      \
+       : "cr0", "ctr", "lr", "memory");					      \
+    err = (long int) r0;						      \
+    (int) r3;								      \
+  })
+
 # undef INLINE_SYSCALL
 # define INLINE_SYSCALL(name, nr, args...)				\
   ({									\
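A minimal sketch of how INLINE_VSYSCALL is meant to be used by a caller, assuming the vDSO symbol __vdso_clock_gettime has already been looked up during startup; the wrapper shown here is illustrative, not part of this patch:

    /* Fast path through the vDSO entry when it is available, transparent
       fallback to the real system call otherwise; errno is set on failure
       exactly as with INLINE_SYSCALL.  */
    int
    __clock_gettime (clockid_t clock_id, struct timespec *tp)
    {
      return INLINE_VSYSCALL (clock_gettime, 2, clock_id, tp);
    }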
@@ -92,7 +196,7 @@
     register long int r10 __asm__ ("r10");				\
     register long int r11 __asm__ ("r11");				\
     register long int r12 __asm__ ("r12");				\
-    LOADARGS_##nr(name, args);						\
+    LOADARGS_##nr(name, args);					\
     __asm__ __volatile__						\
       ("sc   \n\t"							\
        "mfcr %0"							\
@@ -109,16 +213,16 @@
 
 # undef INTERNAL_SYSCALL_ERROR_P
 # define INTERNAL_SYSCALL_ERROR_P(val, err) \
-  (__builtin_expect (err & (1 << 28), 0))
+  ((void) (val), __builtin_expect ((err) & (1 << 28), 0))
 
 # undef INTERNAL_SYSCALL_ERRNO
 # define INTERNAL_SYSCALL_ERRNO(val, err)     (val)
 
-# define LOADARGS_0(name, dummy) \
+# define LOADARGS_0(name, dummy)					      \
 	r0 = name
 # define LOADARGS_1(name, __arg1) \
 	long int arg1 = (long int) (__arg1);	\
-	LOADARGS_0(name, 0); \
+  LOADARGS_0(name, 0);					   \
 	extern void __illegally_sized_syscall_arg1 (void); \
 	if (__builtin_classify_type (__arg1) != 5 && sizeof (__arg1) > 4) \
 	  __illegally_sized_syscall_arg1 (); \
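The size checks in the LOADARGS_* macros guard against arguments that would silently truncate on the 32-bit ABI; a hedged sketch of the kind of call they are meant to catch (fd and the offset value are made up for illustration):

    /* A 64-bit non-pointer argument cannot fit in a single 32-bit register.
       The compile-time-constant check leaves a call to the undefined
       __illegally_sized_syscall_argN in the object file, so the build fails
       at link time instead of truncating the value at run time.  */
    long long big_offset = 0x100000000LL;
    INLINE_SYSCALL (lseek, 3, fd, big_offset, SEEK_SET);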
@@ -170,4 +274,28 @@
 #endif /* __ASSEMBLER__ */
 
 
+/* Pointer mangling support.  */
+#if defined NOT_IN_libc && defined IS_IN_rtld
+/* We cannot use the thread descriptor because in ld.so we use setjmp
+   before the descriptor is initialized.  */
+#else
+# ifdef __ASSEMBLER__
+#  define PTR_MANGLE(reg, tmpreg) \
+	lwz	tmpreg,POINTER_GUARD(r2); \
+	xor	reg,tmpreg,reg
+#  define PTR_MANGLE2(reg, tmpreg) \
+	xor	reg,tmpreg,reg
+#  define PTR_MANGLE3(destreg, reg, tmpreg) \
+	lwz	tmpreg,POINTER_GUARD(r2); \
+	xor	destreg,tmpreg,reg
+#  define PTR_DEMANGLE(reg, tmpreg) PTR_MANGLE (reg, tmpreg)
+#  define PTR_DEMANGLE2(reg, tmpreg) PTR_MANGLE2 (reg, tmpreg)
+#  define PTR_DEMANGLE3(destreg, reg, tmpreg) PTR_MANGLE3 (destreg, reg, tmpreg)
+# else
+#  define PTR_MANGLE(var) \
+  (var) = (__typeof (var)) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
+#  define PTR_DEMANGLE(var)	PTR_MANGLE (var)
+# endif
+#endif
+
 #endif /* linux/powerpc/powerpc32/sysdep.h */
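
For reference, a minimal C sketch of what the non-assembler PTR_MANGLE/PTR_DEMANGLE pair does, assuming THREAD_GET_POINTER_GUARD () returns the per-thread guard word; the callback and variable names are hypothetical:

    /* A stored code pointer is XORed with a secret per-thread guard value;
       demangling is the same XOR, which is why PTR_DEMANGLE is defined as
       PTR_MANGLE.  An attacker who overwrites the saved slot without
       knowing the guard cannot produce a useful jump target.  */
    static void (*saved_handler) (void);

    static void
    save_handler (void (*fn) (void))
    {
      saved_handler = fn;
      PTR_MANGLE (saved_handler);	/* store in mangled form */
    }

    static void
    run_handler (void)
    {
      void (*fn) (void) = saved_handler;
      PTR_DEMANGLE (fn);		/* same XOR restores the pointer */
      fn ();
    }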