path: root/sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep.h
author    Jakub Jelinek <jakub@redhat.com>  2007-07-12 18:26:36 +0000
committer Jakub Jelinek <jakub@redhat.com>  2007-07-12 18:26:36 +0000
commit    0ecb606cb6cf65de1d9fc8a919bceb4be476c602 (patch)
tree      2ea1f8305970753e4a657acb2ccc15ca3eec8e2c /sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep.h
parent    7d58530341304d403a6626d7f7a1913165fe2f32 (diff)
download  glibc-0ecb606cb6cf65de1d9fc8a919bceb4be476c602.tar.gz
          glibc-0ecb606cb6cf65de1d9fc8a919bceb4be476c602.tar.xz
          glibc-0ecb606cb6cf65de1d9fc8a919bceb4be476c602.zip
2.5-18.1
Diffstat (limited to 'sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep.h')
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep.h  150
1 file changed, 141 insertions, 9 deletions
diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep.h b/sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep.h
index 38a376fa90..aab4b721c0 100644
--- a/sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep.h
+++ b/sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep.h
@@ -1,5 +1,5 @@
-/* Copyright (C) 1992, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004
-   Free Software Foundation, Inc.
+/* Copyright (C) 1992,1997,1998,1999,2000,2001,2002,2003,2004,2005,2006
+	Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -23,6 +23,7 @@
 #define _LINUX_POWERPC_SYSDEP_H 1
 
 #include <sysdeps/unix/powerpc/sysdep.h>
+#include <tls.h>
 
 /* Define __set_errno() for INLINE_SYSCALL macro below.  */
 #ifndef __ASSEMBLER__
@@ -61,12 +62,118 @@
 #ifdef __ASSEMBLER__
 
 /* This seems to always be the case on PPC.  */
-#define ALIGNARG(log2) log2
+# define ALIGNARG(log2) log2
 /* For ELF we need the `.type' directive to make shared libs work right.  */
-#define ASM_TYPE_DIRECTIVE(name,typearg) .type name,typearg;
-#define ASM_SIZE_DIRECTIVE(name) .size name,.-name
+# define ASM_TYPE_DIRECTIVE(name,typearg) .type name,typearg;
+# define ASM_SIZE_DIRECTIVE(name) .size name,.-name
 
-#endif	/* __ASSEMBLER__ */
+#endif /* __ASSEMBLER__ */
+
+/* This version is for kernels that implement system calls that
+   behave like function calls as far as register saving is concerned.
+   It falls back to the syscall if the vDSO entry does not exist or
+   fails with ENOSYS.  */
+#ifdef SHARED
+# define INLINE_VSYSCALL(name, nr, args...) \
+  ({									      \
+    __label__ out;							      \
+    __label__ iserr;							      \
+    INTERNAL_SYSCALL_DECL (sc_err);					      \
+    long int sc_ret;							      \
+									      \
+    if (__vdso_##name != NULL)						      \
+      {									      \
+	sc_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, sc_err, nr, ##args);   \
+	if (!INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err))			      \
+	  goto out;							      \
+	if (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err) != ENOSYS)		      \
+	  goto iserr;							      \
+      }									      \
+									      \
+    sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, ##args);		      \
+    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err))			      \
+      {									      \
+      iserr:								      \
+        __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err));		      \
+        sc_ret = -1L;							      \
+      }									      \
+  out:									      \
+    sc_ret;								      \
+  })
+#else
+# define INLINE_VSYSCALL(name, nr, args...) \
+  INLINE_SYSCALL (name, nr, ##args)
+#endif
+
+#ifdef SHARED
+# define INTERNAL_VSYSCALL(name, err, nr, args...) \
+  ({									      \
+    __label__ out;							      \
+    long int v_ret;							      \
+									      \
+    if (__vdso_##name != NULL)						      \
+      {									      \
+	v_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, err, nr, ##args);	      \
+	if (!INTERNAL_SYSCALL_ERROR_P (v_ret, err)			      \
+	    || INTERNAL_SYSCALL_ERRNO (v_ret, err) != ENOSYS)		      \
+	  goto out;							      \
+      }									      \
+    v_ret = INTERNAL_SYSCALL (name, err, nr, ##args);			      \
+  out:									      \
+    v_ret;								      \
+  })
+#else
+# define INTERNAL_VSYSCALL(name, err, nr, args...) \
+  INTERNAL_SYSCALL (name, err, nr, ##args)
+#endif
+
+/* This version is for internal uses when there is no desire
+   to set errno.  */
+#define INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK(name, err, nr, args...)	      \
+  ({									      \
+    long int sc_ret = ENOSYS;						      \
+									      \
+    if (__vdso_##name != NULL)						      \
+      sc_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, err, nr, ##args);	      \
+    else								      \
+      err = 1 << 28;							      \
+    sc_ret;								      \
+  })
+
+/* List of system calls which are supported as vsyscalls.  */
+#define HAVE_CLOCK_GETRES_VSYSCALL	1
+#define HAVE_CLOCK_GETTIME_VSYSCALL	1
+
+/* Define a macro which expands inline into the wrapper code for a
+   system call.  It is meant for internal calls that do not need to
+   handle errors normally and never touches errno.  It returns just what
+   the kernel gave back in the non-error (CR0.SO cleared) case; otherwise
+   (CR0.SO set) the negation of the return value done in the kernel is reverted.  */
+
+#define INTERNAL_VSYSCALL_NCS(funcptr, err, nr, args...) \
+  ({									\
+    register void *r0  __asm__ ("r0");					\
+    register long int r3  __asm__ ("r3");				\
+    register long int r4  __asm__ ("r4");				\
+    register long int r5  __asm__ ("r5");				\
+    register long int r6  __asm__ ("r6");				\
+    register long int r7  __asm__ ("r7");				\
+    register long int r8  __asm__ ("r8");				\
+    LOADARGS_##nr (funcptr, args);					\
+    __asm__ __volatile__						\
+      ("mtctr %0\n\t"							\
+       "bctrl\n\t"							\
+       "mfcr  %0\n\t"							\
+       "0:"								\
+       : "=&r" (r0),							\
+         "=&r" (r3), "=&r" (r4), "=&r" (r5),				\
+         "=&r" (r6), "=&r" (r7), "=&r" (r8)				\
+       : ASM_INPUT_##nr							\
+       : "r9", "r10", "r11", "r12",					\
+         "cr0", "ctr", "lr", "memory");					\
+	  err = (long int) r0;						\
+    (int) r3;								\
+  })
 
 #undef INLINE_SYSCALL
 
@@ -100,7 +207,7 @@
     register long int r6  __asm__ ("r6");				\
     register long int r7  __asm__ ("r7");				\
     register long int r8  __asm__ ("r8");				\
-    LOADARGS_##nr(name, args);						\
+    LOADARGS_##nr (name, ##args);					\
     __asm__ __volatile__						\
       ("sc\n\t"								\
        "mfcr  %0\n\t"							\
@@ -115,14 +222,14 @@
     (int) r3;  \
   })
 #define INTERNAL_SYSCALL(name, err, nr, args...)			\
-  INTERNAL_SYSCALL_NCS (__NR_##name, err, nr, ##args)
+  INTERNAL_SYSCALL_NCS (__NR_##name, err, nr, args)
 
 #undef INTERNAL_SYSCALL_DECL
 #define INTERNAL_SYSCALL_DECL(err) long int err
 
 #undef INTERNAL_SYSCALL_ERROR_P
 #define INTERNAL_SYSCALL_ERROR_P(val, err) \
-  (__builtin_expect (err & (1 << 28), 0))
+  ((void) (val), __builtin_expect ((err) & (1 << 28), 0))
 
 #undef INTERNAL_SYSCALL_ERRNO
 #define INTERNAL_SYSCALL_ERRNO(val, err)     (val)
@@ -180,4 +287,29 @@
 #define ASM_INPUT_5 ASM_INPUT_4, "5" (r7)
 #define ASM_INPUT_6 ASM_INPUT_5, "6" (r8)
 
+
+/* Pointer mangling support.  */
+#if defined NOT_IN_libc && defined IS_IN_rtld
+/* We cannot use the thread descriptor because in ld.so we use setjmp
+   before the descriptor is initialized.  */
+#else
+# ifdef __ASSEMBLER__
+#  define PTR_MANGLE(reg, tmpreg) \
+	ld	tmpreg,POINTER_GUARD(r13); \
+	xor	reg,tmpreg,reg
+#  define PTR_MANGLE2(reg, tmpreg) \
+	xor	reg,tmpreg,reg
+#  define PTR_MANGLE3(destreg, reg, tmpreg) \
+	ld	tmpreg,POINTER_GUARD(r13); \
+	xor	destreg,tmpreg,reg
+#  define PTR_DEMANGLE(reg, tmpreg) PTR_MANGLE (reg, tmpreg)
+#  define PTR_DEMANGLE2(reg, tmpreg) PTR_MANGLE2 (reg, tmpreg)
+#  define PTR_DEMANGLE3(destreg, reg, tmpreg) PTR_MANGLE3 (destreg, reg, tmpreg)
+# else
+#  define PTR_MANGLE(var) \
+  (var) = (__typeof (var)) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
+#  define PTR_DEMANGLE(var)	PTR_MANGLE (var)
+# endif
+#endif
+
 #endif /* linux/powerpc/powerpc64/sysdep.h */
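
To make the new INLINE_VSYSCALL logic easier to follow outside the macro
expansion, here is a minimal stand-alone C sketch of the same control flow:
call the vDSO entry when one was found, fall through to the real system call
when the pointer is NULL or the vDSO returns ENOSYS, and set errno for any
other failure.  The names sketch_clock_gettime, vdso_clock_gettime and
do_syscall_clock_gettime are hypothetical stand-ins for the glibc-internal
symbols, and the sketch uses the plain negative-errno convention for
readability instead of the CR0.SO error flag that the powerpc macros
actually decode.

#include <errno.h>
#include <time.h>

typedef int (*vdso_gettime_fn) (clockid_t, struct timespec *);

/* Filled in at startup from the vDSO; stays NULL if the kernel does not
   export the entry point (hypothetical stand-in for __vdso_clock_gettime).  */
static vdso_gettime_fn vdso_clock_gettime;

/* Hypothetical stand-in for the "sc"-based INTERNAL_SYSCALL path; assumed
   to return 0 on success and a negative errno value on failure.  */
extern long do_syscall_clock_gettime (clockid_t id, struct timespec *tp);

int
sketch_clock_gettime (clockid_t id, struct timespec *tp)
{
  long ret;

  if (vdso_clock_gettime != NULL)
    {
      ret = vdso_clock_gettime (id, tp);
      if (ret == 0)
	return 0;		/* The vDSO handled the call.  */
      if (ret != -ENOSYS)
	{
	  errno = -ret;		/* A real error; do not fall back.  */
	  return -1;
	}
      /* ENOSYS from the vDSO: fall through to the kernel.  */
    }

  ret = do_syscall_clock_gettime (id, tp);
  if (ret != 0)
    {
      errno = -ret;
      return -1;
    }
  return 0;
}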
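
The pointer mangling macros added at the end of the patch are easier to see
in isolation.  Below is a rough C sketch of the idea, assuming a hypothetical
per-thread pointer_guard value in place of THREAD_GET_POINTER_GUARD () or
POINTER_GUARD(r13): a stored code pointer (for example one saved in a
jmp_buf) is XORed with the guard before it is written to memory and XORed
again before it is used, so overwriting the saved slot without knowing the
guard cannot reliably redirect control.  Because XOR is its own inverse,
PTR_DEMANGLE can simply reuse PTR_MANGLE.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-thread guard; the real value lives in the TCB and is
   fetched with THREAD_GET_POINTER_GUARD () (or via r13 in assembly).  */
static uintptr_t pointer_guard = 0xa5a5a5a5UL;

static void *
mangle (void *p)
{
  /* What PTR_MANGLE does at the C level: hide the pointer behind the
     guard before storing it.  */
  return (void *) ((uintptr_t) p ^ pointer_guard);
}

static void *
demangle (void *p)
{
  /* XOR undoes itself, which is why the patch defines PTR_DEMANGLE as
     PTR_MANGLE.  */
  return (void *) ((uintptr_t) p ^ pointer_guard);
}

int
main (void)
{
  void *target = (void *) &pointer_guard;
  void *stored = mangle (target);	/* what would be kept in a jmp_buf */
  printf ("round trip ok: %d\n", demangle (stored) == target);
  return 0;
}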