Diffstat (limited to 'sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep.h')
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep.h | 114
1 file changed, 110 insertions, 4 deletions
diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep.h b/sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep.h
index 546e712edc..470da5aa6e 100644
--- a/sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep.h
+++ b/sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep.h
@@ -55,6 +55,109 @@
 
 # include <errno.h>
 
+# ifdef SHARED
+#  define INLINE_VSYSCALL(name, nr, args...) \
+  ({									      \
+    __label__ out;							      \
+    __label__ iserr;							      \
+    INTERNAL_SYSCALL_DECL (sc_err);					      \
+    long int sc_ret;							      \
+									      \
+    if (__vdso_##name != NULL)						      \
+      {									      \
+	sc_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, sc_err, nr, ##args);   \
+	if (!INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err))			      \
+	  goto out;							      \
+	if (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err) != ENOSYS)		      \
+	  goto iserr;							      \
+      }									      \
+									      \
+    sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, ##args);		      \
+    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err))			      \
+      {									      \
+      iserr:								      \
+        __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err));		      \
+        sc_ret = -1L;							      \
+      }									      \
+  out:									      \
+    sc_ret;								      \
+  })
+# else
+#  define INLINE_VSYSCALL(name, nr, args...) \
+  INLINE_SYSCALL (name, nr, ##args)
+# endif
+
+# ifdef SHARED
+#  define INTERNAL_VSYSCALL(name, err, nr, args...) \
+  ({									      \
+    __label__ out;							      \
+    long int v_ret;							      \
+									      \
+    if (__vdso_##name != NULL)						      \
+      {									      \
+	v_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, err, nr, ##args);	      \
+	if (!INTERNAL_SYSCALL_ERROR_P (v_ret, err)			      \
+	    || INTERNAL_SYSCALL_ERRNO (v_ret, err) != ENOSYS)		      \
+	  goto out;							      \
+      }									      \
+    v_ret = INTERNAL_SYSCALL (name, err, nr, ##args);			      \
+  out:									      \
+    v_ret;								      \
+  })
+# else
+#  define INTERNAL_VSYSCALL(name, err, nr, args...) \
+  INTERNAL_SYSCALL (name, err, nr, ##args)
+# endif
+
+# define INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK(name, err, nr, args...)	      \
+  ({									      \
+    long int sc_ret = ENOSYS;						      \
+									      \
+    if (__vdso_##name != NULL)						      \
+      sc_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, err, nr, ##args);	      \
+    else								      \
+      err = 1 << 28;							      \
+    sc_ret;								      \
+  })
+
+/* List of system calls which are supported as vsyscalls.  */
+# define HAVE_CLOCK_GETRES_VSYSCALL	1
+# define HAVE_CLOCK_GETTIME_VSYSCALL	1
+
+/* Define a macro which expands inline into the wrapper code for a VDSO
+   call. This use is for internal calls that do not need to handle errors
+   normally. It will never touch errno.
+   On powerpc a system call basically clobbers the same registers as a
+   function call, with the exception of LR (which is needed for the
+   "sc; bnslr+" sequence) and CR (where only CR0.SO is clobbered to signal
+   an error return status).  */
+# define INTERNAL_VSYSCALL_NCS(funcptr, err, nr, args...) \
+  ({									      \
+    register void *r0  __asm__ ("r0");					      \
+    register long int r3  __asm__ ("r3");				      \
+    register long int r4  __asm__ ("r4");				      \
+    register long int r5  __asm__ ("r5");				      \
+    register long int r6  __asm__ ("r6");				      \
+    register long int r7  __asm__ ("r7");				      \
+    register long int r8  __asm__ ("r8");				      \
+    register long int r9  __asm__ ("r9");				      \
+    register long int r10 __asm__ ("r10");				      \
+    register long int r11 __asm__ ("r11");				      \
+    register long int r12 __asm__ ("r12");				      \
+    LOADARGS_##nr (funcptr, args);					      \
+    __asm__ __volatile__						      \
+      ("mtctr %0\n\t"							      \
+       "bctrl\n\t"							      \
+       "mfcr %0"							      \
+       : "=&r" (r0),							      \
+	 "=&r" (r3), "=&r" (r4), "=&r" (r5),  "=&r" (r6),  "=&r" (r7),	      \
+	 "=&r" (r8), "=&r" (r9), "=&r" (r10), "=&r" (r11), "=&r" (r12)	      \
+       : ASM_INPUT_##nr							      \
+       : "cr0", "ctr", "lr", "memory");					      \
+    err = (long int) r0;						      \
+    (int) r3;								      \
+  })
+
 # undef INLINE_SYSCALL
 # define INLINE_SYSCALL(name, nr, args...)				\
   ({									\
@@ -93,7 +196,7 @@
     register long int r10 __asm__ ("r10");				\
     register long int r11 __asm__ ("r11");				\
     register long int r12 __asm__ ("r12");				\
-    LOADARGS_##nr(name, args);						\
+    LOADARGS_##nr(name, args);					\
     __asm__ __volatile__						\
       ("sc   \n\t"							\
        "mfcr %0"							\
@@ -115,11 +218,11 @@
 # undef INTERNAL_SYSCALL_ERRNO
 # define INTERNAL_SYSCALL_ERRNO(val, err)     (val)
 
-# define LOADARGS_0(name, dummy) \
+# define LOADARGS_0(name, dummy)					      \
 	r0 = name
 # define LOADARGS_1(name, __arg1) \
 	long int arg1 = (long int) (__arg1);	\
-	LOADARGS_0(name, 0); \
+  LOADARGS_0(name, 0);					   \
 	extern void __illegally_sized_syscall_arg1 (void); \
 	if (__builtin_classify_type (__arg1) != 5 && sizeof (__arg1) > 4) \
 	  __illegally_sized_syscall_arg1 (); \
@@ -180,10 +283,13 @@
 #  define PTR_MANGLE(reg, tmpreg) \
 	lwz	tmpreg,POINTER_GUARD(r2); \
 	xor	reg,tmpreg,reg
+#  define PTR_MANGLE2(reg, tmpreg) \
+	xor	reg,tmpreg,reg
 #  define PTR_DEMANGLE(reg, tmpreg) PTR_MANGLE (reg, tmpreg)
+#  define PTR_DEMANGLE2(reg, tmpreg) PTR_MANGLE2 (reg, tmpreg)
 # else
 #  define PTR_MANGLE(var) \
-  (var) = (void *) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
+  (var) = (__typeof (var)) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
 #  define PTR_DEMANGLE(var)	PTR_MANGLE (var)
 # endif
 #endif
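
The new INLINE_VSYSCALL macro implements a "try the vDSO first, fall back to the
real system call" pattern. The following is a minimal, hypothetical user-space
sketch of that control flow in plain C, not glibc's actual internals: the
vdso_clock_gettime pointer and how it gets filled in are stand-ins for the
internal __vdso_clock_gettime lookup, and the sketch assumes the common
"0 or negative errno" return convention, whereas the powerpc32 code above
decodes errors from CR0.SO via the mfcr in INTERNAL_VSYSCALL_NCS.

/* Sketch of the vDSO-with-fallback pattern, under the assumptions above.  */
#include <errno.h>
#include <stddef.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

/* Hypothetical pointer, set at startup if the vDSO exports the symbol;
   left NULL otherwise.  */
static int (*vdso_clock_gettime) (clockid_t, struct timespec *);

static int
sketch_clock_gettime (clockid_t clk, struct timespec *ts)
{
  if (vdso_clock_gettime != NULL)
    {
      int r = vdso_clock_gettime (clk, ts);
      if (r == 0)
        return 0;                 /* vDSO handled it: done.  */
      if (r != -ENOSYS)
        {
          /* A real error from the vDSO path: report it like the
             syscall wrapper would.  */
          errno = -r;
          return -1;
        }
      /* -ENOSYS: the vDSO cannot service this clock, fall through.  */
    }
  /* Fallback: the ordinary system call; syscall(2) sets errno itself.  */
  return syscall (SYS_clock_gettime, clk, ts);
}

Falling through only on ENOSYS mirrors the goto logic in INLINE_VSYSCALL: clocks
the vDSO cannot serve still reach the kernel, while any other error is reported
immediately without issuing a redundant system call.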