author     Liubov Dmitrieva <liubov.dmitrieva@intel.com>    2013-01-25 18:40:50 +0400
committer  Liubov Dmitrieva <ldmitrie@sourceware.org>       2013-10-23 19:07:37 +0400
commit     7838e39881b61895ae13a15cf86ad041c75593de (patch)
tree       973756a8835f2f7840a7b8378ea80f4af6cb10ff
parent     36aee6bb45b54f2d242e256b5e596838c18c0b85 (diff)
Intel MPX support for x86_64 and x86_32 pthread routines.
Always use INIT bounds in __tls_get_addr. Set bounds manually in _Unwind_Resume.
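
The dl-tls.c and unwind-resume.c hunks rely on the compiler's pointer-bounds intrinsics, which only perform real MPX work when the file is built with bounds instrumentation (the __CHKP__ / __CHKWR__ configurations the patch tests for). The sketch below is only an illustration of those two calls, assuming an MPX-era compiler that provides __bnd_init_ptr_bounds and __bnd_set_ptr_bounds; the widen_bounds / attach_exception_bounds helpers, the exception_like type, and the no-op fallback macros are invented for the example and are not part of the patch.

/* Minimal sketch of the bounds intrinsics used by this patch.  When the
   translation unit is not bounds-instrumented (__CHKP__ undefined), treat
   the intrinsics as plain pass-throughs so the sketch still compiles.  */
#ifndef __CHKP__
# define __bnd_init_ptr_bounds(p)    (p)   /* reset pointer to INIT (unlimited) bounds */
# define __bnd_set_ptr_bounds(p, n)  (p)   /* attach bounds of n bytes to pointer p */
#endif

struct exception_like { char data[32]; };   /* illustrative stand-in for _Unwind_Exception */

/* Mirrors the __tls_get_addr change: discard whatever bounds the caller
   passed and continue with INIT bounds, so checked accesses through the
   pointer cannot fault spuriously.  */
static void *
widen_bounds (void *p)
{
  return __bnd_init_ptr_bounds (p);
}

/* Mirrors the _Unwind_Resume change: the pointer may arrive from
   uninstrumented code without valid bounds, so attach bounds covering
   exactly one object before using it.  */
static struct exception_like *
attach_exception_bounds (struct exception_like *exc)
{
  return (struct exception_like *) __bnd_set_ptr_bounds (exc, sizeof (struct exception_like));
}

int
main (void)
{
  struct exception_like e;
  struct exception_like *p = attach_exception_bounds (&e);
  return widen_bounds (p) == (void *) &e ? 0 : 1;
}

The assembly hunks apply the same idea at the register level: on entry the condvar and mutex bounds are saved into the wait frame (bndldx/bndmov to fixed stack slots), %bnd0 is reloaded with the bounds of the pointer argument before each internal call such as __pthread_mutex_unlock_usercnt or __pthread_mutex_cond_lock, and all saved bound registers are restored afterwards because the callee may leave them changed.
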
-rw-r--r--   elf/dl-tls.c                                                       3
-rw-r--r--   nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S   82
-rw-r--r--   nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S        63
-rw-r--r--   nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S                   9
-rw-r--r--   nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S      49
-rw-r--r--   nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S           37
-rw-r--r--   sysdeps/gnu/unwind-resume.c                                        3

7 files changed, 238 insertions, 8 deletions
diff --git a/elf/dl-tls.c b/elf/dl-tls.c
index 576d9a1465..ee84fa6dab 100644
--- a/elf/dl-tls.c
+++ b/elf/dl-tls.c
@@ -767,6 +767,9 @@ update_get_addr (GET_ADDR_ARGS)
 void *
 __tls_get_addr (GET_ADDR_ARGS)
 {
+#ifdef __CHKP__
+  GET_ADDR_PARAM = __bnd_init_ptr_bounds(GET_ADDR_PARAM);
+#endif
   dtv_t *dtv = THREAD_DTV ();
 
   if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
index a6d6bc460a..c0b107aa12 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
@@ -94,6 +94,13 @@ __pthread_cond_timedwait:
 	je	.Lreltmo
 #endif
 
+#if defined __CHKP__ || defined __CHKWR__
+	bndldx	(%esp,%ebx,1), %bnd0
+	bndldx	28(%esp,%ebp,1), %bnd2
+	bndmov	%bnd0, 48(%esp)
+	bndmov	%bnd2, 80(%esp)
+#endif
+
 	/* Get internal lock.  */
 	movl	$1, %edx
 	xorl	%eax, %eax
@@ -109,12 +116,24 @@ __pthread_cond_timedwait:
 	   different value in there this is a bad user bug.  */
 2:	cmpl	$-1, dep_mutex(%ebx)
 	movl	24(%esp), %eax
+#if defined __CHKP__ || defined __CHKWR__
+	bndldx	4(%esp,%eax,1), %bnd1
+	bndmov	%bnd1, 64(%esp)
+#endif
 	je	17f
 	movl	%eax, dep_mutex(%ebx)
 
 	/* Unlock the mutex.  */
 17:	xorl	%edx, %edx
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
 	call	__pthread_mutex_unlock_usercnt
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	48(%esp), %bnd0
+	bndmov	64(%esp), %bnd1
+	bndmov	80(%esp), %bnd2
+#endif
 
 	testl	%eax, %eax
 	jne	16f
@@ -296,9 +315,25 @@ __pthread_cond_timedwait:
 	   should always succeed or else the kernel did not lock the mutex
 	   correctly.  */
 	movl	dep_mutex(%ebx), %eax
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
 	call	__pthread_mutex_cond_lock_adjust
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	48(%esp), %bnd0
+	bndmov	64(%esp), %bnd1
+	bndmov	80(%esp), %bnd2
+#endif
 	xorl	%edx, %edx
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
 	call	__pthread_mutex_unlock_usercnt
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	48(%esp), %bnd0
+	bndmov	64(%esp), %bnd1
+	bndmov	80(%esp), %bnd2
+#endif
 	jmp	8b
 
 28:	addl	$1, wakeup_seq(%ebx)
@@ -356,8 +391,15 @@ __pthread_cond_timedwait:
 	movl	16(%esp), %ecx
 	testl	%ecx, %ecx
 	jnz	27f
-
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
 	call	__pthread_mutex_cond_lock
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	48(%esp), %bnd0
+	bndmov	64(%esp), %bnd1
+	bndmov	80(%esp), %bnd2
+#endif
 26:	addl	$FRAME_SIZE, %esp
 	cfi_adjust_cfa_offset(-FRAME_SIZE)
 
@@ -388,7 +430,16 @@ __pthread_cond_timedwait:
 
 	cfi_restore_state
 
-27:	call	__pthread_mutex_cond_lock_adjust
+27:
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
+	call	__pthread_mutex_cond_lock_adjust
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	48(%esp), %bnd0
+	bndmov	64(%esp), %bnd1
+	bndmov	80(%esp), %bnd2
+#endif
 	xorl	%eax, %eax
 	jmp	26b
 
@@ -529,7 +580,15 @@ __pthread_cond_timedwait:
 
 	/* Unlock the mutex.  */
 117:	xorl	%edx, %edx
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
 	call	__pthread_mutex_unlock_usercnt
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	48(%esp), %bnd0
+	bndmov	64(%esp), %bnd1
+	bndmov	80(%esp), %bnd2
+#endif
 
 	testl	%eax, %eax
 	jne	16b
@@ -899,10 +958,27 @@ __condvar_tw_cleanup:
 	cmpl	%ebx, %gs:TID
 	jne	8f
 	/* We managed to get the lock.  Fix it up before returning.  */
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
 	call	__pthread_mutex_cond_lock_adjust
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	48(%esp), %bnd0
+	bndmov	64(%esp), %bnd1
+	bndmov	80(%esp), %bnd2
+#endif
 	jmp	9f
 
-8:	call	__pthread_mutex_cond_lock
+8:
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
+	call	__pthread_mutex_cond_lock
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	48(%esp), %bnd0
+	bndmov	64(%esp), %bnd1
+	bndmov	80(%esp), %bnd2
+#endif
 
 9:	movl	%esi, (%esp)
 .LcallUR:
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S
index 9695dcb0ae..0a9125a14a 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S
@@ -60,6 +60,10 @@ __pthread_cond_wait:
 
 	xorl	%esi, %esi
 	movl	20(%esp), %ebx
+#if defined __CHKP__ || defined __CHKWR__
+	bndldx	(%esp,%ebx,1), %bnd0
+	bndmov	%bnd0, 32(%esp)
+#endif
 
 	LIBC_PROBE (cond_wait, 2, 24(%esp), %ebx)
 
@@ -78,12 +82,23 @@ __pthread_cond_wait:
 	   different value in there this is a bad user bug.  */
 2:	cmpl	$-1, dep_mutex(%ebx)
 	movl	24(%esp), %eax
+#if defined __CHKP__ || defined __CHKWR__
+	bndldx	4(%esp,%eax,1), %bnd1
+	bndmov	%bnd1, 48(%esp)
+#endif
 	je	15f
 	movl	%eax, dep_mutex(%ebx)
 
 	/* Unlock the mutex.  */
 15:	xorl	%edx, %edx
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
 	call	__pthread_mutex_unlock_usercnt
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	32(%esp), %bnd0
+	bndmov	48(%esp), %bnd1
+#endif
 
 	testl	%eax, %eax
 	jne	12f
@@ -270,7 +285,14 @@ __pthread_cond_wait:
 	testl	%ecx, %ecx
 	jnz	21f
 
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
 	call	__pthread_mutex_cond_lock
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	32(%esp), %bnd0
+	bndmov	48(%esp), %bnd1
+#endif
 20:	addl	$FRAME_SIZE, %esp
 	cfi_adjust_cfa_offset(-FRAME_SIZE);
 
@@ -292,7 +314,15 @@ __pthread_cond_wait:
 
 	cfi_restore_state
 
-21:	call	__pthread_mutex_cond_lock_adjust
+21:
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
+	call	__pthread_mutex_cond_lock_adjust
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	32(%esp), %bnd0
+	bndmov	48(%esp), %bnd1
+#endif
 	xorl	%eax, %eax
 	jmp	20b
 
@@ -308,9 +338,23 @@ __pthread_cond_wait:
 	   should always succeed or else the kernel did not lock the mutex
 	   correctly.  */
 	movl	dep_mutex(%ebx), %eax
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
 	call	__pthread_mutex_cond_lock_adjust
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	32(%esp), %bnd0
+	bndmov	48(%esp), %bnd1
+#endif
 	xorl	%edx, %edx
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
 	call	__pthread_mutex_unlock_usercnt
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	32(%esp), %bnd0
+	bndmov	48(%esp), %bnd1
+#endif
 	jmp	8b
 
 	/* Initial locking failed.  */
@@ -581,10 +625,25 @@ __condvar_w_cleanup:
 	cmpl	%ebx, %gs:TID
 	jne	8f
 	/* We managed to get the lock.  Fix it up before returning.  */
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
 	call	__pthread_mutex_cond_lock_adjust
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	32(%esp), %bnd0
+	bndmov	48(%esp), %bnd1
+#endif
 	jmp	9f
 
-8:	call	__pthread_mutex_cond_lock
+8:
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
+	call	__pthread_mutex_cond_lock
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	32(%esp), %bnd0
+	bndmov	48(%esp), %bnd1
+#endif
 
 9:	movl	%esi, (%esp)
 .LcallUR:
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S b/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S
index b405b9e21e..9e67f270c0 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S
@@ -114,6 +114,9 @@ __pthread_once:
 	jne	7f
 
 	leal	8(%esp), %eax
+#if defined __CHKP__ || defined __CHKWR__
+	bndldx	8(%esp,%eax,1), %bnd0
+#endif
 	call	HIDDEN_JUMPTARGET(__pthread_register_cancel)
 
 	/* Call the user-provided initialization function.  */
@@ -121,6 +124,9 @@ __pthread_once:
 
 	/* Pop the cleanup handler.  */
 	leal	8(%esp), %eax
+#if defined __CHKP__ || defined __CHKWR__
+	bndldx	8(%esp,%eax,1), %bnd0
+#endif
 	call	HIDDEN_JUMPTARGET(__pthread_unregister_cancel)
 	addl	$UNWINDBUFSIZE+8, %esp
 	cfi_adjust_cfa_offset (-UNWINDBUFSIZE-8)
@@ -168,6 +174,9 @@ __pthread_once:
 	ENTER_KERNEL
 
 	leal	8(%esp), %eax
+#if defined __CHKP__ || defined __CHKWR__
+	bndldx	8(%esp,%eax,1), %bnd0
+#endif
 	call	HIDDEN_JUMPTARGET (__pthread_unwind_next)
 	/* NOTREACHED */
 	hlt
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
index 6c1a75fd47..e30845dfce 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
@@ -99,6 +99,12 @@ __pthread_cond_timedwait:
 	movq	%rsi, 16(%rsp)
 	movq	%rdx, %r13
 
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd0, 72(%rsp)
+	bndmov	%bnd1, 88(%rsp)
+	bndmov	%bnd2, 104(%rsp)
+#endif
+
 	je	22f
 	mov	%RSI_LP, dep_mutex(%rdi)
 
@@ -128,7 +134,15 @@ __pthread_cond_timedwait:
 	/* Unlock the mutex.  */
 32:	movq	16(%rsp), %rdi
 	xorl	%esi, %esi
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
 	callq	__pthread_mutex_unlock_usercnt
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	72(%rsp), %bnd0
+	bndmov	88(%rsp), %bnd1
+	bndmov	104(%rsp), %bnd2
+#endif
 
 	testl	%eax, %eax
 	jne	46f
@@ -338,7 +352,15 @@ __pthread_cond_timedwait:
 	testb	%r15b, %r15b
 	jnz	64f
 
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
 	callq	__pthread_mutex_cond_lock
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	72(%rsp), %bnd0
+	bndmov	88(%rsp), %bnd1
+	bndmov	104(%rsp), %bnd2
+#endif
 63:	testq	%rax, %rax
 	cmoveq	%r14, %rax
 
@@ -362,7 +384,16 @@ __pthread_cond_timedwait:
 
 	cfi_restore_state
 
-64:	callq	__pthread_mutex_cond_lock_adjust
+64:
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
+	callq	__pthread_mutex_cond_lock_adjust
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	72(%rsp), %bnd0
+	bndmov	88(%rsp), %bnd1
+	bndmov	104(%rsp), %bnd2
+#endif
 	movq	%r14, %rax
 	jmp	48b
 
@@ -457,7 +488,15 @@ __pthread_cond_timedwait:
 	/* Unlock the mutex.  */
 2:	movq	16(%rsp), %rdi
 	xorl	%esi, %esi
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
 	callq	__pthread_mutex_unlock_usercnt
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	72(%rsp), %bnd0
+	bndmov	88(%rsp), %bnd1
+	bndmov	104(%rsp), %bnd2
+#endif
 
 	testl	%eax, %eax
 	jne	46b
@@ -786,7 +825,15 @@ __condvar_cleanup2:
 	cmpl	%eax, %fs:TID
 	jne	7f
 	/* We managed to get the lock.  Fix it up before returning.  */
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
 	callq	__pthread_mutex_cond_lock_adjust
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	72(%rsp), %bnd0
+	bndmov	88(%rsp), %bnd1
+	bndmov	104(%rsp), %bnd2
+#endif
 	jmp	8f
 
 7:	callq	__pthread_mutex_cond_lock
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
index f0f6683b7f..3338497cd7 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
@@ -74,6 +74,11 @@ __pthread_cond_wait:
 	movq	%rdi, 8(%rsp)
 	movq	%rsi, 16(%rsp)
 
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd0, 32(%rsp)
+	bndmov	%bnd1, 48(%rsp)
+#endif
+
 	je	15f
 	mov	%RSI_LP, dep_mutex(%rdi)
 
@@ -91,7 +96,14 @@ __pthread_cond_wait:
 	/* Unlock the mutex.  */
 2:	movq	16(%rsp), %rdi
 	xorl	%esi, %esi
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
 	callq	__pthread_mutex_unlock_usercnt
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	32(%rsp), %bnd0
+	bndmov	48(%rsp), %bnd1
+#endif
 
 	testl	%eax, %eax
 	jne	12f
@@ -256,7 +268,14 @@ __pthread_cond_wait:
 	testb	%r8b, %r8b
 	jnz	18f
 
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
 	callq	__pthread_mutex_cond_lock
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	32(%rsp), %bnd0
+	bndmov	48(%rsp), %bnd1
+#endif
 14:	leaq	FRAME_SIZE(%rsp), %rsp
 	cfi_adjust_cfa_offset(-FRAME_SIZE)
 
@@ -266,7 +285,15 @@ __pthread_cond_wait:
 
 	cfi_adjust_cfa_offset(FRAME_SIZE)
 
-18:	callq	__pthread_mutex_cond_lock_adjust
+18:
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
+	callq	__pthread_mutex_cond_lock_adjust
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	32(%rsp), %bnd0
+	bndmov	48(%rsp), %bnd1
+#endif
 	xorl	%eax, %eax
 	jmp	14b
 
@@ -510,10 +537,16 @@ __condvar_cleanup1:
 	cmpl	%eax, %fs:TID
 	jne	7f
 	/* We managed to get the lock.  Fix it up before returning.  */
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	%bnd1, %bnd0
+#endif
 	callq	__pthread_mutex_cond_lock_adjust
+#if defined __CHKP__ || defined __CHKWR__
+	bndmov	32(%rsp), %bnd0
+	bndmov	48(%rsp), %bnd1
+#endif
 	jmp	8f
 
-
 7:	callq	__pthread_mutex_cond_lock
 
 8:	movq	24(%rsp), %rdi
diff --git a/sysdeps/gnu/unwind-resume.c b/sysdeps/gnu/unwind-resume.c
index df845cd45b..19e06b20d2 100644
--- a/sysdeps/gnu/unwind-resume.c
+++ b/sysdeps/gnu/unwind-resume.c
@@ -46,6 +46,9 @@ init (void)
 void
 _Unwind_Resume (struct _Unwind_Exception *exc)
 {
+#ifdef __CHKP__
+  exc = (struct _Unwind_Exception *) __bnd_set_ptr_bounds (exc, sizeof (struct _Unwind_Exception));
+#endif
   if (__builtin_expect (libgcc_s_resume == NULL, 0))
     init ();
   libgcc_s_resume (exc);