author     Ulrich Drepper <drepper@redhat.com>    2009-07-19 14:54:56 -0700
committer  Ulrich Drepper <drepper@redhat.com>    2009-07-19 14:54:56 -0700
commit     515a8908cedcf7432270f410e4a749e4ce07a072 (patch)
tree       7e4631b6ee0ef5a723ca3a24b7032fa80601d72e /nptl
parent     e2dca2fea3f1a0a7b05fd10589f469496f9c42a3 (diff)
Make x86-64 pthread_cond_timedwait more robust.
It just happens that __pthread_enable_asynccancel doesn't modify the %rdi register. But this isn't guaranteed. Hence we reload the register after the calls.
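The fix relies on a basic System V x86-64 ABI point: %rdi is a call-clobbered argument register, so no caller may assume its value survives a function call; anything still needed afterwards has to be kept elsewhere (here, a stack slot) and reloaded. The following is a minimal, hypothetical GAS sketch of that save/reload pattern, not glibc code; the labels main, clobber_rdi, and value, the file name, and the 8(%rsp) slot are invented for illustration.

            .text
            .globl  main
            .type   main, @function
    # int main(void): returns *value, read through %rdi reloaded after a call.
    main:
            subq    $24, %rsp               # reserve a slot, keep 16-byte stack alignment
            leaq    value(%rip), %rdi       # %rdi points at data still needed after the call
            movq    %rdi, 8(%rsp)           # save it: %rdi is call-clobbered per the ABI
            callq   clobber_rdi             # the callee is free to (and here does) trash %rdi
            movq    8(%rsp), %rdi           # reload from the stack slot, as the glibc fix does
            movl    (%rdi), %eax            # safe to dereference again: exit status = 42
            addq    $24, %rsp
            ret
            .size   main, .-main

            .type   clobber_rdi, @function
    clobber_rdi:
            xorl    %edi, %edi              # overwrite %rdi; the ABI permits this
            ret
            .size   clobber_rdi, .-clobber_rdi

            .data
    value:  .long   42

Assembled with something like gcc reload-demo.s -o reload-demo (file name assumed), running the program and checking its exit status should show 42. Without the reload, the final load would go through whatever clobber_rdi left in %rdi; that kind of latent dependence on a callee's register behavior is exactly what the added movq 8(%rsp), %rdi removes from __pthread_cond_timedwait.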
Diffstat (limited to 'nptl')
-rw-r--r--  nptl/ChangeLog                                                | 5 +++++
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S | 2 ++
2 files changed, 7 insertions, 0 deletions
diff --git a/nptl/ChangeLog b/nptl/ChangeLog
index e9cac73459..785100d852 100644
--- a/nptl/ChangeLog
+++ b/nptl/ChangeLog
@@ -1,3 +1,8 @@
+2009-07-19  Ulrich Drepper  <drepper@redhat.com>
+
+	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
+	(__pthread_cond_timedwait): Make more robust.
+
 2009-07-18  Ulrich Drepper  <drepper@redhat.com>
 
 	* sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
index 45a9a4213b..1b19fdb8dc 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
@@ -153,6 +153,7 @@ __pthread_cond_timedwait:
 .LcleanupSTART1:
 34:	callq	__pthread_enable_asynccancel
 	movl	%eax, (%rsp)
+	movq	8(%rsp), %rdi
 
 	movq	%r13, %r10
 	cmpq	$-1, dep_mutex(%rdi)
@@ -456,6 +457,7 @@ __pthread_cond_timedwait:
 .LcleanupSTART2:
 4:	callq	__pthread_enable_asynccancel
 	movl	%eax, (%rsp)
+	movq	8(%rsp), %rdi
 
 	leaq	32(%rsp), %r10
 	cmpq	$-1, dep_mutex(%rdi)