author    H.J. Lu <hjl.tools@gmail.com>  2020-02-14 14:45:34 -0800
committer H.J. Lu <hjl.tools@gmail.com>  2020-02-14 15:15:25 -0800
commit    5d844e1b72513cf59b5e7c14295644efdcc66e44
tree      6fc7163de8a2cf23e252261d1f7dc360dca46535
parent    f6a9b6b08ea0fddad48d908729f866d3c30955ed
i386: Enable CET support in ucontext functions
1. getcontext and swapcontext are updated to save the caller's shadow
stack pointer and return address.
2. setcontext and swapcontext are updated to restore the shadow stack
and jump to the new context directly.
3. makecontext is updated to allocate a new shadow stack and set the
caller's return address to the helper code, L(exitcode).
4. Since we no longer save and restore EAX, ECX and EDX in getcontext,
setcontext and swapcontext, we can use them as scratch registers to
enable CET in the ucontext functions.  A typical calling sequence that
these functions must keep working is sketched below.

Since makecontext allocates a new shadow stack when making a new
context and the kernel allocates a new shadow stack for the clone,
fork and vfork syscalls, we track the base of the current shadow
stack.  In setcontext and swapcontext, if the target shadow stack
base is the same as the current shadow stack base, we unwind the
shadow stack.  Otherwise it is a stack switch and we look for a
restore token.
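
A rough C model of just this decision (the names here are invented for
illustration; the real logic is in assembly and the base lives in the
TCB at %gs:SSP_BASE_OFFSET):

#include <stdint.h>

enum shstk_action { SHSTK_UNWIND, SHSTK_SWITCH_VIA_TOKEN };

/* current_base mirrors the per-thread value kept at
   %gs:SSP_BASE_OFFSET; target_base is the base saved in the target
   ucontext_t.  */
static enum shstk_action
shstk_restore_action (uint32_t current_base, uint32_t target_base)
{
  if (target_base == current_base)
    /* The target lives on the current shadow stack: unwind it by
       popping entries with INCSSP.  */
    return SHSTK_UNWIND;

  /* Otherwise it is a stack switch: look for a restore token on the
     target shadow stack and switch with RSTORSSP/SAVEPREVSSP.  */
  return SHSTK_SWITCH_VIA_TOKEN;
}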

We enable shadow stack at run time only if the program and all shared
objects it uses, including dlopened ones, are shadow stack enabled,
which means they must be compiled with GCC 8 or above and glibc 2.28
or above.  We need to save and restore the shadow stack only if shadow
stack is enabled.  When the caller of getcontext, setcontext,
swapcontext or makecontext was compiled against the older, smaller
ucontext_t, shadow stack won't be enabled at run time, so we check
whether shadow stack is enabled before accessing the extended field
in ucontext_t.
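
glibc makes that check by testing the X86_FEATURE_1_SHSTK bit it keeps
in the TCB (see the diff below).  Outside of glibc, the same "test
before touching shadow stack state" idea can be sketched with GCC's
CET intrinsics; this assumes GCC 8+ with -mshstk and is not part of
this patch:

/* Build with: gcc -m32 -mshstk check-shstk.c  (file name illustrative).  */
#include <immintrin.h>
#include <stdio.h>

/* Nonzero when a shadow stack is active for this thread: _get_ssp ()
   returns 0 if shadow stack is disabled, because RDSSP is then a
   no-op on a zeroed register.  */
static int
shadow_stack_active (void)
{
  return _get_ssp () != 0;
}

int
main (void)
{
  printf ("shadow stack is %s\n",
	  shadow_stack_active () ? "enabled" : "disabled");
  return 0;
}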

Tested on i386 CET/non-CET machines.

Reviewed-by: Carlos O'Donell <carlos@redhat.com>
Diffstat (limited to 'sysdeps/unix/sysv/linux/i386/swapcontext.S')
-rw-r--r--  sysdeps/unix/sysv/linux/i386/swapcontext.S  139
1 file changed, 139 insertions(+), 0 deletions(-)
diff --git a/sysdeps/unix/sysv/linux/i386/swapcontext.S b/sysdeps/unix/sysv/linux/i386/swapcontext.S
index 090c2d8c3e..203eafa2e7 100644
--- a/sysdeps/unix/sysv/linux/i386/swapcontext.S
+++ b/sysdeps/unix/sysv/linux/i386/swapcontext.S
@@ -18,6 +18,7 @@
    <https://www.gnu.org/licenses/>.  */
 
 #include <sysdep.h>
+#include <asm/prctl.h>
 
 #include "ucontext_i.h"
 
@@ -76,6 +77,144 @@ ENTRY(__swapcontext)
 	movl	oFS(%eax), %edx
 	movw	%dx, %fs
 
+#if SHSTK_ENABLED
+	/* Check if Shadow Stack is enabled.  */
+	testl	$X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET
+	jz	L(no_shstk)
+
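+	/* A zero SSP base in the TCB means no shadow stack base has
+	   been recorded for this thread yet.  */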
+	xorl	%eax, %eax
+	cmpl	%gs:SSP_BASE_OFFSET, %eax
+	jnz	L(shadow_stack_bound_recorded)
+
+	/* Get the base address and size of the default shadow stack
+	   which must be the current shadow stack since nothing has
+	   been recorded yet.  */
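+	/* The kernel fills the 24-byte buffer below with three 64-bit
+	   values; the shadow stack base is the second of them.  */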
+	sub	$24, %esp
+	mov	%esp, %ecx
+	movl	$ARCH_CET_STATUS, %ebx
+	movl	$__NR_arch_prctl, %eax
+	ENTER_KERNEL
+	testl	%eax, %eax
+	jz	L(continue_no_err)
+
+	/* This should never happen.  */
+	hlt
+
+L(continue_no_err):
+	/* Record the base of the current shadow stack.  */
+	movl	8(%esp), %eax
+	movl	%eax, %gs:SSP_BASE_OFFSET
+	add	$24, %esp
+
+L(shadow_stack_bound_recorded):
+	/* Load address of the context data structure we save in.  */
+	movl	4(%esp), %eax
+
+	/* Load address of the context data structure we swap in.  */
+	movl	8(%esp), %edx
+
+	/* If we unwind the stack, we can't undo stack unwinding.  Just
+	   save the target shadow stack pointer as the current shadow
+	   stack pointer.  */
+	movl	oSSP(%edx), %ecx
+	movl	%ecx, oSSP(%eax)
+
+	/* Save the current shadow stack base in ucontext.  */
+	movl	%gs:SSP_BASE_OFFSET, %ecx
+	movl	%ecx, (oSSP + 4)(%eax)
+
+	/* If the base of the target shadow stack is the same as the
+	   base of the current shadow stack, we unwind the shadow
+	   stack.  Otherwise it is a stack switch and we look for a
+	   restore token.  */
+	movl	oSSP(%edx), %esi
+	movl	%esi, %edi
+
+	/* Get the base of the target shadow stack.  */
+	movl	(oSSP + 4)(%edx), %ecx
+	cmpl	%gs:SSP_BASE_OFFSET, %ecx
+	je	L(unwind_shadow_stack)
+
+	/* Align the target's saved shadow stack pointer down to an
+	   8-byte boundary before searching for a restore token.  */
+
+L(find_restore_token_loop):
+	/* Look for a restore token.  */
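+	/* A valid restore token, with its low mode bits masked off,
+	   holds the address of the shadow stack slot just above the
+	   token itself.  */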
+	movl	-8(%esi), %ebx
+	andl	$-8, %ebx
+	cmpl	%esi, %ebx
+	je	L(restore_shadow_stack)
+
+	/* Try the next slot.  */
+	subl	$8, %esi
+	jmp	L(find_restore_token_loop)
+
+L(restore_shadow_stack):
+	/* The target shadow stack will be restored.  Save the current
+	   shadow stack pointer.  */
+	rdsspd	%ecx
+	movl	%ecx, oSSP(%eax)
+
+	/* Use the restore token to restore the target shadow stack.  */
+	rstorssp -8(%esi)
+
+	/* Save the restore token on the old shadow stack.  NB: This
+	   restore token may be checked by setcontext or swapcontext
+	   later.  */
+	saveprevssp
+
+	/* Record the new shadow stack base that was switched to.  */
+	movl	(oSSP + 4)(%edx), %ebx
+	movl	%ebx, %gs:SSP_BASE_OFFSET
+
+L(unwind_shadow_stack):
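+	/* Pop (target SSP - current SSP) / 4 entries off the shadow
+	   stack, at most 255 at a time per INCSSPD.  */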
+	rdsspd	%ebx
+	subl	%edi, %ebx
+	je	L(skip_unwind_shadow_stack)
+	negl	%ebx
+	shrl	$2, %ebx
+	movl	$255, %esi
+L(loop):
+	cmpl	%esi, %ebx
+	cmovb	%ebx, %esi
+	incsspd	%esi
+	subl	%esi, %ebx
+	ja	L(loop)
+
+L(skip_unwind_shadow_stack):
+
+	/* Load the new stack pointer.  */
+	movl	oESP(%edx), %esp
+
+	/* Load the values of all the preserved registers (except ESP).  */
+	movl	oEDI(%edx), %edi
+	movl	oESI(%edx), %esi
+	movl	oEBP(%edx), %ebp
+	movl	oEBX(%edx), %ebx
+
+	/* Get the return address set with getcontext.  */
+	movl	oEIP(%edx), %ecx
+
+	/* Check if return address is valid for the case when setcontext
+	   is invoked from L(exitcode) with linked context.  */
+	rdsspd	%eax
+	cmpl	(%eax), %ecx
+	/* Clear EAX to indicate success.  NB: Don't use xorl to keep
+	   EFLAGS for jne.  */
+	movl	$0, %eax
+	jne	L(jmp)
+	/* Return to the new context if return address valid.  */
+	pushl	%ecx
+	ret
+
+L(jmp):
+	/* Jump to the new context directly.  */
+	jmp	*%ecx
+
+L(no_shstk):
+#endif
+
 	/* Fetch the address to return to.  */
 	movl	oEIP(%eax), %ecx