about summary refs log tree commit diff
path: root/REORG.TODO/sysdeps/x86_64/_mcount.S
diff options
context:
space:
mode:
authorZack Weinberg <zackw@panix.com>2017-06-08 15:39:03 -0400
committerZack Weinberg <zackw@panix.com>2017-06-08 15:39:03 -0400
commit5046dbb4a7eba5eccfd258f92f4735c9ffc8d069 (patch)
tree4470480d904b65cf14ca524f96f79eca818c3eaf /REORG.TODO/sysdeps/x86_64/_mcount.S
parent199fc19d3aaaf57944ef036e15904febe877fc93 (diff)
downloadglibc-zack/build-layout-experiment.tar.gz
glibc-zack/build-layout-experiment.tar.xz
glibc-zack/build-layout-experiment.zip
Prepare for radical source tree reorganization. zack/build-layout-experiment
All top-level files and directories are moved into a temporary storage
directory, REORG.TODO, except for files that will certainly still
exist in their current form at top level when we're done (COPYING,
COPYING.LIB, LICENSES, NEWS, README), all old ChangeLog files (which
are moved to the new directory OldChangeLogs, instead), and the
generated file INSTALL (which is just deleted; in the new order, there
will be no generated files checked into version control).
Diffstat (limited to 'REORG.TODO/sysdeps/x86_64/_mcount.S')
-rw-r--r--REORG.TODO/sysdeps/x86_64/_mcount.S125
1 file changed, 125 insertions, 0 deletions
diff --git a/REORG.TODO/sysdeps/x86_64/_mcount.S b/REORG.TODO/sysdeps/x86_64/_mcount.S
new file mode 100644
index 0000000000..bcf0957752
--- /dev/null
+++ b/REORG.TODO/sysdeps/x86_64/_mcount.S
@@ -0,0 +1,125 @@
+/* Machine-specific calling sequence for `mcount' profiling function.  x86-64 version.
+   Copyright (C) 2002-2017 Free Software Foundation, Inc.
+   Contributed by Andreas Jaeger <aj@suse.de>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* Assembly stub to invoke _mcount().  Compiler generated code calls
+   this stub after executing a function's prologue and without saving any
+   registers.  It is therefore necessary to preserve %rcx, %rdx, %rsi, %rdi,
+   %r8, %r9 as they may contain function arguments.  */
+
+#include <sysdep.h>
+
+/* void _mcount (void) — profiling entry stub (SysV AMD64, AT&T syntax).
+   Reached from the profiled function AFTER its prologue has run, so
+   %rbp already holds that function's frame pointer.  Collects the
+   (frompc, selfpc) pair and passes it to __mcount_internal, preserving
+   every integer register that may still carry a live argument.  */
+ENTRY(_mcount)
+	/* Allocate space for 7 registers.  */
+	subq	$56,%rsp
+	cfi_adjust_cfa_offset (56)
+	/* Save %rax plus the six integer argument registers: we were
+	   called with no caller-side saving (see file header), so these
+	   may still hold the profiled function's arguments.
+	   NOTE(review): %rax is presumably saved for the variadic
+	   vector-count / struct-return cases — not shown here; confirm.
+	   Each store is paired with a cfi_rel_offset so unwinders can
+	   recover the value from the frame.  */
+	movq	%rax,(%rsp)
+	cfi_rel_offset (rax, 0)
+	movq	%rcx,8(%rsp)
+	cfi_rel_offset (rcx, 8)
+	movq	%rdx,16(%rsp)
+	cfi_rel_offset (rdx, 16)
+	movq	%rsi,24(%rsp)
+	cfi_rel_offset (rsi, 24)
+	movq	%rdi,32(%rsp)
+	cfi_rel_offset (rdi, 32)
+	movq	%r8,40(%rsp)
+	cfi_rel_offset (r8, 40)
+	movq	%r9,48(%rsp)
+	cfi_rel_offset (r9, 48)
+
+	/* Setup parameter for __mcount_internal.  */
+	/* selfpc is the return address on the stack.  */
+	/* Just above our 56-byte frame, i.e. an address inside the
+	   profiled function itself.  */
+	movq	56(%rsp),%rsi
+	/* Get frompc via the frame pointer.  */
+	/* The prologue already ran, so 8(%rbp) is the profiled
+	   function's own return address — the call site in its caller.  */
+	movq	8(%rbp),%rdi
+	call C_SYMBOL_NAME(__mcount_internal)
+	/* Pop the saved registers.  Please note that `mcount' has no
+	   return value.  */
+	movq	48(%rsp),%r9
+	cfi_restore (r9)
+	movq	40(%rsp),%r8
+	cfi_restore (r8)
+	movq	32(%rsp),%rdi
+	cfi_restore (rdi)
+	movq	24(%rsp),%rsi
+	cfi_restore (rsi)
+	movq	16(%rsp),%rdx
+	cfi_restore (rdx)
+	movq	8(%rsp),%rcx
+	cfi_restore (rcx)
+	movq	(%rsp),%rax
+	cfi_restore (rax)
+	addq	$56,%rsp
+	cfi_adjust_cfa_offset (-56)
+	ret
+END(_mcount)
+
+/* Drop any macro remapping of `mcount' before exporting it as a weak
+   alias of _mcount (the historical public name for this entry point).  */
+#undef mcount
+weak_alias (_mcount, mcount)
+
+/* __fentry__ is different from _mcount in that it is called before
+   function prolog.  This means (among other things) that it has non-standard
+   stack alignment on entry: (%RSP & 0xF) == 0.  */
+
+/* void __fentry__ (void) — profiling entry stub, prologue-less variant.
+   Unlike _mcount this is called BEFORE the profiled function's
+   prologue, so %rbp still belongs to the caller and BOTH return
+   addresses (ours and the profiled function's) sit directly on the
+   stack.  Entered with (%rsp & 0xF) == 0, per the note above.  */
+ENTRY(__fentry__)
+	/* Allocate space for 7 registers
+	   (+8 bytes for proper stack alignment).  */
+	subq	$64,%rsp
+	cfi_adjust_cfa_offset (64)
+	/* Save %rax plus the six integer argument registers, exactly as
+	   in _mcount; each store is paired with a cfi_rel_offset for the
+	   unwinder.  */
+	movq	%rax,(%rsp)
+	cfi_rel_offset (rax, 0)
+	movq	%rcx,8(%rsp)
+	cfi_rel_offset (rcx, 8)
+	movq	%rdx,16(%rsp)
+	cfi_rel_offset (rdx, 16)
+	movq	%rsi,24(%rsp)
+	cfi_rel_offset (rsi, 24)
+	movq	%rdi,32(%rsp)
+	cfi_rel_offset (rdi, 32)
+	movq	%r8,40(%rsp)
+	cfi_rel_offset (r8, 40)
+	movq	%r9,48(%rsp)
+	cfi_rel_offset (r9, 48)
+
+	/* Setup parameter for __mcount_internal.  */
+	/* selfpc is the return address on the stack.  */
+	/* Just above our 64-byte frame: our own return address, which
+	   points at the first bytes of the profiled function.  */
+	movq	64(%rsp),%rsi
+	/* caller is the return address above it */
+	/* No prologue has run yet, so the next stack slot still holds
+	   the profiled function's own return address (frompc).  */
+	movq	72(%rsp),%rdi
+	call C_SYMBOL_NAME(__mcount_internal)
+	/* Pop the saved registers.  Please note that `__fentry__' has no
+	   return value.  */
+	movq	48(%rsp),%r9
+	cfi_restore (r9)
+	movq	40(%rsp),%r8
+	cfi_restore (r8)
+	movq	32(%rsp),%rdi
+	cfi_restore (rdi)
+	movq	24(%rsp),%rsi
+	cfi_restore (rsi)
+	movq	16(%rsp),%rdx
+	cfi_restore (rdx)
+	movq	8(%rsp),%rcx
+	cfi_restore (rcx)
+	movq	(%rsp),%rax
+	cfi_restore (rax)
+	addq	$64,%rsp
+	cfi_adjust_cfa_offset (-64)
+	ret
+END(__fentry__)