
When setting up the new stack for a function in x86_64_wrap(), make
sure to make it 16-byte aligned, in keeping with amd64 calling
convention requirements.

Submitted by:	Mikore Li at sun dot com
Bill Paul 2005-04-16 04:47:15 +00:00
parent b8aa843c63
commit d84ed2322c
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=145133
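
For background on the fix: the amd64 (SysV) calling convention requires
%rsp to be 16-byte aligned at every call instruction, but x86_64_wrap()
is entered from Windows driver code that makes no such guarantee. The
patch applies the standard frame-pointer idiom: anchor the incoming
frame in %rbp, round %rsp down, and undo everything with leave. A
minimal sketch of that idiom in isolation (the label and the scratch
size here are illustrative, not taken from the commit):

	.text
	.globl	aligned_thunk		# illustrative name, not in the patch
aligned_thunk:
	push	%rbp			# save caller's frame pointer
	mov	%rsp,%rbp		# %rbp now anchors the unaligned frame
	and	$-16,%rsp		# round %rsp down to a 16-byte boundary
	sub	$32,%rsp		# local space: any multiple of 16
					# keeps the alignment intact
	# ... any calls made here see a conformant, 16-byte aligned %rsp ...
	leave				# mov %rbp,%rsp; pop %rbp -- restores
					# the exact pre-alignment stack pointer
	ret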

@@ -91,12 +91,14 @@
 	.globl x86_64_wrap_end
 ENTRY(x86_64_wrap)
+	push	%rbp		# insure that the stack
+	mov	%rsp,%rbp	# is 16-byte aligned
+	and	$-16,%rsp	#
 	subq	$96,%rsp	# allocate space on stack
 	mov	%rsi,96-8(%rsp)	# save %rsi
 	mov	%rdi,96-16(%rsp)# save %rdi
 	mov	%rcx,%r10	# temporarily save %rcx in scratch
-	mov	%rsp,%rsi
-	add	$96+56,%rsi	# source == old stack top (stack+56)
+	lea	56+8(%rbp),%rsi	# source == old stack top (stack+56)
 	mov	%rsp,%rdi	# destination == new stack top
 	mov	$10,%rcx	# count == 10 quadwords
 	rep
@@ -105,15 +107,15 @@ ENTRY(x86_64_wrap)
 	mov	%rdx,%rsi	# set up arg1 (%rdx -> %rsi)
 	mov	%r8,%rdx	# set up arg2 (%r8 -> %rdx)
 	mov	%r9,%rcx	# set up arg3 (%r9 -> %rcx)
-	mov	96+40(%rsp),%r8	# set up arg4 (stack+40 -> %r8)
-	mov	96+48(%rsp),%r9	# set up arg5 (stack+48 -> %r9)
+	mov	40+8(%rbp),%r8	# set up arg4 (stack+40 -> %r8)
+	mov	48+8(%rbp),%r9	# set up arg5 (stack+48 -> %r9)
 	xor	%rax,%rax	# clear return value
 x86_64_wrap_call:
	mov	$0xFF00FF00FF00FF00,%r11
 	callq	*%r11		# call routine
 	mov	96-16(%rsp),%rdi# restore %rdi
 	mov	96-8(%rsp),%rsi	# restore %rsi
-	addq	$96,%rsp	# delete space on stack
+	leave			# delete space on stack
 	ret
 x86_64_wrap_end:
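
Two consequences of the new prologue are worth spelling out. First,
once "and $-16,%rsp" has run, the distance from %rsp to the caller's
stack arguments is no longer a compile-time constant, which is why the
arg4/arg5 loads and the copy source switch from %rsp-relative to
%rbp-relative addressing. Second, "addq $96,%rsp" can no longer restore
the original stack pointer (the "and" discarded its low bits), so the
epilogue becomes "leave". A sketch of the frame implied by the patch,
with the offsets annotated (the annotations are mine, not from the
commit):

	# 0(%rbp)	saved %rbp (pushed by the new prologue)
	# 8(%rbp)	return address -- %rsp pointed here on entry, so an
	#		old "stack+N" offset becomes N+8(%rbp)
	# 40+8(%rbp)	Win64 arg4 (stack+40), moved into SysV %r8
	# 48+8(%rbp)	Win64 arg5 (stack+48), moved into SysV %r9
	# 56+8(%rbp)	remaining stack args (stack+56): 10 quadwords
	#		copied down to the new, 16-byte aligned stack top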