1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
|
/*
* entry.S: VMX architecture-specific entry/exit handling.
* Copyright (c) 2004, Intel Corporation.
* Copyright (c) 2008, Citrix Systems, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; If not, see <http://www.gnu.org/licenses/>.
*/
.file "vmx/entry.S"
#include <asm/asm_defns.h>
#include <asm/page.h>
#define VMRESUME .byte 0x0f,0x01,0xc3
#define VMLAUNCH .byte 0x0f,0x01,0xc2
ENTRY(vmx_asm_vmexit_handler)
/*
 * VMExit from a guest lands here (host RIP from the VMCS).  Build the
 * cpu_user_regs frame, restore Xen's speculation state, then hand off to C.
 */
SAVE_ALL
/* Read %cr2 before anything can fault: it still holds the guest's value. */
mov %cr2,%rax
GET_CURRENT(bx)
/* The vCPU context is now live: subsequent entries must use VMRESUME. */
movb $1,VCPU_vmx_launched(%rbx)
/* Stash guest %cr2; it is reinstated just before the next VM entry below. */
mov %rax,VCPU_hvm_guest_cr2(%rbx)
/* SPEC_CTRL_ENTRY_FROM_VMX Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
/* Overwrite the RSB so guest-controlled return targets can't be consumed. */
ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_HVM
/* Switch MSR_SPEC_CTRL from the guest's choice back to Xen's. */
.macro restore_spec_ctrl
mov $MSR_SPEC_CTRL, %ecx
movzbl CPUINFO_xen_spec_ctrl(%rsp), %eax
xor %edx, %edx
wrmsr
.endm
ALTERNATIVE "", restore_spec_ctrl, X86_FEATURE_SC_MSR_HVM
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
/* Hardware clears MSR_DEBUGCTL on VMExit. Reinstate it if debugging Xen. */
.macro restore_lbr
mov $IA32_DEBUGCTLMSR_LBR, %eax
mov $MSR_IA32_DEBUGCTLMSR, %ecx
xor %edx, %edx
wrmsr
.endm
ALTERNATIVE "", restore_lbr, X86_FEATURE_XEN_LBR
/* C handler takes the register frame: vmx_vmexit_handler(regs). */
mov %rsp,%rdi
call vmx_vmexit_handler
/*
 * Common VM entry path.  Reached from the exit handler above and from
 * vmx_asm_do_vmentry / the slow-path tails.  Requires %rbx = current vCPU
 * and %rsp = regs/cpuinfo.
 */
.Lvmx_do_vmentry:
call vmx_intr_assist
call nvmx_switch_guest
ASSERT_NOT_IN_ATOMIC
/* %rax = this vCPU's processor id, scaled into a per-CPU irq_stat index. */
mov VCPU_processor(%rbx),%eax
lea irq_stat+IRQSTAT_softirq_pending(%rip),%rdx
xor %ecx,%ecx
shl $IRQSTAT_shift,%eax
/*
 * Interrupts stay disabled from here until the VM entry (or an explicit
 * sti on a slow path), so the softirq-pending check can't race new work.
 */
cli
/* Any softirqs pending for this CPU?  (%ecx is zero.) */
cmp %ecx,(%rdx,%rax,1)
jnz .Lvmx_process_softirqs
/* Pending emulation request?  (%cl is zero.) */
cmp %cl,VCPU_vmx_emulate(%rbx)
jne .Lvmx_goto_emulator
cmp %cl,VCPU_vmx_realmode(%rbx)
UNLIKELY_START(ne, realmode)
/* In realmode: emulate unless no vm86 segments need special handling. */
cmp %cx,VCPU_vm86_seg_mask(%rbx)
jnz .Lvmx_goto_emulator
mov %rsp,%rdi
call vmx_enter_realmode
UNLIKELY_END(realmode)
mov %rsp,%rdi
call vmx_vmenter_helper
/* Zero return (%al) means the entry must be restarted. */
test %al, %al
jz .Lvmx_vmentry_restart
/*
 * Load the guest's intended MSR_SPEC_CTRL value into %eax.
 * NOTE(review): no visible consumer in this chunk — presumably used by a
 * spec-ctrl exit-to-guest sequence; confirm against the full file.
 */
mov VCPU_arch_msrs(%rbx), %rax
mov VCPUMSR_spec_ctrl_raw(%rax), %eax
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
/* SPEC_CTRL_EXIT_TO_VMX Req: %rsp=regs/cpuinfo Clob: */
/* Scrub microarchitectural buffers (MDS et al.) before entering the guest. */
ALTERNATIVE "", __stringify(verw CPUINFO_verw_sel(%rsp)), X86_FEATURE_SC_VERW_HVM
mov VCPU_hvm_guest_cr2(%rbx),%rax
/* Unwind the SAVE_ALL frame; pop order must mirror the frame layout. */
pop %r15
pop %r14
pop %r13
pop %r12
pop %rbp
/* Reinstate the guest's %cr2 saved on exit above. */
mov %rax,%cr2
/*
 * Test vmx_launched while %rbx is still valid; `pop` preserves flags, so
 * the result survives until the `je` after the remaining pops.
 */
cmpb $0,VCPU_vmx_launched(%rbx)
pop %rbx
pop %r11
pop %r10
pop %r9
pop %r8
pop %rax
pop %rcx
pop %rdx
pop %rsi
pop %rdi
je .Lvmx_launch
/*.Lvmx_resume:*/
VMRESUME
/* VMRESUME/VMLAUNCH only fall through on VM entry failure. */
jmp .Lvmx_vmentry_fail
.Lvmx_launch:
VMLAUNCH
.Lvmx_vmentry_fail:
sti
SAVE_ALL
/*
 * SPEC_CTRL_ENTRY notes
 *
 * If we end up here, no guest code has executed. The MSR lists have
 * not been processed, so we still have Xen's choice of MSR_SPEC_CTRL
 * in context, and the RSB is unchanged.
 */
call vmx_vmentry_failure
jmp .Lvmx_process_softirqs
/*
 * Initial VM entry point (first entry for a vCPU): load the current vCPU
 * pointer into %rbx and join the common entry path.
 */
ENTRY(vmx_asm_do_vmentry)
GET_CURRENT(bx)
jmp .Lvmx_do_vmentry
/* vmx_vmenter_helper asked for a restart: re-enable IRQs and retry. */
.Lvmx_vmentry_restart:
sti
jmp .Lvmx_do_vmentry
/* Emulation needed (vmx_emulate, or realmode with vm86 segment state). */
.Lvmx_goto_emulator:
sti
/* vmx_realmode(regs) */
mov %rsp,%rdi
call vmx_realmode
jmp .Lvmx_do_vmentry
/* Softirqs were pending at the cli check: service them, then retry entry. */
.Lvmx_process_softirqs:
sti
call do_softirq
jmp .Lvmx_do_vmentry
|