/*
* include/asm-x86/monitor.h
*
* Arch-specific monitor_op domctl handler.
*
* Copyright (c) 2015 Tamas K Lengyel (tamas@tklengyel.com)
* Copyright (c) 2016, Bitdefender S.R.L.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ASM_X86_MONITOR_H__
#define __ASM_X86_MONITOR_H__
#include <xen/sched.h>
/* Map a control-register index (e.g. 3 for CR3) to its bit in a monitor bitmask. */
#define monitor_ctrlreg_bitmask(ctrlreg_index) (1U << (ctrlreg_index))
/*
 * Per-domain bitmaps of MSRs whose writes are monitored, one bit per MSR
 * index, 8192 bits per range.  NOTE(review): the three ranges presumably
 * mirror the hardware MSR intercept layout (low architectural MSRs,
 * hypervisor/synthetic MSRs, and the high AMD/EFER-style range) — confirm
 * against the code that indexes these bitmaps.
 */
struct monitor_msr_bitmap {
DECLARE_BITMAP(low, 8192);
DECLARE_BITMAP(hypervisor, 8192);
DECLARE_BITMAP(high, 8192);
};
/*
 * Record whether monitor guest-request events may be raised from guest
 * userspace for domain @d.
 */
static inline void arch_monitor_allow_userspace(struct domain *d,
                                                bool allow_userspace)
{
    d->arch.monitor.guest_request_userspace_enabled = allow_userspace;
}
/*
 * Arch-specific monitor_op domctl handler.
 *
 * Returns 0 on success, -EINVAL if EMULATE_EACH_REP is toggled without a
 * vm_event subscriber on vCPU 0, or -EOPNOTSUPP for any op not handled
 * here.
 */
static inline
int arch_monitor_domctl_op(struct domain *d, struct xen_domctl_monitor_op *mop)
{
    int ret = 0;

    switch ( mop->op )
    {
    case XEN_DOMCTL_MONITOR_OP_EMULATE_EACH_REP:
        domain_pause(d);
        /*
         * Enabling mem_access_emulate_each_rep without a vm_event subscriber
         * is meaningless.
         */
        if ( !d->max_vcpus || !d->vcpu[0] || !d->vcpu[0]->arch.vm_event )
            ret = -EINVAL;
        else
            d->arch.mem_access_emulate_each_rep = !!mop->event;
        domain_unpause(d);
        break;

    case XEN_DOMCTL_MONITOR_OP_CONTROL_REGISTERS:
        d->arch.monitor.control_register_values = true;
        break;

    default:
        ret = -EOPNOTSUPP;
        break;
    }

    return ret;
}
/*
 * Report the monitor event types domain @d can subscribe to, as a bitmask
 * of XEN_DOMCTL_MONITOR_EVENT_* values.  Non-HVM domains get no events.
 */
static inline uint32_t arch_monitor_get_capabilities(struct domain *d)
{
    uint32_t caps;

    /*
     * At the moment only Intel and AMD HVM domains are supported. However,
     * event delivery could be extended to PV domains.
     */
    if ( !is_hvm_domain(d) )
        return 0;

    /* Events unconditionally available on HVM. */
    caps = (1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST) |
           (1U << XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT) |
           (1U << XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR) |
           (1U << XEN_DOMCTL_MONITOR_EVENT_INTERRUPT) |
           (1U << XEN_DOMCTL_MONITOR_EVENT_CPUID) |
           (1U << XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION) |
           (1U << XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG) |
           (1U << XEN_DOMCTL_MONITOR_EVENT_EMUL_UNIMPLEMENTED) |
           (1U << XEN_DOMCTL_MONITOR_EVENT_INGUEST_PAGEFAULT);

    /* Events gated on hardware/vendor support. */
    if ( hvm_is_singlestep_supported() )
        caps |= 1U << XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP;

    if ( hvm_has_set_descriptor_access_exiting() )
        caps |= 1U << XEN_DOMCTL_MONITOR_EVENT_DESC_ACCESS;

    return caps;
}
int arch_monitor_domctl_event(struct domain *d,
struct xen_domctl_monitor_op *mop);
#ifdef CONFIG_HVM
/* Allocate / free per-domain monitor state; defined in arch monitor code. */
int arch_monitor_init_domain(struct domain *d);
void arch_monitor_cleanup_domain(struct domain *d);
#else
/* Without HVM support monitoring is unavailable: init fails, cleanup is a no-op. */
static inline int arch_monitor_init_domain(struct domain *d)
{
return -EOPNOTSUPP;
}
static inline void arch_monitor_cleanup_domain(struct domain *d) {}
#endif
bool monitored_msr(const struct domain *d, u32 msr);
bool monitored_msr_onchangeonly(const struct domain *d, u32 msr);
#endif /* __ASM_X86_MONITOR_H__ */