1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
|
/*
* arm/ioreq.c: hardware virtual machine I/O emulation
*
* Copyright (c) 2019 Arm ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; If not, see <http://www.gnu.org/licenses/>.
*/
#include <xen/domain.h>
#include <xen/ioreq.h>
#include <asm/traps.h>
#include <public/hvm/ioreq.h>
/*
 * Complete an I/O emulation request that an ioreq server has finished:
 * clear the per-vCPU ioreq state and, for a read, write the (sign-extended)
 * result back into the guest register named by the data abort syndrome.
 */
enum io_state handle_ioserv(struct cpu_user_regs *regs, struct vcpu *v)
{
    const union hsr hsr = { .bits = regs->hsr };
    const struct hsr_dabt dabt = hsr.dabt;
    /* Code is similar to handle_read */
    register_t data = v->io.req.data;

    /* We are done with the IO */
    v->io.req.state = STATE_IOREQ_NONE;

    /* Only reads carry a result back into a guest register. */
    if ( !dabt.write )
    {
        data = sign_extend(dabt, data);
        set_user_reg(regs, dabt.reg, data);
    }

    return IO_HANDLED;
}
/*
 * Try to forward an MMIO access to an ioreq server (device emulator).
 *
 * Builds an ioreq from the data abort information in @info, finds a server
 * registered for the faulting guest physical address, and sends the request.
 * Returns IO_HANDLED when the access is complete (or a response is already
 * ready), IO_RETRY when the vCPU must wait for the server's reply,
 * IO_UNHANDLED when no server claims the address, and IO_ABORT on error.
 */
enum io_state try_fwd_ioserv(struct cpu_user_regs *regs,
                             struct vcpu *v, mmio_info_t *info)
{
    struct vcpu_io *vio = &v->io;
    /* Snapshot of the request; only committed to vio->req once a server
     * has been selected and the syndrome validated. */
    ioreq_t p = {
        .type = IOREQ_TYPE_COPY,
        .addr = info->gpa,
        .size = 1 << info->dabt.size,
        .count = 1,
        .dir = !info->dabt.write,
        /*
         * On x86, df is used by 'rep' instruction to tell the direction
         * to iterate (forward or backward).
         * On Arm, all the accesses to MMIO region will do a single
         * memory access. So for now, we can safely always set to 0.
         */
        .df = 0,
        .data = get_user_reg(regs, info->dabt.reg),
        .state = STATE_IOREQ_READY,
    };
    struct ioreq_server *s = NULL;
    enum io_state rc;
    /* Only start a new request from the idle state; a ready response
     * means a previously forwarded access has completed. */
    switch ( vio->req.state )
    {
    case STATE_IOREQ_NONE:
        break;
    case STATE_IORESP_READY:
        return IO_HANDLED;
    default:
        gdprintk(XENLOG_ERR, "wrong state %u\n", vio->req.state);
        return IO_ABORT;
    }
    s = ioreq_server_select(v->domain, &p);
    if ( !s )
        return IO_UNHANDLED;
    /* Without valid syndrome information the abort cannot be emulated. */
    if ( !info->dabt.valid )
        return IO_ABORT;
    /* Commit the in-flight request before sending so completion code can
     * find it in vio->req. */
    vio->req = p;
    rc = ioreq_send(s, &p, 0);
    /* Resolve the outcome: drop the request on completion/shutdown,
     * short-circuit when no completion step is needed, otherwise arrange
     * for MMIO completion to run after the server replies. */
    if ( rc != IO_RETRY || v->domain->is_shutting_down )
        vio->req.state = STATE_IOREQ_NONE;
    else if ( !ioreq_needs_completion(&vio->req) )
        rc = IO_HANDLED;
    else
        vio->completion = IO_mmio_completion;
    return rc;
}
/*
 * Completion hook for an MMIO access that was forwarded to an ioreq server:
 * replay the access now that the response is available and, on success,
 * step the guest PC past the faulting instruction.
 */
bool ioreq_complete_mmio(void)
{
    struct vcpu *v = current;
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    const union hsr hsr = { .bits = regs->hsr };
    paddr_t addr = v->io.req.addr;
    const bool handled = (try_handle_mmio(regs, hsr, addr) == IO_HANDLED);

    /* The instruction is only complete once the MMIO access succeeded. */
    if ( handled )
        advance_pc(regs, hsr);

    return handled;
}
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/
|