/*
 * hvm/ioreq.c: hardware virtual machine I/O emulation
 *
 * Copyright (c) 2016 Citrix Systems Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/ctype.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/init.h>
#include <xen/irq.h>
#include <xen/lib.h>
#include <xen/paging.h>
#include <xen/sched.h>
#include <xen/softirq.h>
#include <xen/trace.h>
#include <xen/vpci.h>

#include <asm/hvm/emulate.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/ioreq.h>
#include <asm/hvm/vmx/vmx.h>

#include <public/hvm/ioreq.h>
#include <public/hvm/params.h>

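/* Complete an in-flight MMIO access by re-running the instruction emulator. */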
bool ioreq_complete_mmio(void)
{
    return handle_mmio();
}

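/*
 * Handle the arch-specific part of ioreq completion.  The only completion
 * needing x86-specific handling is HVMIO_realmode_completion, which
 * re-emulates one instruction in the VMX real-mode context.
 */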
bool arch_vcpu_ioreq_completion(enum hvm_io_completion io_completion)
{
    switch ( io_completion )
    {
    case HVMIO_realmode_completion:
    {
        struct hvm_emulate_ctxt ctxt;

        hvm_emulate_init_once(&ctxt, NULL, guest_cpu_user_regs());
        vmx_realmode_emulate_one(&ctxt);
        hvm_emulate_writeback(&ctxt);

        break;
    }

    default:
        ASSERT_UNREACHABLE();
        break;
    }

    return true;
}

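/*
 * Claim one of the two 'legacy' ioreq GFNs (the HVM_PARAM_IOREQ_PFN and
 * HVM_PARAM_BUFIOREQ_PFN pages) if either is still unused.
 */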
static gfn_t hvm_alloc_legacy_ioreq_gfn(struct ioreq_server *s)
{
    struct domain *d = s->target;
    unsigned int i;

    BUILD_BUG_ON(HVM_PARAM_BUFIOREQ_PFN != HVM_PARAM_IOREQ_PFN + 1);

    for ( i = HVM_PARAM_IOREQ_PFN; i <= HVM_PARAM_BUFIOREQ_PFN; i++ )
    {
        if ( !test_and_clear_bit(i, &d->arch.hvm.ioreq_gfn.legacy_mask) )
            return _gfn(d->arch.hvm.params[i]);
    }

    return INVALID_GFN;
}

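/* Allocate a GFN for an ioreq server page from the domain's ioreq GFN pool. */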
static gfn_t hvm_alloc_ioreq_gfn(struct ioreq_server *s)
{
    struct domain *d = s->target;
    unsigned int i;

    for ( i = 0; i < sizeof(d->arch.hvm.ioreq_gfn.mask) * 8; i++ )
    {
        if ( test_and_clear_bit(i, &d->arch.hvm.ioreq_gfn.mask) )
            return _gfn(d->arch.hvm.ioreq_gfn.base + i);
    }

    /*
     * If we are out of 'normal' GFNs then we may still have a 'legacy'
     * GFN available.
     */
    return hvm_alloc_legacy_ioreq_gfn(s);
}

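/*
 * If the GFN is one of the legacy pair, mark it free again and return true;
 * otherwise return false so the caller falls back to the regular pool.
 */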
static bool hvm_free_legacy_ioreq_gfn(struct ioreq_server *s,
                                      gfn_t gfn)
{
    struct domain *d = s->target;
    unsigned int i;

    for ( i = HVM_PARAM_IOREQ_PFN; i <= HVM_PARAM_BUFIOREQ_PFN; i++ )
    {
        if ( gfn_eq(gfn, _gfn(d->arch.hvm.params[i])) )
            break;
    }
    if ( i > HVM_PARAM_BUFIOREQ_PFN )
        return false;

    set_bit(i, &d->arch.hvm.ioreq_gfn.legacy_mask);
    return true;
}

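/* Return a GFN to whichever pool (legacy or regular) it was allocated from. */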
static void hvm_free_ioreq_gfn(struct ioreq_server *s, gfn_t gfn)
{
    struct domain *d = s->target;
    unsigned int i = gfn_x(gfn) - d->arch.hvm.ioreq_gfn.base;

    ASSERT(!gfn_eq(gfn, INVALID_GFN));

    if ( !hvm_free_legacy_ioreq_gfn(s, gfn) )
    {
        ASSERT(i < sizeof(d->arch.hvm.ioreq_gfn.mask) * 8);
        set_bit(i, &d->arch.hvm.ioreq_gfn.mask);
    }
}

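/*
 * Tear down Xen's mapping of the (buffered) ioreq page and return its GFN
 * to the pool.  Harmless if no GFN is currently mapped.
 */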
static void hvm_unmap_ioreq_gfn(struct ioreq_server *s, bool buf)
{
    struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;

    if ( gfn_eq(iorp->gfn, INVALID_GFN) )
        return;

    destroy_ring_for_helper(&iorp->va, iorp->page);
    iorp->page = NULL;

    hvm_free_ioreq_gfn(s, iorp->gfn);
    iorp->gfn = INVALID_GFN;
}

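/*
 * Allocate a GFN for the (buffered) ioreq page and map it into Xen's
 * address space.
 */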
static int hvm_map_ioreq_gfn(struct ioreq_server *s, bool buf)
{
    struct domain *d = s->target;
    struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
    int rc;

    if ( iorp->page )
    {
        /*
         * If a page has already been allocated (which will happen on
         * demand if hvm_get_ioreq_server_frame() is called), then
         * mapping a guest frame is not permitted.
         */
        if ( gfn_eq(iorp->gfn, INVALID_GFN) )
            return -EPERM;

        return 0;
    }

    if ( d->is_dying )
        return -EINVAL;

    iorp->gfn = hvm_alloc_ioreq_gfn(s);

    if ( gfn_eq(iorp->gfn, INVALID_GFN) )
        return -ENOMEM;

    rc = prepare_ring_for_helper(d, gfn_x(iorp->gfn), &iorp->page,
                                 &iorp->va);

    if ( rc )
        hvm_unmap_ioreq_gfn(s, buf);

    return rc;
}

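/* Take the (buffered) ioreq page out of the guest physmap and scrub it. */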
static void hvm_remove_ioreq_gfn(struct ioreq_server *s, bool buf)
{
    struct domain *d = s->target;
    struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;

    if ( gfn_eq(iorp->gfn, INVALID_GFN) )
        return;

    if ( guest_physmap_remove_page(d, iorp->gfn,
                                   page_to_mfn(iorp->page), 0) )
        domain_crash(d);
    clear_page(iorp->va);
}

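/*
 * Scrub the (buffered) ioreq page and put it back into the guest physmap,
 * marking it dirty for the benefit of log-dirty tracking.
 */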
static int hvm_add_ioreq_gfn(struct ioreq_server *s, bool buf)
{
    struct domain *d = s->target;
    struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
    int rc;

    if ( gfn_eq(iorp->gfn, INVALID_GFN) )
        return 0;

    clear_page(iorp->va);

    rc = guest_physmap_add_page(d, iorp->gfn,
                                page_to_mfn(iorp->page), 0);
    if ( rc == 0 )
        paging_mark_pfn_dirty(d, _pfn(gfn_x(iorp->gfn)));

    return rc;
}

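/*
 * Map the synchronous ioreq page and, if the server handles buffered
 * ioreqs, the buffered page too; unwind the first mapping on failure.
 */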
int arch_ioreq_server_map_pages(struct ioreq_server *s)
{
    int rc;

    rc = hvm_map_ioreq_gfn(s, false);

    if ( !rc && HANDLE_BUFIOREQ(s) )
        rc = hvm_map_ioreq_gfn(s, true);

    if ( rc )
        hvm_unmap_ioreq_gfn(s, false);

    return rc;
}

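/* Unmap both ioreq pages (unmapping a page that was never mapped is a no-op). */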
void arch_ioreq_server_unmap_pages(struct ioreq_server *s)
{
    hvm_unmap_ioreq_gfn(s, true);
    hvm_unmap_ioreq_gfn(s, false);
}

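/*
 * While the server is enabled its ioreq pages are removed from the guest
 * physmap so the guest cannot interfere with them.
 */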
void arch_ioreq_server_enable(struct ioreq_server *s)
{
    hvm_remove_ioreq_gfn(s, false);
    hvm_remove_ioreq_gfn(s, true);
}

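/*
 * Put the ioreq pages back into the guest physmap once the server is
 * disabled.
 */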
void arch_ioreq_server_disable(struct ioreq_server *s)
{
    hvm_add_ioreq_gfn(s, true);
    hvm_add_ioreq_gfn(s, false);
}

/* Called when target domain is paused */
void arch_ioreq_server_destroy(struct ioreq_server *s)
{
    p2m_set_ioreq_server(s->target, 0, s);
}

/* Called with ioreq_server lock held */
int arch_ioreq_server_map_mem_type(struct domain *d,
                                   struct ioreq_server *s,
                                   uint32_t flags)
{
    int rc = p2m_set_ioreq_server(d, flags, s);

    if ( rc == 0 && flags == 0 )
    {
        const struct p2m_domain *p2m = p2m_get_hostp2m(d);

        if ( read_atomic(&p2m->ioreq.entry_count) )
            p2m_change_entry_type_global(d, p2m_ioreq_server, p2m_ram_rw);
    }

    return rc;
}

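/*
 * The no-op relocation of the 0xcf8 handler merely checks whether the
 * handler registered by arch_ioreq_domain_init() is present, i.e. whether
 * ioreq handling was ever set up for this domain.
 */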
bool arch_ioreq_server_destroy_all(struct domain *d)
{
    return relocate_portio_handler(d, 0xcf8, 0xcf8, 4);
}

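/*
 * Translate an ioreq into the (type, addr) pair used to match it against
 * an ioreq server's registered ranges.  PIO accesses to 0xcfc-0xcff with
 * CF8 enabled are reported as PCI config cycles (SBDF in the upper 32 bits
 * of the address, register offset below, extended on AMD families
 * 0x10-0x16 when extended config space access is enabled); anything else
 * is reported verbatim as a port or memory range.
 */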
int arch_ioreq_server_get_type_addr(const struct domain *d,
                                    const ioreq_t *p,
                                    uint8_t *type,
                                    uint64_t *addr)
{
    unsigned int cf8 = d->arch.hvm.pci_cf8;

    if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
        return -EINVAL;

    if ( p->type == IOREQ_TYPE_PIO &&
         (p->addr & ~3) == 0xcfc &&
         CF8_ENABLED(cf8) )
    {
        unsigned int x86_fam, reg;
        pci_sbdf_t sbdf;

        reg = hvm_pci_decode_addr(cf8, p->addr, &sbdf);

        /* PCI config data cycle */
        *type = XEN_DMOP_IO_RANGE_PCI;
        *addr = ((uint64_t)sbdf.sbdf << 32) | reg;
        /* AMD extended configuration space access? */
        if ( CF8_ADDR_HI(cf8) &&
             d->arch.cpuid->x86_vendor == X86_VENDOR_AMD &&
             (x86_fam = get_cpu_family(
                 d->arch.cpuid->basic.raw_fms, NULL, NULL)) >= 0x10 &&
             x86_fam < 0x17 )
        {
            uint64_t msr_val;

            if ( !rdmsr_safe(MSR_AMD64_NB_CFG, msr_val) &&
                 (msr_val & (1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT)) )
                *addr |= CF8_ADDR_HI(cf8);
        }
    }
    else
    {
        *type = (p->type == IOREQ_TYPE_PIO) ?
                 XEN_DMOP_IO_RANGE_PORT : XEN_DMOP_IO_RANGE_MEMORY;
        *addr = p->addr;
    }

    return 0;
}

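/*
 * Intercept for port 0xcf8: latch 32-bit writes of the PCI config address
 * so config data cycles can be decoded later, then let the access be
 * handled as normal.
 */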
static int hvm_access_cf8(
    int dir, unsigned int port, unsigned int bytes, uint32_t *val)
{
    struct domain *d = current->domain;

    if ( dir == IOREQ_WRITE && bytes == 4 )
        d->arch.hvm.pci_cf8 = *val;

    /* We always need to fall through to the catch all emulator */
    return X86EMUL_UNHANDLEABLE;
}

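/* Register the 0xcf8 intercept used to track the PCI config address. */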
void arch_ioreq_domain_init(struct domain *d)
{
    register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */