author     Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>  2020-11-30 12:31:27 +0200
committer  Alex Bennée <alex.bennee@linaro.org>                  2020-11-30 15:12:50 +0000
commit     dc67273572685e7a8d864bb4835af0b50eb7c8fc (patch)
tree       96a63f04cb3fde02bfea2de245930c4c696cab2a /xen
parent     1a534d5fd0af42a28b22b21252af68ca47a86ddd (diff)
xen/ioreq: Remove "hvm" prefixes from involved function names
This patch removes "hvm" prefixes and infixes from IOREQ related function
names in the common code and performs a renaming where appropriate according
to the more consistent new naming scheme:

- IOREQ server functions should start with "ioreq_server_"
- IOREQ functions should start with "ioreq_"

A few function names are clarified to better fit into their purposes:

handle_hvm_io_completion -> vcpu_ioreq_handle_completion
hvm_io_pending           -> vcpu_ioreq_pending
hvm_ioreq_init           -> ioreq_domain_init
hvm_alloc_ioreq_mfn      -> ioreq_server_alloc_mfn
hvm_free_ioreq_mfn       -> ioreq_server_free_mfn

Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
CC: Julien Grall <julien.grall@arm.com>
Message-Id: <1606732298-22107-13-git-send-email-olekstysh@gmail.com>
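For reference, below is a minimal sketch (not part of this patch) of how a Xen-internal caller would exercise the renamed common API as declared in the updated xen/include/xen/ioreq.h. The demo_ioreq_server_setup() name, the XEN_DMOP_IO_RANGE_PORT range type and the port values are illustrative assumptions; only the ioreq_server_*() signatures come from this patch.

    #include <xen/ioreq.h>

    /* Illustrative only: set up and enable an IOREQ server with the new names. */
    static int demo_ioreq_server_setup(struct domain *d)
    {
        ioservid_t id;
        int rc;

        /* Was hvm_create_ioreq_server(). */
        rc = ioreq_server_create(d, HVM_IOREQSRV_BUFIOREQ_OFF, &id);
        if ( rc )
            return rc;

        /* Was hvm_map_io_range_to_ioreq_server(). */
        rc = ioreq_server_map_io_range(d, id, XEN_DMOP_IO_RANGE_PORT,
                                       0x3f8, 0x3ff);

        /* Was hvm_set_ioreq_server_state(). */
        if ( !rc )
            rc = ioreq_server_set_state(d, id, true);

        /* Was hvm_destroy_ioreq_server(). */
        if ( rc )
            ioreq_server_destroy(d, id);

        return rc;
    }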
Diffstat (limited to 'xen')
-rw-r--r--  xen/arch/x86/hvm/dm.c        |   4
-rw-r--r--  xen/arch/x86/hvm/emulate.c   |   6
-rw-r--r--  xen/arch/x86/hvm/hvm.c       |  10
-rw-r--r--  xen/arch/x86/hvm/io.c        |   6
-rw-r--r--  xen/arch/x86/hvm/ioreq.c     |   2
-rw-r--r--  xen/arch/x86/hvm/stdvga.c    |   4
-rw-r--r--  xen/arch/x86/hvm/vmx/vvmx.c  |   2
-rw-r--r--  xen/common/dm.c              |  28
-rw-r--r--  xen/common/ioreq.c           | 174
-rw-r--r--  xen/common/memory.c          |   2
-rw-r--r--  xen/include/xen/ioreq.h      |  67
11 files changed, 153 insertions, 152 deletions
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 35f860aba1..0b6319eee5 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -352,8 +352,8 @@ int arch_dm_op(struct xen_dm_op *op, struct domain *d,
break;
if ( first_gfn == 0 )
- rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
- data->type, data->flags);
+ rc = ioreq_server_map_mem_type(d, data->id,
+ data->type, data->flags);
else
rc = 0;
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 04e49945b3..a025f89824 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -261,7 +261,7 @@ static int hvmemul_do_io(
* an ioreq server that can handle it.
*
* Rules:
- * A> PIO or MMIO accesses run through hvm_select_ioreq_server() to
+ * A> PIO or MMIO accesses run through ioreq_server_select() to
* choose the ioreq server by range. If no server is found, the access
* is ignored.
*
@@ -323,7 +323,7 @@ static int hvmemul_do_io(
}
if ( !s )
- s = hvm_select_ioreq_server(currd, &p);
+ s = ioreq_server_select(currd, &p);
/* If there is no suitable backing DM, just ignore accesses */
if ( !s )
@@ -333,7 +333,7 @@ static int hvmemul_do_io(
}
else
{
- rc = hvm_send_ioreq(s, &p, 0);
+ rc = ioreq_send(s, &p, 0);
if ( rc != X86EMUL_RETRY || currd->is_shutting_down )
vio->req.state = STATE_IOREQ_NONE;
else if ( !ioreq_needs_completion(&vio->req) )
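The "Rules" comment in the hunk above describes the dispatch protocol the renamed helpers implement: select an ioreq server by range, ignore the access if no server matches, otherwise submit the request. A condensed, hedged sketch of that pattern follows; the demo_dispatch_pio_mmio() wrapper and its "treat as handled" result for the no-server case are assumptions, and the real logic lives in hvmemul_do_io().

    #include <xen/ioreq.h>

    /* Sketch: route a PIO/MMIO access to an ioreq server chosen by range. */
    static int demo_dispatch_pio_mmio(struct domain *currd, ioreq_t *p)
    {
        /* Was hvm_select_ioreq_server(). */
        struct ioreq_server *s = ioreq_server_select(currd, p);

        /* No suitable backing device model: the access is simply ignored. */
        if ( !s )
            return IOREQ_STATUS_HANDLED;

        /* Was hvm_send_ioreq(); the final argument selects buffered ioreqs. */
        return ioreq_send(s, p, 0);
    }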
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index cc469093cc..8e3c2e2592 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -546,7 +546,7 @@ void hvm_do_resume(struct vcpu *v)
pt_restore_timer(v);
- if ( !handle_hvm_io_completion(v) )
+ if ( !vcpu_ioreq_handle_completion(v) )
return;
if ( unlikely(v->arch.vm_event) )
@@ -677,7 +677,7 @@ int hvm_domain_initialise(struct domain *d)
register_g2m_portio_handler(d);
register_vpci_portio_handler(d);
- hvm_ioreq_init(d);
+ ioreq_domain_init(d);
hvm_init_guest_time(d);
@@ -739,7 +739,7 @@ void hvm_domain_relinquish_resources(struct domain *d)
viridian_domain_deinit(d);
- hvm_destroy_all_ioreq_servers(d);
+ ioreq_server_destroy_all(d);
msixtbl_pt_cleanup(d);
@@ -1582,7 +1582,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
if ( rc )
goto fail5;
- rc = hvm_all_ioreq_servers_add_vcpu(d, v);
+ rc = ioreq_server_add_vcpu_all(d, v);
if ( rc != 0 )
goto fail6;
@@ -1618,7 +1618,7 @@ void hvm_vcpu_destroy(struct vcpu *v)
{
viridian_vcpu_deinit(v);
- hvm_all_ioreq_servers_remove_vcpu(v->domain, v);
+ ioreq_server_remove_vcpu_all(v->domain, v);
if ( hvm_altp2m_supported() )
altp2m_vcpu_destroy(v);
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 327a6a2797..a0dd8d1ad6 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -60,7 +60,7 @@ void send_timeoffset_req(unsigned long timeoff)
if ( timeoff == 0 )
return;
- if ( hvm_broadcast_ioreq(&p, true) != 0 )
+ if ( ioreq_broadcast(&p, true) != 0 )
gprintk(XENLOG_ERR, "Unsuccessful timeoffset update\n");
}
@@ -74,7 +74,7 @@ void send_invalidate_req(void)
.data = ~0UL, /* flush all */
};
- if ( hvm_broadcast_ioreq(&p, false) != 0 )
+ if ( ioreq_broadcast(&p, false) != 0 )
gprintk(XENLOG_ERR, "Unsuccessful map-cache invalidate\n");
}
@@ -155,7 +155,7 @@ bool handle_pio(uint16_t port, unsigned int size, int dir)
* We should not advance RIP/EIP if the domain is shutting down or
* if X86EMUL_RETRY has been returned by an internal handler.
*/
- if ( curr->domain->is_shutting_down || !hvm_io_pending(curr) )
+ if ( curr->domain->is_shutting_down || !vcpu_ioreq_pending(curr) )
return false;
break;
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 7808b75d72..934189e3ba 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -154,7 +154,7 @@ static int hvm_map_ioreq_gfn(struct ioreq_server *s, bool buf)
{
/*
* If a page has already been allocated (which will happen on
- * demand if hvm_get_ioreq_server_frame() is called), then
+ * demand if ioreq_server_get_frame() is called), then
* mapping a guest frame is not permitted.
*/
if ( gfn_eq(iorp->gfn, INVALID_GFN) )
diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c
index bafb3f63d9..390ac512a3 100644
--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -507,11 +507,11 @@ static int stdvga_mem_write(const struct hvm_io_handler *handler,
}
done:
- srv = hvm_select_ioreq_server(current->domain, &p);
+ srv = ioreq_server_select(current->domain, &p);
if ( !srv )
return X86EMUL_UNHANDLEABLE;
- return hvm_send_ioreq(srv, &p, 1);
+ return ioreq_send(srv, &p, 1);
}
static bool_t stdvga_mem_accept(const struct hvm_io_handler *handler,
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 3a37e9ebea..a4813f00f5 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1516,7 +1516,7 @@ void nvmx_switch_guest(void)
* don't want to continue as this setup is not implemented nor supported
* as of right now.
*/
- if ( hvm_io_pending(v) )
+ if ( vcpu_ioreq_pending(v) )
return;
/*
* a softirq may interrupt us between a virtual vmentry is
diff --git a/xen/common/dm.c b/xen/common/dm.c
index 36e01a2a93..9d394fcf14 100644
--- a/xen/common/dm.c
+++ b/xen/common/dm.c
@@ -100,8 +100,8 @@ static int dm_op(const struct dmop_args *op_args)
if ( data->pad[0] || data->pad[1] || data->pad[2] )
break;
- rc = hvm_create_ioreq_server(d, data->handle_bufioreq,
- &data->id);
+ rc = ioreq_server_create(d, data->handle_bufioreq,
+ &data->id);
break;
}
@@ -117,12 +117,12 @@ static int dm_op(const struct dmop_args *op_args)
if ( data->flags & ~valid_flags )
break;
- rc = hvm_get_ioreq_server_info(d, data->id,
- (data->flags & XEN_DMOP_no_gfns) ?
- NULL : (unsigned long *)&data->ioreq_gfn,
- (data->flags & XEN_DMOP_no_gfns) ?
- NULL : (unsigned long *)&data->bufioreq_gfn,
- &data->bufioreq_port);
+ rc = ioreq_server_get_info(d, data->id,
+ (data->flags & XEN_DMOP_no_gfns) ?
+ NULL : (unsigned long *)&data->ioreq_gfn,
+ (data->flags & XEN_DMOP_no_gfns) ?
+ NULL : (unsigned long *)&data->bufioreq_gfn,
+ &data->bufioreq_port);
break;
}
@@ -135,8 +135,8 @@ static int dm_op(const struct dmop_args *op_args)
if ( data->pad )
break;
- rc = hvm_map_io_range_to_ioreq_server(d, data->id, data->type,
- data->start, data->end);
+ rc = ioreq_server_map_io_range(d, data->id, data->type,
+ data->start, data->end);
break;
}
@@ -149,8 +149,8 @@ static int dm_op(const struct dmop_args *op_args)
if ( data->pad )
break;
- rc = hvm_unmap_io_range_from_ioreq_server(d, data->id, data->type,
- data->start, data->end);
+ rc = ioreq_server_unmap_io_range(d, data->id, data->type,
+ data->start, data->end);
break;
}
@@ -163,7 +163,7 @@ static int dm_op(const struct dmop_args *op_args)
if ( data->pad )
break;
- rc = hvm_set_ioreq_server_state(d, data->id, !!data->enabled);
+ rc = ioreq_server_set_state(d, data->id, !!data->enabled);
break;
}
@@ -176,7 +176,7 @@ static int dm_op(const struct dmop_args *op_args)
if ( data->pad )
break;
- rc = hvm_destroy_ioreq_server(d, data->id);
+ rc = ioreq_server_destroy(d, data->id);
break;
}
diff --git a/xen/common/ioreq.c b/xen/common/ioreq.c
index caf4543adc..3ca5b960f3 100644
--- a/xen/common/ioreq.c
+++ b/xen/common/ioreq.c
@@ -59,7 +59,7 @@ static struct ioreq_server *get_ioreq_server(const struct domain *d,
* Iterate over all possible ioreq servers.
*
* NOTE: The iteration is backwards such that more recently created
- * ioreq servers are favoured in hvm_select_ioreq_server().
+ * ioreq servers are favoured in ioreq_server_select().
* This is a semantic that previously existed when ioreq servers
* were held in a linked list.
*/
@@ -106,12 +106,12 @@ static struct ioreq_vcpu *get_pending_vcpu(const struct vcpu *v,
return NULL;
}
-bool hvm_io_pending(struct vcpu *v)
+bool vcpu_ioreq_pending(struct vcpu *v)
{
return get_pending_vcpu(v, NULL);
}
-static bool hvm_wait_for_io(struct ioreq_vcpu *sv, ioreq_t *p)
+static bool wait_for_io(struct ioreq_vcpu *sv, ioreq_t *p)
{
unsigned int prev_state = STATE_IOREQ_NONE;
unsigned int state = p->state;
@@ -168,7 +168,7 @@ static bool hvm_wait_for_io(struct ioreq_vcpu *sv, ioreq_t *p)
return true;
}
-bool handle_hvm_io_completion(struct vcpu *v)
+bool vcpu_ioreq_handle_completion(struct vcpu *v)
{
struct domain *d = v->domain;
struct vcpu_io *vio = &v->io;
@@ -183,7 +183,7 @@ bool handle_hvm_io_completion(struct vcpu *v)
}
sv = get_pending_vcpu(v, &s);
- if ( sv && !hvm_wait_for_io(sv, get_ioreq(s, v)) )
+ if ( sv && !wait_for_io(sv, get_ioreq(s, v)) )
return false;
vio->req.state = ioreq_needs_completion(&vio->req) ?
@@ -214,7 +214,7 @@ bool handle_hvm_io_completion(struct vcpu *v)
return true;
}
-static int hvm_alloc_ioreq_mfn(struct ioreq_server *s, bool buf)
+static int ioreq_server_alloc_mfn(struct ioreq_server *s, bool buf)
{
struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
struct page_info *page;
@@ -223,7 +223,7 @@ static int hvm_alloc_ioreq_mfn(struct ioreq_server *s, bool buf)
{
/*
* If a guest frame has already been mapped (which may happen
- * on demand if hvm_get_ioreq_server_info() is called), then
+ * on demand if ioreq_server_get_info() is called), then
* allocating a page is not permitted.
*/
if ( !gfn_eq(iorp->gfn, INVALID_GFN) )
@@ -262,7 +262,7 @@ static int hvm_alloc_ioreq_mfn(struct ioreq_server *s, bool buf)
return -ENOMEM;
}
-static void hvm_free_ioreq_mfn(struct ioreq_server *s, bool buf)
+static void ioreq_server_free_mfn(struct ioreq_server *s, bool buf)
{
struct ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
struct page_info *page = iorp->page;
@@ -301,8 +301,8 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
return found;
}
-static void hvm_update_ioreq_evtchn(struct ioreq_server *s,
- struct ioreq_vcpu *sv)
+static void ioreq_update_evtchn(struct ioreq_server *s,
+ struct ioreq_vcpu *sv)
{
ASSERT(spin_is_locked(&s->lock));
@@ -314,8 +314,8 @@ static void hvm_update_ioreq_evtchn(struct ioreq_server *s,
}
}
-static int hvm_ioreq_server_add_vcpu(struct ioreq_server *s,
- struct vcpu *v)
+static int ioreq_server_add_vcpu(struct ioreq_server *s,
+ struct vcpu *v)
{
struct ioreq_vcpu *sv;
int rc;
@@ -350,7 +350,7 @@ static int hvm_ioreq_server_add_vcpu(struct ioreq_server *s,
list_add(&sv->list_entry, &s->ioreq_vcpu_list);
if ( s->enabled )
- hvm_update_ioreq_evtchn(s, sv);
+ ioreq_update_evtchn(s, sv);
spin_unlock(&s->lock);
return 0;
@@ -366,8 +366,8 @@ static int hvm_ioreq_server_add_vcpu(struct ioreq_server *s,
return rc;
}
-static void hvm_ioreq_server_remove_vcpu(struct ioreq_server *s,
- struct vcpu *v)
+static void ioreq_server_remove_vcpu(struct ioreq_server *s,
+ struct vcpu *v)
{
struct ioreq_vcpu *sv;
@@ -394,7 +394,7 @@ static void hvm_ioreq_server_remove_vcpu(struct ioreq_server *s,
spin_unlock(&s->lock);
}
-static void hvm_ioreq_server_remove_all_vcpus(struct ioreq_server *s)
+static void ioreq_server_remove_all_vcpus(struct ioreq_server *s)
{
struct ioreq_vcpu *sv, *next;
@@ -420,28 +420,28 @@ static void hvm_ioreq_server_remove_all_vcpus(struct ioreq_server *s)
spin_unlock(&s->lock);
}
-static int hvm_ioreq_server_alloc_pages(struct ioreq_server *s)
+static int ioreq_server_alloc_pages(struct ioreq_server *s)
{
int rc;
- rc = hvm_alloc_ioreq_mfn(s, false);
+ rc = ioreq_server_alloc_mfn(s, false);
if ( !rc && (s->bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF) )
- rc = hvm_alloc_ioreq_mfn(s, true);
+ rc = ioreq_server_alloc_mfn(s, true);
if ( rc )
- hvm_free_ioreq_mfn(s, false);
+ ioreq_server_free_mfn(s, false);
return rc;
}
-static void hvm_ioreq_server_free_pages(struct ioreq_server *s)
+static void ioreq_server_free_pages(struct ioreq_server *s)
{
- hvm_free_ioreq_mfn(s, true);
- hvm_free_ioreq_mfn(s, false);
+ ioreq_server_free_mfn(s, true);
+ ioreq_server_free_mfn(s, false);
}
-static void hvm_ioreq_server_free_rangesets(struct ioreq_server *s)
+static void ioreq_server_free_rangesets(struct ioreq_server *s)
{
unsigned int i;
@@ -449,8 +449,8 @@ static void hvm_ioreq_server_free_rangesets(struct ioreq_server *s)
rangeset_destroy(s->range[i]);
}
-static int hvm_ioreq_server_alloc_rangesets(struct ioreq_server *s,
- ioservid_t id)
+static int ioreq_server_alloc_rangesets(struct ioreq_server *s,
+ ioservid_t id)
{
unsigned int i;
int rc;
@@ -482,12 +482,12 @@ static int hvm_ioreq_server_alloc_rangesets(struct ioreq_server *s,
return 0;
fail:
- hvm_ioreq_server_free_rangesets(s);
+ ioreq_server_free_rangesets(s);
return rc;
}
-static void hvm_ioreq_server_enable(struct ioreq_server *s)
+static void ioreq_server_enable(struct ioreq_server *s)
{
struct ioreq_vcpu *sv;
@@ -503,13 +503,13 @@ static void hvm_ioreq_server_enable(struct ioreq_server *s)
list_for_each_entry ( sv,
&s->ioreq_vcpu_list,
list_entry )
- hvm_update_ioreq_evtchn(s, sv);
+ ioreq_update_evtchn(s, sv);
done:
spin_unlock(&s->lock);
}
-static void hvm_ioreq_server_disable(struct ioreq_server *s)
+static void ioreq_server_disable(struct ioreq_server *s)
{
spin_lock(&s->lock);
@@ -524,9 +524,9 @@ static void hvm_ioreq_server_disable(struct ioreq_server *s)
spin_unlock(&s->lock);
}
-static int hvm_ioreq_server_init(struct ioreq_server *s,
- struct domain *d, int bufioreq_handling,
- ioservid_t id)
+static int ioreq_server_init(struct ioreq_server *s,
+ struct domain *d, int bufioreq_handling,
+ ioservid_t id)
{
struct domain *currd = current->domain;
struct vcpu *v;
@@ -544,7 +544,7 @@ static int hvm_ioreq_server_init(struct ioreq_server *s,
s->ioreq.gfn = INVALID_GFN;
s->bufioreq.gfn = INVALID_GFN;
- rc = hvm_ioreq_server_alloc_rangesets(s, id);
+ rc = ioreq_server_alloc_rangesets(s, id);
if ( rc )
return rc;
@@ -552,7 +552,7 @@ static int hvm_ioreq_server_init(struct ioreq_server *s,
for_each_vcpu ( d, v )
{
- rc = hvm_ioreq_server_add_vcpu(s, v);
+ rc = ioreq_server_add_vcpu(s, v);
if ( rc )
goto fail_add;
}
@@ -560,23 +560,23 @@ static int hvm_ioreq_server_init(struct ioreq_server *s,
return 0;
fail_add:
- hvm_ioreq_server_remove_all_vcpus(s);
+ ioreq_server_remove_all_vcpus(s);
arch_ioreq_server_unmap_pages(s);
- hvm_ioreq_server_free_rangesets(s);
+ ioreq_server_free_rangesets(s);
put_domain(s->emulator);
return rc;
}
-static void hvm_ioreq_server_deinit(struct ioreq_server *s)
+static void ioreq_server_deinit(struct ioreq_server *s)
{
ASSERT(!s->enabled);
- hvm_ioreq_server_remove_all_vcpus(s);
+ ioreq_server_remove_all_vcpus(s);
/*
* NOTE: It is safe to call both arch_ioreq_server_unmap_pages() and
- * hvm_ioreq_server_free_pages() in that order.
+ * ioreq_server_free_pages() in that order.
* This is because the former will do nothing if the pages
* are not mapped, leaving the page to be freed by the latter.
* However if the pages are mapped then the former will set
@@ -584,15 +584,15 @@ static void hvm_ioreq_server_deinit(struct ioreq_server *s)
* nothing.
*/
arch_ioreq_server_unmap_pages(s);
- hvm_ioreq_server_free_pages(s);
+ ioreq_server_free_pages(s);
- hvm_ioreq_server_free_rangesets(s);
+ ioreq_server_free_rangesets(s);
put_domain(s->emulator);
}
-int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
- ioservid_t *id)
+int ioreq_server_create(struct domain *d, int bufioreq_handling,
+ ioservid_t *id)
{
struct ioreq_server *s;
unsigned int i;
@@ -620,11 +620,11 @@ int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
/*
* It is safe to call set_ioreq_server() prior to
- * hvm_ioreq_server_init() since the target domain is paused.
+ * ioreq_server_init() since the target domain is paused.
*/
set_ioreq_server(d, i, s);
- rc = hvm_ioreq_server_init(s, d, bufioreq_handling, i);
+ rc = ioreq_server_init(s, d, bufioreq_handling, i);
if ( rc )
{
set_ioreq_server(d, i, NULL);
@@ -647,7 +647,7 @@ int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
return rc;
}
-int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
+int ioreq_server_destroy(struct domain *d, ioservid_t id)
{
struct ioreq_server *s;
int rc;
@@ -668,13 +668,13 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
arch_ioreq_server_destroy(s);
- hvm_ioreq_server_disable(s);
+ ioreq_server_disable(s);
/*
- * It is safe to call hvm_ioreq_server_deinit() prior to
+ * It is safe to call ioreq_server_deinit() prior to
* set_ioreq_server() since the target domain is paused.
*/
- hvm_ioreq_server_deinit(s);
+ ioreq_server_deinit(s);
set_ioreq_server(d, id, NULL);
domain_unpause(d);
@@ -689,10 +689,10 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
return rc;
}
-int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
- unsigned long *ioreq_gfn,
- unsigned long *bufioreq_gfn,
- evtchn_port_t *bufioreq_port)
+int ioreq_server_get_info(struct domain *d, ioservid_t id,
+ unsigned long *ioreq_gfn,
+ unsigned long *bufioreq_gfn,
+ evtchn_port_t *bufioreq_port)
{
struct ioreq_server *s;
int rc;
@@ -736,8 +736,8 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
return rc;
}
-int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
- unsigned long idx, mfn_t *mfn)
+int ioreq_server_get_frame(struct domain *d, ioservid_t id,
+ unsigned long idx, mfn_t *mfn)
{
struct ioreq_server *s;
int rc;
@@ -756,7 +756,7 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
if ( s->emulator != current->domain )
goto out;
- rc = hvm_ioreq_server_alloc_pages(s);
+ rc = ioreq_server_alloc_pages(s);
if ( rc )
goto out;
@@ -787,9 +787,9 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
return rc;
}
-int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
- uint32_t type, uint64_t start,
- uint64_t end)
+int ioreq_server_map_io_range(struct domain *d, ioservid_t id,
+ uint32_t type, uint64_t start,
+ uint64_t end)
{
struct ioreq_server *s;
struct rangeset *r;
@@ -839,9 +839,9 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
return rc;
}
-int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
- uint32_t type, uint64_t start,
- uint64_t end)
+int ioreq_server_unmap_io_range(struct domain *d, ioservid_t id,
+ uint32_t type, uint64_t start,
+ uint64_t end)
{
struct ioreq_server *s;
struct rangeset *r;
@@ -899,8 +899,8 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
* Support for the emulation of read operations can be added when an ioreq
* server has such requirement in the future.
*/
-int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
- uint32_t type, uint32_t flags)
+int ioreq_server_map_mem_type(struct domain *d, ioservid_t id,
+ uint32_t type, uint32_t flags)
{
struct ioreq_server *s;
int rc;
@@ -931,8 +931,8 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
return rc;
}
-int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
- bool enabled)
+int ioreq_server_set_state(struct domain *d, ioservid_t id,
+ bool enabled)
{
struct ioreq_server *s;
int rc;
@@ -952,9 +952,9 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
domain_pause(d);
if ( enabled )
- hvm_ioreq_server_enable(s);
+ ioreq_server_enable(s);
else
- hvm_ioreq_server_disable(s);
+ ioreq_server_disable(s);
domain_unpause(d);
@@ -965,7 +965,7 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
return rc;
}
-int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
+int ioreq_server_add_vcpu_all(struct domain *d, struct vcpu *v)
{
struct ioreq_server *s;
unsigned int id;
@@ -975,7 +975,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
FOR_EACH_IOREQ_SERVER(d, id, s)
{
- rc = hvm_ioreq_server_add_vcpu(s, v);
+ rc = ioreq_server_add_vcpu(s, v);
if ( rc )
goto fail;
}
@@ -992,7 +992,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
if ( !s )
continue;
- hvm_ioreq_server_remove_vcpu(s, v);
+ ioreq_server_remove_vcpu(s, v);
}
spin_unlock_recursive(&d->ioreq_server.lock);
@@ -1000,7 +1000,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
return rc;
}
-void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
+void ioreq_server_remove_vcpu_all(struct domain *d, struct vcpu *v)
{
struct ioreq_server *s;
unsigned int id;
@@ -1008,12 +1008,12 @@ void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
spin_lock_recursive(&d->ioreq_server.lock);
FOR_EACH_IOREQ_SERVER(d, id, s)
- hvm_ioreq_server_remove_vcpu(s, v);
+ ioreq_server_remove_vcpu(s, v);
spin_unlock_recursive(&d->ioreq_server.lock);
}
-void hvm_destroy_all_ioreq_servers(struct domain *d)
+void ioreq_server_destroy_all(struct domain *d)
{
struct ioreq_server *s;
unsigned int id;
@@ -1027,13 +1027,13 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
FOR_EACH_IOREQ_SERVER(d, id, s)
{
- hvm_ioreq_server_disable(s);
+ ioreq_server_disable(s);
/*
- * It is safe to call hvm_ioreq_server_deinit() prior to
+ * It is safe to call ioreq_server_deinit() prior to
* set_ioreq_server() since the target domain is being destroyed.
*/
- hvm_ioreq_server_deinit(s);
+ ioreq_server_deinit(s);
set_ioreq_server(d, id, NULL);
xfree(s);
@@ -1042,8 +1042,8 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
spin_unlock_recursive(&d->ioreq_server.lock);
}
-struct ioreq_server *hvm_select_ioreq_server(struct domain *d,
- ioreq_t *p)
+struct ioreq_server *ioreq_server_select(struct domain *d,
+ ioreq_t *p)
{
struct ioreq_server *s;
uint8_t type;
@@ -1098,7 +1098,7 @@ struct ioreq_server *hvm_select_ioreq_server(struct domain *d,
return NULL;
}
-static int hvm_send_buffered_ioreq(struct ioreq_server *s, ioreq_t *p)
+static int ioreq_send_buffered(struct ioreq_server *s, ioreq_t *p)
{
struct domain *d = current->domain;
struct ioreq_page *iorp;
@@ -1191,8 +1191,8 @@ static int hvm_send_buffered_ioreq(struct ioreq_server *s, ioreq_t *p)
return IOREQ_STATUS_HANDLED;
}
-int hvm_send_ioreq(struct ioreq_server *s, ioreq_t *proto_p,
- bool buffered)
+int ioreq_send(struct ioreq_server *s, ioreq_t *proto_p,
+ bool buffered)
{
struct vcpu *curr = current;
struct domain *d = curr->domain;
@@ -1201,7 +1201,7 @@ int hvm_send_ioreq(struct ioreq_server *s, ioreq_t *proto_p,
ASSERT(s);
if ( buffered )
- return hvm_send_buffered_ioreq(s, proto_p);
+ return ioreq_send_buffered(s, proto_p);
if ( unlikely(!vcpu_start_shutdown_deferral(curr)) )
return IOREQ_STATUS_RETRY;
@@ -1251,7 +1251,7 @@ int hvm_send_ioreq(struct ioreq_server *s, ioreq_t *proto_p,
return IOREQ_STATUS_UNHANDLED;
}
-unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
+unsigned int ioreq_broadcast(ioreq_t *p, bool buffered)
{
struct domain *d = current->domain;
struct ioreq_server *s;
@@ -1262,14 +1262,14 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
if ( !s->enabled )
continue;
- if ( hvm_send_ioreq(s, p, buffered) == IOREQ_STATUS_UNHANDLED )
+ if ( ioreq_send(s, p, buffered) == IOREQ_STATUS_UNHANDLED )
failed++;
}
return failed;
}
-void hvm_ioreq_init(struct domain *d)
+void ioreq_domain_init(struct domain *d)
{
spin_lock_init(&d->ioreq_server.lock);
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 92cf98386e..3363c068b7 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -1108,7 +1108,7 @@ static int acquire_ioreq_server(struct domain *d,
{
mfn_t mfn;
- rc = hvm_get_ioreq_server_frame(d, id, frame + i, &mfn);
+ rc = ioreq_server_get_frame(d, id, frame + i, &mfn);
if ( rc )
return rc;
diff --git a/xen/include/xen/ioreq.h b/xen/include/xen/ioreq.h
index 979afa03e2..02ff998c57 100644
--- a/xen/include/xen/ioreq.h
+++ b/xen/include/xen/ioreq.h
@@ -81,41 +81,42 @@ static inline bool ioreq_needs_completion(const ioreq_t *ioreq)
#define HANDLE_BUFIOREQ(s) \
((s)->bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF)
-bool hvm_io_pending(struct vcpu *v);
-bool handle_hvm_io_completion(struct vcpu *v);
+bool vcpu_ioreq_pending(struct vcpu *v);
+bool vcpu_ioreq_handle_completion(struct vcpu *v);
bool is_ioreq_server_page(struct domain *d, const struct page_info *page);
-int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
- ioservid_t *id);
-int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id);
-int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
- unsigned long *ioreq_gfn,
- unsigned long *bufioreq_gfn,
- evtchn_port_t *bufioreq_port);
-int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
- unsigned long idx, mfn_t *mfn);
-int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
- uint32_t type, uint64_t start,
- uint64_t end);
-int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
- uint32_t type, uint64_t start,
- uint64_t end);
-int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
- uint32_t type, uint32_t flags);
-int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
- bool enabled);
-
-int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v);
-void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v);
-void hvm_destroy_all_ioreq_servers(struct domain *d);
-
-struct ioreq_server *hvm_select_ioreq_server(struct domain *d,
- ioreq_t *p);
-int hvm_send_ioreq(struct ioreq_server *s, ioreq_t *proto_p,
- bool buffered);
-unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered);
-
-void hvm_ioreq_init(struct domain *d);
+int ioreq_server_create(struct domain *d, int bufioreq_handling,
+ ioservid_t *id);
+int ioreq_server_destroy(struct domain *d, ioservid_t id);
+int ioreq_server_get_info(struct domain *d, ioservid_t id,
+ unsigned long *ioreq_gfn,
+ unsigned long *bufioreq_gfn,
+ evtchn_port_t *bufioreq_port);
+int ioreq_server_get_frame(struct domain *d, ioservid_t id,
+ unsigned long idx, mfn_t *mfn);
+int ioreq_server_map_io_range(struct domain *d, ioservid_t id,
+ uint32_t type, uint64_t start,
+ uint64_t end);
+int ioreq_server_unmap_io_range(struct domain *d, ioservid_t id,
+ uint32_t type, uint64_t start,
+ uint64_t end);
+int ioreq_server_map_mem_type(struct domain *d, ioservid_t id,
+ uint32_t type, uint32_t flags);
+
+int ioreq_server_set_state(struct domain *d, ioservid_t id,
+ bool enabled);
+
+int ioreq_server_add_vcpu_all(struct domain *d, struct vcpu *v);
+void ioreq_server_remove_vcpu_all(struct domain *d, struct vcpu *v);
+void ioreq_server_destroy_all(struct domain *d);
+
+struct ioreq_server *ioreq_server_select(struct domain *d,
+ ioreq_t *p);
+int ioreq_send(struct ioreq_server *s, ioreq_t *proto_p,
+ bool buffered);
+unsigned int ioreq_broadcast(ioreq_t *p, bool buffered);
+
+void ioreq_domain_init(struct domain *d);
#endif /* __XEN_IOREQ_H__ */