author    Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>  2020-11-30 12:31:23 +0200
committer Alex Bennée <alex.bennee@linaro.org>                  2020-11-30 15:12:48 +0000
commit    4f9d392befa09b4b56d92a5b306e510425c4513e (patch)
tree      bc01775db397f2b035a2ef2e56231ac7a0929c50 /xen
parent    975a5afaf5c7e7a8b413acfc5a7b6b7ea4483eff (diff)
xen/ioreq: Move x86's ioreq_server to struct domain
IOREQ is a common feature now and this struct will be used on Arm as is.
Move it to the common struct domain. This also significantly reduces the
layering violation in the common code (*arch.hvm* usage).

We don't move ioreq_gfn since it is not used in the common code
(the "legacy" mechanism is x86-specific).

Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
CC: Julien Grall <julien.grall@arm.com>
Message-Id: <1606732298-22107-9-git-send-email-olekstysh@gmail.com>
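The effect on common code is easiest to see side by side. The snippet below is an
illustrative before/after of the server lookup path, condensed from the hunks that
follow; the OLD/NEW macro names are hypothetical and only the field paths matter.

/*
 * Illustrative sketch only -- the real change is in the diff below.
 * The _OLD/_NEW macro names are invented for comparison purposes.
 */

/* Before: common ioreq code reached through the x86-specific hvm_domain. */
#define GET_IOREQ_SERVER_OLD(d, id) \
    (d)->arch.hvm.ioreq_server.server[id]

/*
 * After: the ioreq_server sub-struct lives directly in the common
 * struct domain (guarded by CONFIG_IOREQ_SERVER), so common code no
 * longer needs the arch.hvm detour.
 */
#define GET_IOREQ_SERVER_NEW(d, id) \
    (d)->ioreq_server.server[id]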
Diffstat (limited to 'xen')
-rw-r--r--  xen/common/ioreq.c               | 60
-rw-r--r--  xen/include/asm-x86/hvm/domain.h |  8
-rw-r--r--  xen/include/xen/sched.h          | 10
3 files changed, 40 insertions, 38 deletions
diff --git a/xen/common/ioreq.c b/xen/common/ioreq.c
index 3e80fc61cf..b7c2d5a0df 100644
--- a/xen/common/ioreq.c
+++ b/xen/common/ioreq.c
@@ -38,13 +38,13 @@ static void set_ioreq_server(struct domain *d, unsigned int id,
struct ioreq_server *s)
{
ASSERT(id < MAX_NR_IOREQ_SERVERS);
- ASSERT(!s || !d->arch.hvm.ioreq_server.server[id]);
+ ASSERT(!s || !d->ioreq_server.server[id]);
- d->arch.hvm.ioreq_server.server[id] = s;
+ d->ioreq_server.server[id] = s;
}
#define GET_IOREQ_SERVER(d, id) \
- (d)->arch.hvm.ioreq_server.server[id]
+ (d)->ioreq_server.server[id]
static struct ioreq_server *get_ioreq_server(const struct domain *d,
unsigned int id)
@@ -285,7 +285,7 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
unsigned int id;
bool found = false;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
FOR_EACH_IOREQ_SERVER(d, id, s)
{
@@ -296,7 +296,7 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
}
}
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return found;
}
@@ -606,7 +606,7 @@ int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
return -ENOMEM;
domain_pause(d);
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
{
@@ -634,13 +634,13 @@ int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
if ( id )
*id = i;
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
domain_unpause(d);
return 0;
fail:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
domain_unpause(d);
xfree(s);
@@ -652,7 +652,7 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
struct ioreq_server *s;
int rc;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
@@ -684,7 +684,7 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
rc = 0;
out:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
@@ -697,7 +697,7 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
struct ioreq_server *s;
int rc;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
@@ -731,7 +731,7 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
rc = 0;
out:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
@@ -744,7 +744,7 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
ASSERT(is_hvm_domain(d));
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
@@ -782,7 +782,7 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
}
out:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
@@ -798,7 +798,7 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
if ( start > end )
return -EINVAL;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
@@ -834,7 +834,7 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
rc = rangeset_add_range(r, start, end);
out:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
@@ -850,7 +850,7 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
if ( start > end )
return -EINVAL;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
@@ -886,7 +886,7 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
rc = rangeset_remove_range(r, start, end);
out:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
@@ -911,7 +911,7 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
if ( flags & ~XEN_DMOP_IOREQ_MEM_ACCESS_WRITE )
return -EINVAL;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
@@ -926,7 +926,7 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
rc = arch_ioreq_server_map_mem_type(d, s, flags);
out:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
@@ -937,7 +937,7 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
struct ioreq_server *s;
int rc;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
s = get_ioreq_server(d, id);
@@ -961,7 +961,7 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
rc = 0;
out:
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
@@ -971,7 +971,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
unsigned int id;
int rc;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
FOR_EACH_IOREQ_SERVER(d, id, s)
{
@@ -980,7 +980,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
goto fail;
}
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return 0;
@@ -995,7 +995,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
hvm_ioreq_server_remove_vcpu(s, v);
}
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
return rc;
}
@@ -1005,12 +1005,12 @@ void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
struct ioreq_server *s;
unsigned int id;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
FOR_EACH_IOREQ_SERVER(d, id, s)
hvm_ioreq_server_remove_vcpu(s, v);
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
}
void hvm_destroy_all_ioreq_servers(struct domain *d)
@@ -1021,7 +1021,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
if ( !arch_ioreq_server_destroy_all(d) )
return;
- spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_recursive(&d->ioreq_server.lock);
/* No need to domain_pause() as the domain is being torn down */
@@ -1039,7 +1039,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
xfree(s);
}
- spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
+ spin_unlock_recursive(&d->ioreq_server.lock);
}
struct ioreq_server *hvm_select_ioreq_server(struct domain *d,
@@ -1271,7 +1271,7 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
void hvm_ioreq_init(struct domain *d)
{
- spin_lock_init(&d->arch.hvm.ioreq_server.lock);
+ spin_lock_init(&d->ioreq_server.lock);
arch_ioreq_domain_init(d);
}
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 1c4ca47919..b8be1ad1eb 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -63,8 +63,6 @@ struct hvm_pi_ops {
void (*vcpu_block)(struct vcpu *);
};
-#define MAX_NR_IOREQ_SERVERS 8
-
struct hvm_domain {
/* Guest page range used for non-default ioreq servers */
struct {
@@ -73,12 +71,6 @@ struct hvm_domain {
unsigned long legacy_mask; /* indexed by HVM param number */
} ioreq_gfn;
- /* Lock protects all other values in the sub-struct and the default */
- struct {
- spinlock_t lock;
- struct ioreq_server *server[MAX_NR_IOREQ_SERVERS];
- } ioreq_server;
-
/* Cached CF8 for guest PCI config cycles */
uint32_t pci_cf8;
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index a345cc01f8..62cbcdb1b8 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -316,6 +316,8 @@ struct sched_unit {
struct evtchn_port_ops;
+#define MAX_NR_IOREQ_SERVERS 8
+
struct domain
{
domid_t domain_id;
@@ -523,6 +525,14 @@ struct domain
/* Argo interdomain communication support */
struct argo_domain *argo;
#endif
+
+#ifdef CONFIG_IOREQ_SERVER
+ /* Lock protects all other values in the sub-struct and the default */
+ struct {
+ spinlock_t lock;
+ struct ioreq_server *server[MAX_NR_IOREQ_SERVERS];
+ } ioreq_server;
+#endif
};
static inline struct page_list_head *page_to_list(