author    Paul Durrant <pdurrant@amazon.com>  2020-11-27 18:03:42 +0100
committer Jan Beulich <jbeulich@suse.com>     2020-11-27 18:03:42 +0100
commit    25ccd093425ce1d3a9f33ecd82e92f35952b8496
tree      643c3217b4142a94f54edf69cc56440f1ac5e831 /xen
parent    181f2c224ccd0a2900d6ae94ec390a546731f593
iommu: remove the share_p2m operation
Sharing of HAP tables is now VT-d specific, so the operation is never defined for the AMD IOMMU any more. There's also no need to pro-actively set vtd.pgd_maddr when using shared EPT, as it is straightforward to define a helper function to return the appropriate value in the shared and non-shared cases.

NOTE: This patch also modifies unmap_vtd_domain_page() to take a const pointer, since the only thing it calls, unmap_domain_page(), also takes a const pointer.

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
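In outline, the new helper (shown in full in the diff below) collapses the two cases into a single lookup; this is a condensed sketch, with the non-shared path elided:

    static uint64_t domain_pgd_maddr(struct domain *d, unsigned int nr_pt_levels)
    {
        /* Shared EPT: derive the root directly from the host p2m... */
        if ( iommu_use_hap_pt(d) )
            return pagetable_get_paddr(p2m_get_pagetable(p2m_get_hostp2m(d)));

        /* ...otherwise use (allocating on first use) the IOMMU's own tables,
         * skipping top levels for DRHDs with fewer than 4 page-table levels. */
        ...
    }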
Diffstat (limited to 'xen')
-rw-r--r--  xen/arch/x86/mm/p2m.c                 |  3
-rw-r--r--  xen/drivers/passthrough/iommu.c       |  8
-rw-r--r--  xen/drivers/passthrough/vtd/extern.h  |  2
-rw-r--r--  xen/drivers/passthrough/vtd/iommu.c   | 90
-rw-r--r--  xen/drivers/passthrough/vtd/x86/vtd.c |  2
-rw-r--r--  xen/include/xen/iommu.h               |  3
6 files changed, 52 insertions(+), 56 deletions(-)
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index d9cc1856bb..db6cc2202d 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -727,9 +727,6 @@ int p2m_alloc_table(struct p2m_domain *p2m)
     p2m->phys_table = pagetable_from_mfn(top_mfn);
 
-    if ( hap_enabled(d) )
-        iommu_share_p2m_table(d);
-
     p2m_unlock(p2m);
     return 0;
 }
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 87f9a857bb..a58703d3fa 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -523,14 +523,6 @@ int iommu_do_domctl(
     return ret;
 }
 
-void iommu_share_p2m_table(struct domain* d)
-{
-    ASSERT(hap_enabled(d));
-
-    if ( iommu_use_hap_pt(d) )
-        iommu_get_ops()->share_p2m(d);
-}
-
 void iommu_crash_shutdown(void)
 {
     if ( !iommu_crash_disable )
diff --git a/xen/drivers/passthrough/vtd/extern.h b/xen/drivers/passthrough/vtd/extern.h
index ad6c5f907b..19a908ab4f 100644
--- a/xen/drivers/passthrough/vtd/extern.h
+++ b/xen/drivers/passthrough/vtd/extern.h
@@ -72,7 +72,7 @@ void flush_all_cache(void);
 uint64_t alloc_pgtable_maddr(unsigned long npages, nodeid_t node);
 void free_pgtable_maddr(u64 maddr);
 void *map_vtd_domain_page(u64 maddr);
-void unmap_vtd_domain_page(void *va);
+void unmap_vtd_domain_page(const void *va);
 int domain_context_mapping_one(struct domain *domain, struct vtd_iommu *iommu,
                                u8 bus, u8 devfn, const struct pci_dev *);
 int domain_context_unmap_one(struct domain *domain, struct vtd_iommu *iommu,
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index f6c4021fd6..a76e60c99a 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -318,6 +318,48 @@ static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc)
     return pte_maddr;
 }
 
+static uint64_t domain_pgd_maddr(struct domain *d, unsigned int nr_pt_levels)
+{
+    struct domain_iommu *hd = dom_iommu(d);
+    uint64_t pgd_maddr;
+    unsigned int agaw;
+
+    ASSERT(spin_is_locked(&hd->arch.mapping_lock));
+
+    if ( iommu_use_hap_pt(d) )
+    {
+        pagetable_t pgt = p2m_get_pagetable(p2m_get_hostp2m(d));
+
+        return pagetable_get_paddr(pgt);
+    }
+
+    if ( !hd->arch.vtd.pgd_maddr )
+    {
+        /* Ensure we have pagetables allocated down to leaf PTE. */
+        addr_to_dma_page_maddr(d, 0, 1);
+
+        if ( !hd->arch.vtd.pgd_maddr )
+            return 0;
+    }
+
+    pgd_maddr = hd->arch.vtd.pgd_maddr;
+
+    /* Skip top levels of page tables for 2- and 3-level DRHDs. */
+    for ( agaw = level_to_agaw(4);
+          agaw != level_to_agaw(nr_pt_levels);
+          agaw-- )
+    {
+        const struct dma_pte *p = map_vtd_domain_page(pgd_maddr);
+
+        pgd_maddr = dma_pte_addr(*p);
+        unmap_vtd_domain_page(p);
+        if ( !pgd_maddr )
+            return 0;
+    }
+
+    return pgd_maddr;
+}
+
 static void iommu_flush_write_buffer(struct vtd_iommu *iommu)
 {
     u32 val;
@@ -1286,7 +1328,7 @@ int domain_context_mapping_one(
     struct context_entry *context, *context_entries;
     u64 maddr, pgd_maddr;
     u16 seg = iommu->drhd->segment;
-    int agaw, rc, ret;
+    int rc, ret;
     bool_t flush_dev_iotlb;
 
     ASSERT(pcidevs_locked());
@@ -1340,37 +1382,18 @@ int domain_context_mapping_one(
     if ( iommu_hwdom_passthrough && is_hardware_domain(domain) )
     {
         context_set_translation_type(*context, CONTEXT_TT_PASS_THRU);
-        agaw = level_to_agaw(iommu->nr_pt_levels);
     }
     else
     {
         spin_lock(&hd->arch.mapping_lock);
 
-        /* Ensure we have pagetables allocated down to leaf PTE. */
-        if ( hd->arch.vtd.pgd_maddr == 0 )
-        {
-            addr_to_dma_page_maddr(domain, 0, 1);
-            if ( hd->arch.vtd.pgd_maddr == 0 )
-            {
-            nomem:
-                spin_unlock(&hd->arch.mapping_lock);
-                spin_unlock(&iommu->lock);
-                unmap_vtd_domain_page(context_entries);
-                return -ENOMEM;
-            }
-        }
-
-        /* Skip top levels of page tables for 2- and 3-level DRHDs. */
-        pgd_maddr = hd->arch.vtd.pgd_maddr;
-        for ( agaw = level_to_agaw(4);
-              agaw != level_to_agaw(iommu->nr_pt_levels);
-              agaw-- )
+        pgd_maddr = domain_pgd_maddr(domain, iommu->nr_pt_levels);
+        if ( !pgd_maddr )
         {
-            struct dma_pte *p = map_vtd_domain_page(pgd_maddr);
-            pgd_maddr = dma_pte_addr(*p);
-            unmap_vtd_domain_page(p);
-            if ( pgd_maddr == 0 )
-                goto nomem;
+            spin_unlock(&hd->arch.mapping_lock);
+            spin_unlock(&iommu->lock);
+            unmap_vtd_domain_page(context_entries);
+            return -ENOMEM;
         }
 
         context_set_address_root(*context, pgd_maddr);
@@ -1389,7 +1412,7 @@ int domain_context_mapping_one(
         return -EFAULT;
     }
 
-    context_set_address_width(*context, agaw);
+    context_set_address_width(*context, level_to_agaw(iommu->nr_pt_levels));
     context_set_fault_enable(*context);
     context_set_present(*context);
     iommu_sync_cache(context, sizeof(struct context_entry));
@@ -1848,18 +1871,6 @@ static int __init vtd_ept_page_compatible(struct vtd_iommu *iommu)
            (ept_has_1gb(ept_cap) && opt_hap_1gb) <= cap_sps_1gb(vtd_cap);
 }
 
-/*
- * set VT-d page table directory to EPT table if allowed
- */
-static void iommu_set_pgd(struct domain *d)
-{
-    mfn_t pgd_mfn;
-
-    pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
-    dom_iommu(d)->arch.vtd.pgd_maddr =
-        pagetable_get_paddr(pagetable_from_mfn(pgd_mfn));
-}
-
 static int rmrr_identity_mapping(struct domain *d, bool_t map,
                                  const struct acpi_rmrr_unit *rmrr,
                                  u32 flag)
@@ -2718,7 +2729,6 @@ static struct iommu_ops __initdata vtd_ops = {
     .adjust_irq_affinities = adjust_vtd_irq_affinities,
     .suspend = vtd_suspend,
     .resume = vtd_resume,
-    .share_p2m = iommu_set_pgd,
     .crash_shutdown = vtd_crash_shutdown,
     .iotlb_flush = iommu_flush_iotlb_pages,
     .iotlb_flush_all = iommu_flush_iotlb_all,
diff --git a/xen/drivers/passthrough/vtd/x86/vtd.c b/xen/drivers/passthrough/vtd/x86/vtd.c
index bbe358dc36..6681dccd69 100644
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -42,7 +42,7 @@ void *map_vtd_domain_page(u64 maddr)
     return map_domain_page(_mfn(paddr_to_pfn(maddr)));
 }
 
-void unmap_vtd_domain_page(void *va)
+void unmap_vtd_domain_page(const void *va)
 {
     unmap_domain_page(va);
 }
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 191021870f..8d36f0b2b2 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -277,7 +277,6 @@ struct iommu_ops {
     int __must_check (*suspend)(void);
     void (*resume)(void);
-    void (*share_p2m)(struct domain *d);
     void (*crash_shutdown)(void);
     int __must_check (*iotlb_flush)(struct domain *d, dfn_t dfn,
                                     unsigned long page_count,
                                     unsigned int flush_flags);
@@ -354,8 +353,6 @@ void iommu_resume(void);
 void iommu_crash_shutdown(void);
 int iommu_get_reserved_device_memory(iommu_grdm_t *, void *);
 
-void iommu_share_p2m_table(struct domain *d);
-
 #ifdef CONFIG_HAS_PCI
 int iommu_do_pci_domctl(struct xen_domctl *, struct domain *d,
                         XEN_GUEST_HANDLE_PARAM(xen_domctl_t));