From e7de6c7cc207be78369d45fb833d7d53aeda47f8 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 19 Mar 2018 11:38:23 +0100
Subject: dma/swiotlb: Remove swiotlb_set_mem_attributes()

Now that set_memory_decrypted() is always available we can just call it
directly.

Tested-by: Tom Lendacky
Signed-off-by: Christoph Hellwig
Reviewed-by: Thomas Gleixner
Reviewed-by: Konrad Rzeszutek Wilk
Reviewed-by: Tom Lendacky
Cc: David Woodhouse
Cc: Joerg Roedel
Cc: Jon Mason
Cc: Linus Torvalds
Cc: Muli Ben-Yehuda
Cc: Peter Zijlstra
Cc: iommu@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/20180319103826.12853-12-hch@lst.de
Signed-off-by: Ingo Molnar
---
 lib/swiotlb.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'lib')

diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index c43ec2271469..005d1d87bb2e 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -31,6 +31,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -156,8 +157,6 @@ unsigned long swiotlb_size_or_default(void)
 	return size ? size : (IO_TLB_DEFAULT_SIZE);
 }
 
-void __weak swiotlb_set_mem_attributes(void *vaddr, unsigned long size) { }
-
 /* For swiotlb, clear memory encryption mask from dma addresses */
 static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
 				      phys_addr_t address)
@@ -209,12 +208,12 @@ void __init swiotlb_update_mem_attributes(void)
 
 	vaddr = phys_to_virt(io_tlb_start);
 	bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
-	swiotlb_set_mem_attributes(vaddr, bytes);
+	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
 	memset(vaddr, 0, bytes);
 
 	vaddr = phys_to_virt(io_tlb_overflow_buffer);
 	bytes = PAGE_ALIGN(io_tlb_overflow);
-	swiotlb_set_mem_attributes(vaddr, bytes);
+	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
 	memset(vaddr, 0, bytes);
 }
 
@@ -355,7 +354,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	io_tlb_start = virt_to_phys(tlb);
 	io_tlb_end = io_tlb_start + bytes;
 
-	swiotlb_set_mem_attributes(tlb, bytes);
+	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
 	memset(tlb, 0, bytes);
 
 	/*
@@ -366,7 +365,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	if (!v_overflow_buffer)
 		goto cleanup2;
 
-	swiotlb_set_mem_attributes(v_overflow_buffer, io_tlb_overflow);
+	set_memory_decrypted((unsigned long)v_overflow_buffer,
+			     io_tlb_overflow >> PAGE_SHIFT);
 	memset(v_overflow_buffer, 0, io_tlb_overflow);
 	io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
 
-- cgit v1.2.3
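
The conversion is mechanical, but note the unit change: swiotlb_set_mem_attributes()
took a size in bytes, while set_memory_decrypted() takes a page count, hence the
"bytes >> PAGE_SHIFT" in each hunk above. A minimal sketch of a byte-based wrapper
(hypothetical, not part of the patch) for comparison:

	static void swiotlb_decrypt_region(void *vaddr, unsigned long bytes)
	{
		/* set_memory_decrypted() takes whole pages, not bytes */
		unsigned long nr_pages = PAGE_ALIGN(bytes) >> PAGE_SHIFT;

		set_memory_decrypted((unsigned long)vaddr, nr_pages);
	}
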
From b6e05477c10c12e36141558fc14f04b00ea634d4 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 19 Mar 2018 11:38:24 +0100
Subject: dma/direct: Handle the memory encryption bit in common code

Give the basic phys_to_dma() and dma_to_phys() helpers a __-prefix and add
the memory encryption mask to the non-prefixed versions. Use the
__-prefixed versions directly instead of clearing the mask again in
various places.

Tested-by: Tom Lendacky
Signed-off-by: Christoph Hellwig
Reviewed-by: Thomas Gleixner
Cc: David Woodhouse
Cc: Joerg Roedel
Cc: Jon Mason
Cc: Konrad Rzeszutek Wilk
Cc: Linus Torvalds
Cc: Muli Ben-Yehuda
Cc: Peter Zijlstra
Cc: iommu@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/20180319103826.12853-13-hch@lst.de
Signed-off-by: Ingo Molnar
---
 lib/swiotlb.c | 25 +++++++++----------------
 1 file changed, 9 insertions(+), 16 deletions(-)

(limited to 'lib')

diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 005d1d87bb2e..8b06b4485e65 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -157,13 +157,6 @@ unsigned long swiotlb_size_or_default(void)
 	return size ? size : (IO_TLB_DEFAULT_SIZE);
 }
 
-/* For swiotlb, clear memory encryption mask from dma addresses */
-static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
-				      phys_addr_t address)
-{
-	return __sme_clr(phys_to_dma(hwdev, address));
-}
-
 /* Note that this doesn't work with highmem page */
 static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
 				      volatile void *address)
@@ -622,7 +615,7 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size,
 		return SWIOTLB_MAP_ERROR;
 	}
 
-	start_dma_addr = swiotlb_phys_to_dma(hwdev, io_tlb_start);
+	start_dma_addr = __phys_to_dma(hwdev, io_tlb_start);
 	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
 				      dir, attrs);
 }
@@ -726,12 +719,12 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		goto out_warn;
 
 	phys_addr = swiotlb_tbl_map_single(dev,
-			swiotlb_phys_to_dma(dev, io_tlb_start),
+			__phys_to_dma(dev, io_tlb_start),
 			0, size, DMA_FROM_DEVICE, 0);
 	if (phys_addr == SWIOTLB_MAP_ERROR)
 		goto out_warn;
 
-	*dma_handle = swiotlb_phys_to_dma(dev, phys_addr);
+	*dma_handle = __phys_to_dma(dev, phys_addr);
 	if (dma_coherent_ok(dev, *dma_handle, size))
 		goto out_unmap;
 
@@ -867,10 +860,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	map = map_single(dev, phys, size, dir, attrs);
 	if (map == SWIOTLB_MAP_ERROR) {
 		swiotlb_full(dev, size, dir, 1);
-		return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
+		return __phys_to_dma(dev, io_tlb_overflow_buffer);
 	}
 
-	dev_addr = swiotlb_phys_to_dma(dev, map);
+	dev_addr = __phys_to_dma(dev, map);
 
 	/* Ensure that the address returned is DMA'ble */
 	if (dma_capable(dev, dev_addr, size))
@@ -879,7 +872,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
 
-	return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
+	return __phys_to_dma(dev, io_tlb_overflow_buffer);
 }
 
 /*
@@ -1009,7 +1002,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 				sg_dma_len(sgl) = 0;
 				return 0;
 			}
-			sg->dma_address = swiotlb_phys_to_dma(hwdev, map);
+			sg->dma_address = __phys_to_dma(hwdev, map);
 		} else
 			sg->dma_address = dev_addr;
 		sg_dma_len(sg) = sg->length;
@@ -1073,7 +1066,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 int
 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
-	return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer));
+	return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer));
 }
 
 /*
@@ -1085,7 +1078,7 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 int
 swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-	return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
+	return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
 }
 
 #ifdef CONFIG_DMA_DIRECT_OPS
-- cgit v1.2.3
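
The diff above is limited to lib/, so the header side of the change is not shown.
Conceptually (a paraphrased sketch, assuming the dma-direct.h layout introduced by
this series) the non-prefixed helpers now apply the memory encryption mask while the
raw __-prefixed ones do not:

	/* sketch of the include/linux/dma-direct.h side, paraphrased */
	static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
	{
		/* device-visible address with the encryption bit set */
		return __sme_set(__phys_to_dma(dev, paddr));
	}

	static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
	{
		/* and cleared again on the way back to a physical address */
		return __sme_clr(__dma_to_phys(dev, daddr));
	}

Swiotlb needs the unencrypted form for its bounce buffer addresses, which is why the
open-coded __sme_clr(phys_to_dma(...)) helper removed above can simply become
__phys_to_dma().
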
From c10f07aa27dadf5ab5b3d58c48c91a467f80db49 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 19 Mar 2018 11:38:25 +0100
Subject: dma/direct: Handle force decryption for DMA coherent buffers in common code

With that in place the generic DMA-direct routines can be used to
allocate non-encrypted bounce buffers, and the x86 SEV case can use the
generic swiotlb ops including nice features such as using CMA
allocations.

Note that I'm not too happy about using sev_active() in DMA-direct, but
I couldn't come up with a good enough name for a wrapper to make it
worth adding.

Tested-by: Tom Lendacky
Signed-off-by: Christoph Hellwig
Reviewed-by: Thomas Gleixner
Cc: David Woodhouse
Cc: Joerg Roedel
Cc: Jon Mason
Cc: Konrad Rzeszutek Wilk
Cc: Linus Torvalds
Cc: Muli Ben-Yehuda
Cc: Peter Zijlstra
Cc: iommu@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/20180319103826.12853-14-hch@lst.de
Signed-off-by: Ingo Molnar
---
 lib/dma-direct.c | 32 ++++++++++++++++++++++++++------
 1 file changed, 26 insertions(+), 6 deletions(-)

(limited to 'lib')

diff --git a/lib/dma-direct.c b/lib/dma-direct.c
index c9e8e21cb334..1277d293d4da 100644
--- a/lib/dma-direct.c
+++ b/lib/dma-direct.c
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include
 
 #define DIRECT_MAPPING_ERROR		0
@@ -20,6 +21,14 @@
 #define ARCH_ZONE_DMA_BITS 24
 #endif
 
+/*
+ * For AMD SEV all DMA must be to unencrypted addresses.
+ */
+static inline bool force_dma_unencrypted(void)
+{
+	return sev_active();
+}
+
 static bool
 check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
 		const char *caller)
@@ -37,7 +46,9 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
-	return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask;
+	dma_addr_t addr = force_dma_unencrypted() ?
+		__phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
+	return addr + size - 1 <= dev->coherent_dma_mask;
 }
 
 void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
@@ -46,6 +57,7 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	int page_order = get_order(size);
 	struct page *page = NULL;
+	void *ret;
 
 	/* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
 	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
@@ -78,10 +90,15 @@ again:
 
 	if (!page)
 		return NULL;
-
-	*dma_handle = phys_to_dma(dev, page_to_phys(page));
-	memset(page_address(page), 0, size);
-	return page_address(page);
+	ret = page_address(page);
+	if (force_dma_unencrypted()) {
+		set_memory_decrypted((unsigned long)ret, 1 << page_order);
+		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
+	} else {
+		*dma_handle = phys_to_dma(dev, page_to_phys(page));
+	}
+	memset(ret, 0, size);
+	return ret;
 }
 
 /*
@@ -92,9 +109,12 @@ void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs)
 {
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned int page_order = get_order(size);
 
+	if (force_dma_unencrypted())
+		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
 	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
-		free_pages((unsigned long)cpu_addr, get_order(size));
+		free_pages((unsigned long)cpu_addr, page_order);
 }
 
 static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
-- cgit v1.2.3
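
From a driver's point of view nothing changes; the sev_active() handling is entirely
inside dma_direct_alloc()/dma_direct_free(). A hypothetical caller, for illustration
only (the ring buffer and its size are made up):

	#include <linux/dma-mapping.h>

	static int example_alloc_ring(struct device *dev, void **ring,
				      dma_addr_t *ring_dma)
	{
		/*
		 * Under SEV, dma_direct_alloc() marks these pages decrypted via
		 * set_memory_decrypted() and hands back a handle from
		 * __phys_to_dma(), i.e. without the encryption bit, so the
		 * device sees plaintext.
		 */
		*ring = dma_alloc_coherent(dev, PAGE_SIZE, ring_dma, GFP_KERNEL);
		return *ring ? 0 : -ENOMEM;
	}
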
From 16e73adbca76fd18733278cb688b0ddb4cad162c Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 19 Mar 2018 11:38:26 +0100
Subject: dma/swiotlb: Remove swiotlb_{alloc,free}_coherent()

Unused now that everyone uses swiotlb_{alloc,free}().

Tested-by: Tom Lendacky
Signed-off-by: Christoph Hellwig
Reviewed-by: Thomas Gleixner
Cc: David Woodhouse
Cc: Joerg Roedel
Cc: Jon Mason
Cc: Konrad Rzeszutek Wilk
Cc: Linus Torvalds
Cc: Muli Ben-Yehuda
Cc: Peter Zijlstra
Cc: iommu@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/20180319103826.12853-15-hch@lst.de
Signed-off-by: Ingo Molnar
---
 lib/swiotlb.c | 38 --------------------------------------
 1 file changed, 38 deletions(-)

(limited to 'lib')

diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 8b06b4485e65..15954b86f09e 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -157,13 +157,6 @@ unsigned long swiotlb_size_or_default(void)
 	return size ? size : (IO_TLB_DEFAULT_SIZE);
 }
 
-/* Note that this doesn't work with highmem page */
-static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
-				      volatile void *address)
-{
-	return phys_to_dma(hwdev, virt_to_phys(address));
-}
-
 static bool no_iotlb_memory;
 
 void swiotlb_print_info(void)
@@ -752,28 +745,6 @@ out_warn:
 	return NULL;
 }
 
-void *
-swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-		       dma_addr_t *dma_handle, gfp_t flags)
-{
-	int order = get_order(size);
-	unsigned long attrs = (flags & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0;
-	void *ret;
-
-	ret = (void *)__get_free_pages(flags, order);
-	if (ret) {
-		*dma_handle = swiotlb_virt_to_bus(hwdev, ret);
-		if (dma_coherent_ok(hwdev, *dma_handle, size)) {
-			memset(ret, 0, size);
-			return ret;
-		}
-		free_pages((unsigned long)ret, order);
-	}
-
-	return swiotlb_alloc_buffer(hwdev, size, dma_handle, attrs);
-}
-EXPORT_SYMBOL(swiotlb_alloc_coherent);
-
 static bool swiotlb_free_buffer(struct device *dev, size_t size,
 				dma_addr_t dma_addr)
 {
@@ -793,15 +764,6 @@ static bool swiotlb_free_buffer(struct device *dev, size_t size,
 	return true;
 }
 
-void
-swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-		      dma_addr_t dev_addr)
-{
-	if (!swiotlb_free_buffer(hwdev, size, dev_addr))
-		free_pages((unsigned long)vaddr, get_order(size));
-}
-EXPORT_SYMBOL(swiotlb_free_coherent);
-
 static void
 swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
 	     int do_panic)
-- cgit v1.2.3
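
Callers are not expected to reach the removed functions any more; coherent
allocations go through the generic DMA API and end up in swiotlb_alloc()/swiotlb_free()
when bouncing is needed. A hypothetical before/after, for illustration only (the
helper and its arguments are made up):

	#include <linux/dma-mapping.h>

	static void *example_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle)
	{
		/*
		 * Previously: swiotlb_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
		 * Now: the plain DMA API, which reaches swiotlb_alloc() through the
		 * registered DMA ops when swiotlb is in use.
		 */
		return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
	}
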
From 0803e6051c1562e1525c3e044313390bf8b35c2b Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Fri, 23 Mar 2018 18:49:30 +0100
Subject: swiotlb: Make swiotlb_{alloc,free}_buffer depend on CONFIG_DMA_DIRECT_OPS

Otherwise this causes unused symbol warnings for configs that build
swiotlb.c only for use by xen-swiotlb.c and that don't otherwise select
CONFIG_DMA_DIRECT_OPS, which is possible on arm.

Fixes: 16e73adbca76 ("dma/swiotlb: Remove swiotlb_{alloc,free}_coherent()")
Reported-by: Stephen Rothwell
Signed-off-by: Christoph Hellwig
Signed-off-by: Thomas Gleixner
Cc: iommu@lists.linux-foundation.org
Cc: konrad.wilk@oracle.com
Link: https://lkml.kernel.org/r/20180323174930.17767-1-hch@lst.de
---
 lib/swiotlb.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'lib')

diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 15954b86f09e..47aeb04c1997 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -692,6 +692,7 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
 	}
 }
 
+#ifdef CONFIG_DMA_DIRECT_OPS
 static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
 		size_t size)
 {
@@ -763,6 +764,7 @@ static bool swiotlb_free_buffer(struct device *dev, size_t size,
 				 DMA_ATTR_SKIP_CPU_SYNC);
 	return true;
 }
+#endif
 
 static void
 swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
-- cgit v1.2.3
From e89f5b37015309a8bdf0b21d08007580b92f92a4 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Wed, 28 Mar 2018 15:35:35 +0200
Subject: dma-mapping: Don't clear GFP_ZERO in dma_alloc_attrs

Revert the clearing of __GFP_ZERO in dma_alloc_attrs() and move it into
dma_direct_alloc() for now. While most common architectures always zero
DMA coherent allocations (and x86 has done so since day one), this is
not documented, and at least arc and s390 do not zero without the
explicit __GFP_ZERO argument.

Fixes: 57bf5a8963f8 ("dma-mapping: clear harmful GFP_* flags in common code")
Reported-by: Evgeniy Didin
Reported-by: Sebastian Ott
Signed-off-by: Christoph Hellwig
Signed-off-by: Thomas Gleixner
Tested-by: Evgeniy Didin
Cc: iommu@lists.linux-foundation.org
Link: https://lkml.kernel.org/r/20180328133535.17302-2-hch@lst.de
---
 lib/dma-direct.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'lib')

diff --git a/lib/dma-direct.c b/lib/dma-direct.c
index 1277d293d4da..c0bba30fef0a 100644
--- a/lib/dma-direct.c
+++ b/lib/dma-direct.c
@@ -59,6 +59,9 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	struct page *page = NULL;
 	void *ret;
 
+	/* we always manually zero the memory once we are done: */
+	gfp &= ~__GFP_ZERO;
+
 	/* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
 	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
 		gfp |= GFP_DMA;
-- cgit v1.2.3
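
The caller-visible effect is that dma_direct_alloc() keeps returning zeroed memory
(it memsets the buffer itself, as the hunk above shows), while architectures with
their own DMA ops, such as arc and s390, once again see the caller's __GFP_ZERO. A
hypothetical caller that wants the zeroing guarantee everywhere can keep asking for
it explicitly, for example:

	/* illustration only: dma_zalloc_coherent() passes __GFP_ZERO along */
	desc = dma_zalloc_coherent(dev, desc_bytes, &desc_dma, GFP_KERNEL);
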