From 1d61261bfe8ae34764aa5a9d68af4ab15237719e Mon Sep 17 00:00:00 2001
From: "Fabio M. De Francesco"
Date: Thu, 1 Sep 2022 15:29:06 +0200
Subject: swiotlb: replace kmap_atomic() with memcpy_{from,to}_page()

The use of kmap_atomic() is being deprecated in favor of
kmap_local_page(), which can also be used in atomic context (including
interrupts).

Replace kmap_atomic() with kmap_local_page(). Instead of open coding
the mapping, memcpy(), and un-mapping, use the memcpy_{from,to}_page()
helpers.

Suggested-by: Ira Weiny
Signed-off-by: Fabio M. De Francesco
Signed-off-by: Christoph Hellwig
---
 kernel/dma/swiotlb.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 0ef6b12f961d..c54da87ec4aa 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -545,9 +545,8 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
 	}
 
 	if (PageHighMem(pfn_to_page(pfn))) {
-		/* The buffer does not have a mapping. Map it in and copy */
 		unsigned int offset = orig_addr & ~PAGE_MASK;
-		char *buffer;
+		struct page *page;
 		unsigned int sz = 0;
 		unsigned long flags;
 
@@ -555,12 +554,11 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
 			sz = min_t(size_t, PAGE_SIZE - offset, size);
 
 			local_irq_save(flags);
-			buffer = kmap_atomic(pfn_to_page(pfn));
+			page = pfn_to_page(pfn);
 			if (dir == DMA_TO_DEVICE)
-				memcpy(vaddr, buffer + offset, sz);
+				memcpy_from_page(vaddr, page, offset, sz);
 			else
-				memcpy(buffer + offset, vaddr, sz);
-			kunmap_atomic(buffer);
+				memcpy_to_page(page, offset, vaddr, sz);
 			local_irq_restore(flags);
 
 			size -= sz;
--
cgit

From 639205ed206f98fcfa826933946f0844615784ea Mon Sep 17 00:00:00 2001
From: Robin Murphy
Date: Wed, 7 Sep 2022 14:38:33 +0100
Subject: swiotlb: don't panic!

The panics in swiotlb are relics of a bygone era, some of them
inadvertently inherited from a memblock refactor, and all of them
unnecessary since they are in places that may also fail gracefully
anyway.

Convert the panics in swiotlb_init_remap() into non-fatal warnings
more consistent with the other bail-out paths there and in
swiotlb_init_late() (but don't bother trying to roll anything back,
since if anything does actually fail that early, the aim is merely to
keep going as far as possible to get more diagnostic information out
of the inevitably-dying kernel).

It's not for SWIOTLB to decide that the system is terminally
compromised just because there *might* turn out to be one or more
32-bit devices that might want to make streaming DMA mappings,
especially since we already handle the no-buffer case later if it
turns out someone did want it.

Similarly though, downgrade that panic in swiotlb_tbl_map_single(),
since even if we do get to that point it's an overly extreme reaction.
It makes little difference to the DMA API caller whether a mapping
fails because the buffer is full or because there is no buffer, and
once again it's not for SWIOTLB to presume that any particular DMA
mapping is so fundamental to the operation of the system that it must
be terminal if it could never succeed. Even if the caller handles
failure by futilely retrying forever, a single stuck thread is
considerably less impactful to the user than a needless panic.
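To illustrate the caller's side of that argument, here is a minimal,
hypothetical driver sketch (not part of the patch; the function name and
buffer handling are made up for illustration): a mapping failure surfaces
through the usual DMA API error path, whether the bounce buffer is merely
full or was never allocated at all.

    #include <linux/dma-mapping.h>

    /* Hypothetical submit path: a failed mapping is an error, not fatal. */
    static int example_queue_tx(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t addr;

            addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, addr))
                    return -ENOMEM; /* back off and retry later */

            /* ... hand addr to the hardware, then unmap on completion ... */
            dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
            return 0;
    }

Either way the caller sees the same dma_mapping_error() result, which is
exactly the point being made above.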
Signed-off-by: Robin Murphy
Signed-off-by: Christoph Hellwig
---
 kernel/dma/swiotlb.c | 30 +++++++++++++++++++-----------
 1 file changed, 19 insertions(+), 11 deletions(-)

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index c54da87ec4aa..339a990554e7 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -346,22 +346,27 @@ retry:
 		memblock_free(tlb, PAGE_ALIGN(bytes));
 
 		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
-		if (nslabs < IO_TLB_MIN_SLABS)
-			panic("%s: Failed to remap %zu bytes\n",
-			      __func__, bytes);
-		goto retry;
+		if (nslabs >= IO_TLB_MIN_SLABS)
+			goto retry;
+
+		pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
+		return;
 	}
 
 	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
 	mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
-	if (!mem->slots)
-		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
-		      __func__, alloc_size, PAGE_SIZE);
+	if (!mem->slots) {
+		pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n",
+			__func__, alloc_size, PAGE_SIZE);
+		return;
+	}
 
 	mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
 		default_nareas), SMP_CACHE_BYTES);
-	if (!mem->areas)
-		panic("%s: Failed to allocate mem->areas.\n", __func__);
+	if (!mem->areas) {
+		pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
+		return;
+	}
 
 	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false,
 				default_nareas);
@@ -729,8 +734,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 	int index;
 	phys_addr_t tlb_addr;
 
-	if (!mem || !mem->nslabs)
-		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
+	if (!mem || !mem->nslabs) {
+		dev_warn_ratelimited(dev,
+			"Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
+		return (phys_addr_t)DMA_MAPPING_ERROR;
+	}
 
 	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
--
cgit

From 91fd38ea7589fd8276720c20eadacd980be8bbe4 Mon Sep 17 00:00:00 2001
From: Lukas Bulwahn
Date: Thu, 22 Sep 2022 12:00:01 +0200
Subject: MAINTAINERS: merge SWIOTLB SUBSYSTEM into DMA MAPPING HELPERS

Commit 78013eaadf69 ("x86: remove the IOMMU table infrastructure")
refactored the generic swiotlb/swiotlb-xen setup into pci-dma.c, but
missed adjusting MAINTAINERS accordingly. Hence,
./scripts/get_maintainer.pl --self-test=patterns complains about broken
references.

As only include/linux/swiotlb.h is unique to the SWIOTLB SUBSYSTEM
section, and Christoph is the maintainer of both DMA MAPPING HELPERS
and SWIOTLB SUBSYSTEM, just merge those two sections. Leave the small
architecture-dependent pieces to the arch maintainers.

Further, update the XEN SWIOTLB SUBSYSTEM to include all swiotlb-xen
headers and replace the wildcard pattern under drivers/ with the one
file that actually matches it.
Signed-off-by: Lukas Bulwahn
Acked-by: Juergen Gross
Signed-off-by: Christoph Hellwig
---
 MAINTAINERS | 17 +++++------------
 1 file changed, 5 insertions(+), 12 deletions(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index 9ae989b32ebb..d934ec885b6c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6147,6 +6147,7 @@ F:	include/asm-generic/dma-mapping.h
 F:	include/linux/dma-direct.h
 F:	include/linux/dma-mapping.h
 F:	include/linux/dma-map-ops.h
+F:	include/linux/swiotlb.h
 F:	kernel/dma/
 
 DMA MAPPING BENCHMARK
@@ -19581,16 +19582,6 @@ S:	Maintained
 F:	Documentation/admin-guide/svga.rst
 F:	arch/x86/boot/video*
 
-SWIOTLB SUBSYSTEM
-M:	Christoph Hellwig
-L:	iommu@lists.linux.dev
-S:	Supported
-W:	http://git.infradead.org/users/hch/dma-mapping.git
-T:	git git://git.infradead.org/users/hch/dma-mapping.git
-F:	arch/*/kernel/pci-swiotlb.c
-F:	include/linux/swiotlb.h
-F:	kernel/dma/swiotlb.c
-
 SWITCHDEV
 M:	Jiri Pirko
 M:	Ivan Vecera
@@ -22286,8 +22277,10 @@ M:	Stefano Stabellini
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
 L:	iommu@lists.linux.dev
 S:	Supported
-F:	arch/x86/xen/*swiotlb*
-F:	drivers/xen/*swiotlb*
+F:	arch/*/include/asm/xen/swiotlb-xen.h
+F:	drivers/xen/swiotlb-xen.c
+F:	include/xen/arm/swiotlb-xen.h
+F:	include/xen/swiotlb-xen.h
 
 XFS FILESYSTEM
 C:	irc://irc.oftc.net/xfs
--
cgit

From f7f04d198334d6f69c76c8f8675194551a3da574 Mon Sep 17 00:00:00 2001
From: Masahiro Yamada
Date: Fri, 23 Sep 2022 20:38:35 +0900
Subject: lib/sg_pool: change module_init(sg_pool_init) to subsys_initcall

sg_alloc_table_chained() is called by several drivers, but if it is
called before sg_pool_init(), it results in a NULL pointer dereference
in sg_pool_alloc().

Since commit 9b1d6c895002 ("lib: scatterlist: move SG pool code from
SCSI driver to lib/sg_pool.c"), we have relied on
module_init(sg_pool_init) being invoked before the other module_init
calls, but this assumption is fragile. I slightly changed the link
order while refactoring Kbuild, which uncovered this issue. I should
keep the current link order, but depending on a specific call order
among module_init() calls is fragile anyway. We usually define the init
order by choosing the appropriate *_initcall level, or delay the driver
probing by returning -EPROBE_DEFER.

Change module_init() to subsys_initcall(), and also delete the
pointless module_exit(), because lib/sg_pool.c is always compiled as
built-in (CONFIG_SG_POOL is bool).
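For illustration, a minimal, hypothetical built-in consumer (made up for
this note, assuming CONFIG_SG_POOL=y) shows why the initcall level matters:
if its module_init() happens to be linked before sg_pool_init(), the
chained allocation reaches sg_pool_alloc() with mempools that do not exist
yet.

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/scatterlist.h>

    /*
     * Hypothetical consumer: with both initcalls at module_init() level,
     * link order alone decides whether sg_pool_init() has already run.
     * Moving sg_pool_init() to subsys_initcall() removes that dependency.
     */
    static int __init example_consumer_init(void)
    {
            struct sg_table table;

            /* NULL first chunk: the allocation comes from the sg_pool mempools */
            return sg_alloc_table_chained(&table, 4, NULL, SG_CHUNK_SIZE);
    }
    module_init(example_consumer_init);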
Link: https://lore.kernel.org/all/20220921043946.GA1355561@roeck-us.net/
Link: https://lore.kernel.org/all/8e70837d-d859-dfb2-bf7f-83f8b31467bc@samsung.com/
Fixes: 9b1d6c895002 ("lib: scatterlist: move SG pool code from SCSI driver to lib/sg_pool.c")
Reported-by: Guenter Roeck
Reported-by: Marek Szyprowski
Signed-off-by: Masahiro Yamada
Reviewed-by: Robin Murphy
Tested-by: Marek Szyprowski
Signed-off-by: Christoph Hellwig
---
 lib/sg_pool.c | 16 ++--------------
 1 file changed, 2 insertions(+), 14 deletions(-)

diff --git a/lib/sg_pool.c b/lib/sg_pool.c
index a0b1a52cd6f7..9bfe60ca3f37 100644
--- a/lib/sg_pool.c
+++ b/lib/sg_pool.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/scatterlist.h>
 #include <linux/mempool.h>
 #include <linux/slab.h>
@@ -177,16 +177,4 @@ cleanup_sdb:
 	return -ENOMEM;
 }
 
-static __exit void sg_pool_exit(void)
-{
-	int i;
-
-	for (i = 0; i < SG_MEMPOOL_NR; i++) {
-		struct sg_pool *sgp = sg_pools + i;
-		mempool_destroy(sgp->pool);
-		kmem_cache_destroy(sgp->slab);
-	}
-}
-
-module_init(sg_pool_init);
-module_exit(sg_pool_exit);
+subsys_initcall(sg_pool_init);
--
cgit

From 49bc8bebae79c8516cb12f91818f3a7907e3ebce Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 4 Oct 2022 09:10:19 +0200
Subject: ARM/dma-mappіng: don't override ->dma_coherent when set from a bus notifier
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Commit ae626eb97376 ("ARM/dma-mapping: use dma-direct unconditionally")
caused a regression on the mvebu platform, wherein devices that are
dma-coherent are marked as dma-noncoherent, because although
mvebu_hwcc_notifier() after that commit still marks them as coherent,
the arm_coherent_dma_ops() function, which is called later, overwrites
this setting, since it is being called from drivers/of/device.c with
the coherency parameter determined by of_dma_is_coherent(), and the
device trees do not declare the 'dma-coherent' property.

Fix this by never clearing the dma_coherent flag: arch_setup_dma_ops()
now only sets it when the coherent parameter is true.

Fixes: ae626eb97376 ("ARM/dma-mapping: use dma-direct unconditionally")
Reported-by: Marek Behún
Signed-off-by: Christoph Hellwig
Reviewed-by: Russell King (Oracle)
Tested-by: Marek Behún
---
 arch/arm/mm/dma-mapping.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 089c9c644cce..bfc7476f1411 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1769,8 +1769,16 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
 {
-	dev->archdata.dma_coherent = coherent;
-	dev->dma_coherent = coherent;
+	/*
+	 * Due to legacy code that sets the ->dma_coherent flag from a bus
+	 * notifier we can't just assign coherent to the ->dma_coherent flag
+	 * here, but instead have to make sure we only set but never clear it
+	 * for now.
+	 */
+	if (coherent) {
+		dev->archdata.dma_coherent = true;
+		dev->dma_coherent = true;
+	}
 
 	/*
 	 * Don't override the dma_ops if they have already been set. Ideally
--
cgit
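For context, the "legacy code" mentioned in the patch above is a bus
notifier along these lines. This is a rough, hypothetical sketch (loosely
modelled on the mvebu coherency notifier; all names here are illustrative)
of how ->dma_coherent can already be set before arch_setup_dma_ops() runs:

    #include <linux/device.h>
    #include <linux/init.h>
    #include <linux/notifier.h>
    #include <linux/platform_device.h>

    /*
     * Hypothetical platform notifier: every device added to the platform
     * bus is marked DMA-coherent because the SoC interconnect provides IO
     * coherency.  arch_setup_dma_ops() runs later from the OF code and,
     * before the fix above, unconditionally overwrote this flag.
     */
    static int example_coherency_notifier(struct notifier_block *nb,
                                          unsigned long event, void *data)
    {
            struct device *dev = data;

            if (event == BUS_NOTIFY_ADD_DEVICE)
                    dev->dma_coherent = true;

            return NOTIFY_OK;
    }

    static struct notifier_block example_coherency_nb = {
            .notifier_call = example_coherency_notifier,
    };

    static int __init example_coherency_init(void)
    {
            return bus_register_notifier(&platform_bus_type, &example_coherency_nb);
    }
    postcore_initcall(example_coherency_init);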
From c9cb01369b926a074004714ab9f9b65f75bf3c60 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 6 Oct 2022 09:43:01 +0200
Subject: ARM/dma-mapping: remove the dma_coherent member of struct dev_archdata

Since commit ae626eb97376 ("ARM/dma-mapping: use dma-direct
unconditionally"), only the dma_coherent flag in struct device is used,
so remove the now write-only flag in struct dev_archdata.

Signed-off-by: Christoph Hellwig
Reviewed-by: Russell King (Oracle)
---
 arch/arm/include/asm/device.h | 1 -
 arch/arm/mm/dma-mapping.c     | 4 +---
 2 files changed, 1 insertion(+), 4 deletions(-)

diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 8754c0f5fc90..c6beb1708c64 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -9,7 +9,6 @@ struct dev_archdata {
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
 	struct dma_iommu_mapping	*mapping;
 #endif
-	unsigned int dma_coherent:1;
 	unsigned int dma_ops_setup:1;
 };
 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index bfc7476f1411..f60d6b4afe5d 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1775,10 +1775,8 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 	 * here, but instead have to make sure we only set but never clear it
 	 * for now.
	 */
-	if (coherent) {
-		dev->archdata.dma_coherent = true;
+	if (coherent)
 		dev->dma_coherent = true;
-	}
 
 	/*
 	 * Don't override the dma_ops if they have already been set. Ideally
--
cgit