Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--	arch/powerpc/kernel/dma-iommu.c          | 90
-rw-r--r--	arch/powerpc/kernel/exceptions-64s.S     | 14
-rw-r--r--	arch/powerpc/kernel/process.c            |  2
-rw-r--r--	arch/powerpc/kernel/syscalls/syscall.tbl |  1
-rw-r--r--	arch/powerpc/kernel/time.c               |  2
5 files changed, 24 insertions, 85 deletions
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index e486d1d78de2..569fecd7b5b2 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -14,23 +14,6 @@
  * Generic iommu implementation
  */
 
-/*
- * The coherent mask may be smaller than the real mask, check if we can
- * really use a direct window.
- */
-static inline bool dma_iommu_alloc_bypass(struct device *dev)
-{
-	return dev->archdata.iommu_bypass && !iommu_fixed_is_weak &&
-		dma_direct_supported(dev, dev->coherent_dma_mask);
-}
-
-static inline bool dma_iommu_map_bypass(struct device *dev,
-		unsigned long attrs)
-{
-	return dev->archdata.iommu_bypass &&
-		(!iommu_fixed_is_weak || (attrs & DMA_ATTR_WEAK_ORDERING));
-}
-
 /* Allocates a contiguous real buffer and creates mappings over it.
  * Returns the virtual address of the buffer and sets dma_handle
  * to the dma address (mapping) of the first page.
@@ -39,8 +22,6 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
 				      dma_addr_t *dma_handle, gfp_t flag,
 				      unsigned long attrs)
 {
-	if (dma_iommu_alloc_bypass(dev))
-		return dma_direct_alloc(dev, size, dma_handle, flag, attrs);
 	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
 				    dma_handle, dev->coherent_dma_mask, flag,
 				    dev_to_node(dev));
@@ -50,11 +31,7 @@ static void dma_iommu_free_coherent(struct device *dev, size_t size,
 				    void *vaddr, dma_addr_t dma_handle,
 				    unsigned long attrs)
 {
-	if (dma_iommu_alloc_bypass(dev))
-		dma_direct_free(dev, size, vaddr, dma_handle, attrs);
-	else
-		iommu_free_coherent(get_iommu_table_base(dev), size, vaddr,
-				dma_handle);
+	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
 }
 
 /* Creates TCEs for a user provided buffer. The user buffer must be
@@ -67,9 +44,6 @@ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
 				     enum dma_data_direction direction,
 				     unsigned long attrs)
 {
-	if (dma_iommu_map_bypass(dev, attrs))
-		return dma_direct_map_page(dev, page, offset, size, direction,
-				attrs);
 	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
 			      size, dma_get_mask(dev), direction, attrs);
 }
@@ -79,11 +53,8 @@ static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
 				 size_t size, enum dma_data_direction direction,
 				 unsigned long attrs)
 {
-	if (!dma_iommu_map_bypass(dev, attrs))
-		iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size,
-				direction, attrs);
-	else
-		dma_direct_unmap_page(dev, dma_handle, size, direction, attrs);
+	iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
+			 attrs);
 }
 
 
@@ -91,8 +62,6 @@ static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 			    int nelems, enum dma_data_direction direction,
 			    unsigned long attrs)
 {
-	if (dma_iommu_map_bypass(dev, attrs))
-		return dma_direct_map_sg(dev, sglist, nelems, direction, attrs);
 	return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
 				dma_get_mask(dev), direction, attrs);
 }
@@ -101,11 +70,8 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			       int nelems, enum dma_data_direction direction,
 			       unsigned long attrs)
 {
-	if (!dma_iommu_map_bypass(dev, attrs))
-		ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
+	ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
 			   direction, attrs);
-	else
-		dma_direct_unmap_sg(dev, sglist, nelems, direction, attrs);
 }
 
 static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
@@ -113,8 +79,9 @@ static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct pci_controller *phb = pci_bus_to_host(pdev->bus);
 
-	return phb->controller_ops.iommu_bypass_supported &&
-		phb->controller_ops.iommu_bypass_supported(pdev, mask);
+	if (iommu_fixed_is_weak || !phb->controller_ops.iommu_bypass_supported)
+		return false;
+	return phb->controller_ops.iommu_bypass_supported(pdev, mask);
 }
 
 /* We support DMA to/from any memory page via the iommu */
@@ -123,7 +90,7 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
 	struct iommu_table *tbl = get_iommu_table_base(dev);
 
 	if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
-		dev->archdata.iommu_bypass = true;
+		dev->dma_ops_bypass = true;
 		dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
 		return 1;
 	}
@@ -141,7 +108,7 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
 	}
 
 	dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
-	dev->archdata.iommu_bypass = false;
+	dev->dma_ops_bypass = false;
 	return 1;
 }
 
@@ -153,47 +120,12 @@ u64 dma_iommu_get_required_mask(struct device *dev)
 	if (!tbl)
 		return 0;
 
-	if (dev_is_pci(dev)) {
-		u64 bypass_mask = dma_direct_get_required_mask(dev);
-
-		if (dma_iommu_bypass_supported(dev, bypass_mask))
-			return bypass_mask;
-	}
-
 	mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) - 1);
 	mask += mask - 1;
 
 	return mask;
 }
 
-static void dma_iommu_sync_for_cpu(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir)
-{
-	if (dma_iommu_alloc_bypass(dev))
-		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static void dma_iommu_sync_for_device(struct device *dev, dma_addr_t addr,
-		size_t sz, enum dma_data_direction dir)
-{
-	if (dma_iommu_alloc_bypass(dev))
-		dma_direct_sync_single_for_device(dev, addr, sz, dir);
-}
-
-extern void dma_iommu_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-	if (dma_iommu_alloc_bypass(dev))
-		dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
-}
-
-extern void dma_iommu_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-	if (dma_iommu_alloc_bypass(dev))
-		dma_direct_sync_sg_for_device(dev, sgl, nents, dir);
-}
-
 const struct dma_map_ops dma_iommu_ops = {
 	.alloc			= dma_iommu_alloc_coherent,
 	.free			= dma_iommu_free_coherent,
@@ -203,10 +135,6 @@ const struct dma_map_ops dma_iommu_ops = {
 	.map_page		= dma_iommu_map_page,
 	.unmap_page		= dma_iommu_unmap_page,
 	.get_required_mask	= dma_iommu_get_required_mask,
-	.sync_single_for_cpu	= dma_iommu_sync_for_cpu,
-	.sync_single_for_device	= dma_iommu_sync_for_device,
-	.sync_sg_for_cpu	= dma_iommu_sync_sg_for_cpu,
-	.sync_sg_for_device	= dma_iommu_sync_sg_for_device,
 	.mmap			= dma_common_mmap,
 	.get_sgtable		= dma_common_get_sgtable,
 };
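Note: the dma-iommu.c hunks above can delete the hand-rolled *_bypass helpers and sync_* wrappers because the common DMA layer now short-circuits to dma-direct itself whenever dev->dma_ops_bypass is set and the direct window covers the device's DMA mask. A simplified sketch of that central dispatch, modelled on the CONFIG_DMA_OPS_BYPASS logic in kernel/dma/mapping.c (illustrative, not the verbatim kernel code):

/* Sketch: how the common code decides to bypass the arch dma_map_ops. */
static bool dma_map_direct(struct device *dev, const struct dma_map_ops *ops)
{
	if (!ops)
		return true;		/* no ops registered: always direct */
	if (!dev->dma_ops_bypass)
		return false;		/* arch insists on its iommu ops */
	/* bypass only if dma-direct can address everything the device needs */
	return min_not_zero(*dev->dma_mask, dev->bus_dma_limit) >=
			dma_direct_get_required_mask(dev);
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		return dma_direct_map_page(dev, page, offset, size, dir, attrs);
	return ops->map_page(dev, page, offset, size, dir, attrs);
}

Because that decision is made once, centrally, for every map/unmap/sync entry point, dma_iommu_dma_supported() only has to set or clear dev->dma_ops_bypass.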
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 0fc8bad878b2..446e54c3f71e 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -3072,10 +3072,18 @@ do_hash_page:
 	ori	r0,r0,DSISR_BAD_FAULT_64S@l
 	and.	r0,r5,r0		/* weird error? */
 	bne-	handle_page_fault	/* if not, try to insert a HPTE */
+
+	/*
+	 * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
+	 * don't call hash_page, just fail the fault. This is required to
+	 * prevent re-entrancy problems in the hash code, namely perf
+	 * interrupts hitting while something holds H_PAGE_BUSY, and taking a
+	 * hash fault. See the comment in hash_preload().
+	 */
 	ld	r11, PACA_THREAD_INFO(r13)
-	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
-	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
-	bne	77f			/* then don't call hash_page now */
+	lwz	r0,TI_PREEMPT(r11)
+	andis.	r0,r0,NMI_MASK@h
+	bne	77f
 
 	/*
 	 * r3 contains the trap number
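The new comment block explains why do_hash_page bails out under NMI_MASK: a hash fault taken from a perf interrupt while another context holds H_PAGE_BUSY could re-enter the hash code. The assembly loads TI_PREEMPT (the thread_info preempt_count) and tests its NMI bits; in C terms the guard is roughly the following (an illustrative sketch, not part of this patch):

#include <linux/preempt.h>

/*
 * C analogue of the lwz/andis./bne sequence above: in_nmi() is built
 * from the NMI_MASK bits of preempt_count().
 */
static bool may_call_hash_page(void)
{
	return !in_nmi();
}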
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 4650b9bb217f..794b754deec2 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1593,7 +1593,7 @@ static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
 /*
  * Copy architecture-specific thread state
  */
-int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
+int copy_thread(unsigned long clone_flags, unsigned long usp,
 		unsigned long kthread_arg, struct task_struct *p,
 		unsigned long tls)
 {
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index f833a3190822..dd87a782d80e 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -525,6 +525,7 @@
 435	32	clone3				ppc_clone3			sys_clone3
 435	64	clone3				sys_clone3
 435	spu	clone3				sys_ni_syscall
+436	common	close_range			sys_close_range
 437	common	openat2				sys_openat2
 438	common	pidfd_getfd			sys_pidfd_getfd
 439	common	faccessat2			sys_faccessat2
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 6fcae436ae51..f85539ebb513 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -183,6 +183,8 @@ static inline unsigned long read_spurr(unsigned long tb)
 
 #ifdef CONFIG_PPC_SPLPAR
 
+#include <asm/dtl.h>
+
 /*
  * Scan the dispatch trace log and count up the stolen time.
  * Should be called with interrupts disabled.
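For reference, the syscall.tbl hunk above wires up close_range() as syscall number 436; it takes an inclusive descriptor range plus flags. A minimal userspace sketch, calling it via syscall(2) since older libcs lack a wrapper (the fallback define is an assumption for pre-5.9 headers):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_close_range
#define __NR_close_range 436	/* matches the table entry above */
#endif

int main(void)
{
	/* Close every fd from 3 upward, e.g. before exec'ing a child. */
	if (syscall(__NR_close_range, 3U, ~0U, 0U) == -1)
		perror("close_range");
	return 0;
}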