Diffstat (limited to 'drivers/iommu/io-pgtable-arm.c')
-rw-r--r--	drivers/iommu/io-pgtable-arm.c	114
1 file changed, 33 insertions(+), 81 deletions(-)
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 74f58c6ac30c..6b9bb58a414f 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -211,6 +211,18 @@ static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}
+/*
+ * Convert an index returned by ARM_LPAE_PGD_IDX(), which can point into
+ * a concatenated PGD, into the maximum number of entries that can be
+ * mapped in the same table page.
+ */
+static inline int arm_lpae_max_entries(int i, struct arm_lpae_io_pgtable *data)
+{
+ int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
+
+ return ptes_per_table - (i & (ptes_per_table - 1));
+}
+
static bool selftest_running = false;
static dma_addr_t __arm_lpae_dma_addr(void *pages)
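For illustration only (not part of the patch): a standalone sketch of the arithmetic in arm_lpae_max_entries(), assuming a 4K granule so each table page holds 512 PTEs; max_entries() below mirrors the helper with ptes_per_table passed in explicitly.

#include <stdio.h>

/* Same computation as arm_lpae_max_entries(), with ptes_per_table explicit. */
static int max_entries(int i, int ptes_per_table)
{
	return ptes_per_table - (i & (ptes_per_table - 1));
}

int main(void)
{
	/* 4K granule: 4096-byte table / 8-byte PTEs = 512 entries per page */
	printf("%d\n", max_entries(0, 512));	/* 512: a full table page      */
	printf("%d\n", max_entries(511, 512));	/* 1:   last slot in the page  */
	printf("%d\n", max_entries(1000, 512));	/* 24:  concatenated PGD index,
						 *      1000 & 511 = 488       */
	return 0;
}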
@@ -402,7 +414,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
/* If we can install a leaf entry at this level, then do so */
if (size == block_size) {
- max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start;
+ max_entries = arm_lpae_max_entries(map_idx_start, data);
num_entries = min_t(int, pgcount, max_entries);
ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
if (!ret)
@@ -585,66 +597,6 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop)
kfree(data);
}
-static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
- struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t size,
- arm_lpae_iopte blk_pte, int lvl,
- arm_lpae_iopte *ptep, size_t pgcount)
-{
- struct io_pgtable_cfg *cfg = &data->iop.cfg;
- arm_lpae_iopte pte, *tablep;
- phys_addr_t blk_paddr;
- size_t tablesz = ARM_LPAE_GRANULE(data);
- size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
- int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
- int i, unmap_idx_start = -1, num_entries = 0, max_entries;
-
- if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
- return 0;
-
- tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg, data->iop.cookie);
- if (!tablep)
- return 0; /* Bytes unmapped */
-
- if (size == split_sz) {
- unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
- max_entries = ptes_per_table - unmap_idx_start;
- num_entries = min_t(int, pgcount, max_entries);
- }
-
- blk_paddr = iopte_to_paddr(blk_pte, data);
- pte = iopte_prot(blk_pte);
-
- for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) {
- /* Unmap! */
- if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
- continue;
-
- __arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
- }
-
- pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
- if (pte != blk_pte) {
- __arm_lpae_free_pages(tablep, tablesz, cfg, data->iop.cookie);
- /*
- * We may race against someone unmapping another part of this
- * block, but anything else is invalid. We can't misinterpret
- * a page entry here since we're never at the last level.
- */
- if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE)
- return 0;
-
- tablep = iopte_deref(pte, data);
- } else if (unmap_idx_start >= 0) {
- for (i = 0; i < num_entries; i++)
- io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);
-
- return num_entries * size;
- }
-
- return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
-}
-
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
struct iommu_iotlb_gather *gather,
unsigned long iova, size_t size, size_t pgcount,
@@ -666,7 +618,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
/* If the size matches this level, we're in the right place */
if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
- max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
+ max_entries = arm_lpae_max_entries(unmap_idx_start, data);
num_entries = min_t(int, pgcount, max_entries);
/* Find and handle non-leaf entries */
@@ -694,12 +646,8 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
return i * size;
} else if (iopte_leaf(pte, lvl, iop->fmt)) {
- /*
- * Insert a table at the next level to map the old region,
- * minus the part we want to unmap
- */
- return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
- lvl + 1, ptep, pgcount);
+ WARN_ONCE(true, "Unmap of a partial large IOPTE is not allowed");
+ return 0;
}
/* Keep on walkin' */
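With split-on-unmap gone, a caller that mapped a block (huge) entry must now unmap it at the same size. The hypothetical selftest chunk below, written in the style of arm_lpae_run_tests() further down and reusing its ops, mapped, i and __FAIL, sketches the new contract; it assumes the configuration supports SZ_2M block mappings.

		/* Map one 2M block (paddr == iova, as in the other tests). */
		mapped = 0;
		if (ops->map_pages(ops, SZ_1G, SZ_1G, SZ_2M, 1,
				   IOMMU_READ | IOMMU_WRITE,
				   GFP_KERNEL, &mapped) || mapped != SZ_2M)
			return __FAIL(ops, i);

		/* A partial 4K unmap inside the block now warns and unmaps nothing... */
		if (ops->unmap_pages(ops, SZ_1G, SZ_4K, 1, NULL) != 0)
			return __FAIL(ops, i);

		/* ...so the whole block must be unmapped with a matching size. */
		if (ops->unmap_pages(ops, SZ_1G, SZ_2M, 1, NULL) != SZ_2M)
			return __FAIL(ops, i);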
@@ -1362,19 +1310,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
iova += SZ_1G;
}
- /* Partial unmap */
- size = 1UL << __ffs(cfg->pgsize_bitmap);
- if (ops->unmap_pages(ops, SZ_1G + size, size, 1, NULL) != size)
- return __FAIL(ops, i);
-
- /* Remap of partial unmap */
- if (ops->map_pages(ops, SZ_1G + size, size, size, 1,
- IOMMU_READ, GFP_KERNEL, &mapped))
- return __FAIL(ops, i);
-
- if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
- return __FAIL(ops, i);
-
/* Full unmap */
iova = 0;
for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
@@ -1397,6 +1332,23 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
iova += SZ_1G;
}
+ /*
+ * Map/unmap the largest supported page at the end of the IAS, as this
+ * can trigger corner cases in the concatenated page tables.
+ */
+ mapped = 0;
+ size = 1UL << __fls(cfg->pgsize_bitmap);
+ iova = (1UL << cfg->ias) - size;
+ if (ops->map_pages(ops, iova, iova, size, 1,
+ IOMMU_READ | IOMMU_WRITE |
+ IOMMU_NOEXEC | IOMMU_CACHE,
+ GFP_KERNEL, &mapped))
+ return __FAIL(ops, i);
+ if (mapped != size)
+ return __FAIL(ops, i);
+ if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
+ return __FAIL(ops, i);
+
free_io_pgtable_ops(ops);
}
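As a worked example of the new test's arithmetic, with illustrative values (a 40-bit IAS and a 4K granule, so the largest leaf size in pgsize_bitmap is the 1G block): the snippet below is standalone userspace C and uses __builtin_clzl() in place of the kernel's __fls().

#include <stdio.h>

int main(void)
{
	/* Illustrative: 4K granule leaf sizes are 4K, 2M and 1G. */
	unsigned long pgsize_bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);
	unsigned long size = 1UL << (63 - __builtin_clzl(pgsize_bitmap));
	unsigned long iova = (1UL << 40) - size;	/* last block in a 40-bit IAS */

	printf("size = 0x%lx, iova = 0x%lx\n", size, iova);
	/* size = 0x40000000, iova = 0xffc0000000: the final 1G entry, which
	 * lands in the last page of a (possibly concatenated) PGD. */
	return 0;
}

Mapping and then unmapping at that IOVA exercises the last entry of the last table page, which is exactly where an off-by-one in arm_lpae_max_entries() would surface.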