author    | Lucas De Marchi <lucas.demarchi@intel.com> | 2023-09-27 12:38:55 -0700
committer | Rodrigo Vivi <rodrigo.vivi@intel.com>      | 2023-12-21 11:41:20 -0500
commit    | 23c8495efeed0d83657de89b44a569ac406bdfad
tree      | f0065365589370e3fde292e28571b5b7221debbe
parent    | 0e5e77bd9704edf1713ebed37e2da1b4faa25a52
drm/xe/migrate: Do not hand-encode pte
Instead of hand-encoding the PTE, call a new vfunc from xe_vm to handle it.
The encoding may not be the same on every platform, so keeping it in one
place makes those platforms easier to support.
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://lore.kernel.org/r/20230927193902.2849159-5-lucas.demarchi@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
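
To illustrate the point about per-platform encodings (this sketch is not part of the patch): once the encoding sits behind the xe_pt_ops table added in the diff below, a platform with a different PTE layout only needs to supply its own encoder and ops table; callers such as the migrate code keep using vm->pt_ops->pte_encode_addr() unchanged. The "xe2" names here are invented for the example, and it simply reuses the Xe-LP helpers from xe_vm.c where the layout is assumed to match:

/* Hypothetical per-platform encoder; would sit next to xelp_pt_ops in xe_vm.c. */
static u64 xe2_pte_encode_addr(u64 addr, enum xe_cache_level cache,
                               u32 pt_level, bool devmem, u64 flags)
{
        /*
         * A real platform would apply its own bit layout here; the sketch
         * just falls back to the Xe-LP encoding.
         */
        return xelp_pte_encode_addr(addr, cache, pt_level, devmem, flags);
}

static const struct xe_pt_ops xe2_pt_ops = {
        .pte_encode_bo = xelp_pte_encode_bo,    /* reused for the sketch */
        .pte_encode_vma = xelp_pte_encode_vma,  /* reused for the sketch */
        .pte_encode_addr = xe2_pte_encode_addr,
        .pde_encode_bo = xelp_pde_encode_bo,    /* reused for the sketch */
};

A VM created for such a platform would then point vm->pt_ops at xe2_pt_ops, and no caller needs to know which encoding is in effect.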
-rw-r--r-- | drivers/gpu/drm/xe/xe_migrate.c  | 14
-rw-r--r-- | drivers/gpu/drm/xe/xe_pt_types.h |  2
-rw-r--r-- | drivers/gpu/drm/xe/xe_vm.c       | 23

3 files changed, 32 insertions, 7 deletions
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 6884e79199d5..cd4dbbf6c383 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -261,8 +261,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 
                 level = 2;
                 ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;
-                flags = XE_PAGE_RW | XE_PAGE_PRESENT | PPAT_CACHED |
-                        XE_PPGTT_PTE_DM | XE_PDPE_PS_1G;
+                flags = vm->pt_ops->pte_encode_addr(0, XE_CACHE_WB, level, true, 0);
 
                 /*
                  * Use 1GB pages, it shouldn't matter the physical amount of
@@ -483,7 +482,8 @@ static void emit_pte(struct xe_migrate *m,
                 ptes -= chunk;
 
                 while (chunk--) {
-                        u64 addr;
+                        u64 addr, flags = 0;
+                        bool devmem = false;
 
                         addr = xe_res_dma(cur) & PAGE_MASK;
                         if (is_vram) {
@@ -491,13 +491,15 @@ static void emit_pte(struct xe_migrate *m,
                                 if ((m->q->vm->flags & XE_VM_FLAG_64K) &&
                                     !(cur_ofs & (16 * 8 - 1))) {
                                         xe_tile_assert(m->tile, IS_ALIGNED(addr, SZ_64K));
-                                        addr |= XE_PTE_PS64;
+                                        flags |= XE_PTE_PS64;
                                 }
 
                                 addr += vram_region_gpu_offset(bo->ttm.resource);
-                                addr |= XE_PPGTT_PTE_DM;
+                                devmem = true;
                         }
-                        addr |= PPAT_CACHED | XE_PAGE_PRESENT | XE_PAGE_RW;
+
+                        addr = m->q->vm->pt_ops->pte_encode_addr(addr, XE_CACHE_WB,
+                                                                 0, devmem, flags);
 
                         bb->cs[bb->len++] = lower_32_bits(addr);
                         bb->cs[bb->len++] = upper_32_bits(addr);
diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h
index c58f6926fabf..64e3921a0f46 100644
--- a/drivers/gpu/drm/xe/xe_pt_types.h
+++ b/drivers/gpu/drm/xe/xe_pt_types.h
@@ -39,6 +39,8 @@ struct xe_pt_ops {
                              enum xe_cache_level cache, u32 pt_level);
         u64 (*pte_encode_vma)(u64 pte, struct xe_vma *vma,
                               enum xe_cache_level cache, u32 pt_level);
+        u64 (*pte_encode_addr)(u64 addr, enum xe_cache_level cache,
+                               u32 pt_level, bool devmem, u64 flags);
         u64 (*pde_encode_bo)(struct xe_bo *bo, u64 bo_offset,
                              const enum xe_cache_level cache);
 };
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 72e27e6809f9..1d3569097e5f 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1235,7 +1235,6 @@ static u64 pte_encode_cache(enum xe_cache_level cache)
 
 static u64 pte_encode_ps(u32 pt_level)
 {
-        /* XXX: Does hw support 1 GiB pages? */
         XE_WARN_ON(pt_level > 2);
 
         if (pt_level == 1)
@@ -1291,9 +1290,31 @@ static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
 
         return pte;
 }
 
+static u64 xelp_pte_encode_addr(u64 addr, enum xe_cache_level cache,
+                                u32 pt_level, bool devmem, u64 flags)
+{
+        u64 pte;
+
+        /* Avoid passing random bits directly as flags */
+        XE_WARN_ON(flags & ~XE_PTE_PS64);
+
+        pte = addr;
+        pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
+        pte |= pte_encode_cache(cache);
+        pte |= pte_encode_ps(pt_level);
+
+        if (devmem)
+                pte |= XE_PPGTT_PTE_DM;
+
+        pte |= flags;
+
+        return pte;
+}
+
 static const struct xe_pt_ops xelp_pt_ops = {
         .pte_encode_bo = xelp_pte_encode_bo,
         .pte_encode_vma = xelp_pte_encode_vma,
+        .pte_encode_addr = xelp_pte_encode_addr,
         .pde_encode_bo = xelp_pde_encode_bo,
 };
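
One way to read the first xe_migrate.c hunk (again, an illustrative check rather than part of the patch): for the 1 GiB identity-map entries, the new vfunc is expected to produce exactly the bits the removed hand-encoding set, assuming pte_encode_cache(XE_CACHE_WB) resolves to PPAT_CACHED and pte_encode_ps(2) to XE_PDPE_PS_1G, which is what that hunk relies on. The helper name below is made up; all of the macros it uses are already in scope in xe_migrate.c:

/*
 * Illustrative only: the vfunc result for the 1 GiB devmem case should
 * match the flags the old migrate code hand-encoded.
 */
static void xe_migrate_check_pte_encoding(struct xe_vm *vm)
{
        u64 old_flags = XE_PAGE_RW | XE_PAGE_PRESENT | PPAT_CACHED |
                        XE_PPGTT_PTE_DM | XE_PDPE_PS_1G;
        u64 new_flags = vm->pt_ops->pte_encode_addr(0, XE_CACHE_WB, 2,
                                                    true, 0);

        XE_WARN_ON(old_flags != new_flags);
}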