From 8802190f642034a919c8e757987c964d3d9df39f Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Mon, 12 Apr 2021 22:09:59 -0700 Subject: drm/i915: eliminate remaining uses of intel_device_info->gen Replace gen with the new graphics_ver value and use GRAPHICS_VER() in those places. Reviewed-by: Jani Nikula Signed-off-by: Lucas De Marchi Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20210413051002.92589-10-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 5964e67c7d36..297143511f99 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -274,7 +274,7 @@ struct i915_execbuffer { struct drm_mm_node node; /** temporary GTT binding */ unsigned long vaddr; /** Current kmap address */ unsigned long page; /** Currently mapped page index */ - unsigned int gen; /** Cached value of INTEL_GEN */ + unsigned int graphics_ver; /** Cached value of GRAPHICS_VER */ bool use_64bit_reloc : 1; bool has_llc : 1; bool has_fence : 1; @@ -1049,10 +1049,10 @@ static void reloc_cache_init(struct reloc_cache *cache, cache->page = -1; cache->vaddr = 0; /* Must be a variable in the struct to allow GCC to unroll. */ - cache->gen = INTEL_GEN(i915); + cache->graphics_ver = GRAPHICS_VER(i915); cache->has_llc = HAS_LLC(i915); cache->use_64bit_reloc = HAS_64BIT_RELOC(i915); - cache->has_fence = cache->gen < 4; + cache->has_fence = cache->graphics_ver < 4; cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment; cache->node.flags = 0; reloc_cache_clear(cache); @@ -1402,7 +1402,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, err = eb->engine->emit_bb_start(rq, batch->node.start, PAGE_SIZE, - cache->gen > 5 ? 0 : I915_DISPATCH_SECURE); + cache->graphics_ver > 5 ? 0 : I915_DISPATCH_SECURE); if (err) goto skip_request; @@ -1503,14 +1503,14 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb, u64 offset, u64 target_addr) { - const unsigned int gen = eb->reloc_cache.gen; + const unsigned int ver = eb->reloc_cache.graphics_ver; unsigned int len; u32 *batch; u64 addr; - if (gen >= 8) + if (ver >= 8) len = offset & 7 ? 
8 : 5; - else if (gen >= 4) + else if (ver >= 4) len = 4; else len = 3; @@ -1522,7 +1522,7 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb, return false; addr = gen8_canonical_addr(vma->node.start + offset); - if (gen >= 8) { + if (ver >= 8) { if (offset & 7) { *batch++ = MI_STORE_DWORD_IMM_GEN4; *batch++ = lower_32_bits(addr); @@ -1542,7 +1542,7 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb, *batch++ = lower_32_bits(target_addr); *batch++ = upper_32_bits(target_addr); } - } else if (gen >= 6) { + } else if (ver >= 6) { *batch++ = MI_STORE_DWORD_IMM_GEN4; *batch++ = 0; *batch++ = addr; @@ -1552,12 +1552,12 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb, *batch++ = 0; *batch++ = vma_phys_addr(vma, offset); *batch++ = target_addr; - } else if (gen >= 4) { + } else if (ver >= 4) { *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *batch++ = 0; *batch++ = addr; *batch++ = target_addr; - } else if (gen >= 3 && + } else if (ver >= 3 && !(IS_I915G(eb->i915) || IS_I915GM(eb->i915))) { *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; *batch++ = addr; -- cgit From d57d4a1daf5e8393e87244427c9b87dd565bd37c Mon Sep 17 00:00:00 2001 From: CQ Tang Date: Wed, 21 Apr 2021 11:46:55 +0100 Subject: drm/i915: Create stolen memory region from local memory Add "REGION_STOLEN" device info to dg1, create stolen memory region from upper portion of local device memory, starting from DSMBASE. v2: - s/drm_info/drm_dbg; userspace likely doesn't care about stolen. - mem->type is only setup after the region probe, so setting the name as stolen-local or stolen-system based on this value won't work. Split system vs local stolen setup to fix this. - kill all the region->devmem/is_devmem stuff. We already differentiate the different types of stolen so such things shouldn't be needed anymore. v3: - split stolen lmem vs smem ops(Tvrtko) - add shortcut for stolen region in i915(Tvrtko) - sanity check dsm base vs bar size(Xinyun) v4(Tvrtko): - more cleanup - add some TODOs Signed-off-by: CQ Tang Signed-off-by: Matthew Auld Cc: Tvrtko Ursulin Cc: Xinyun Liu Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20210421104658.304142-1-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 132 ++++++++++++++++++++++++----- drivers/gpu/drm/i915/gem/i915_gem_stolen.h | 3 +- drivers/gpu/drm/i915/i915_drv.h | 7 ++ drivers/gpu/drm/i915/i915_pci.c | 2 +- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_memory_region.c | 13 ++- drivers/gpu/drm/i915/intel_memory_region.h | 5 +- 7 files changed, 140 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index b0597de206de..13a7932cfe1a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -10,6 +10,7 @@ #include #include +#include "gem/i915_gem_lmem.h" #include "gem/i915_gem_region.h" #include "i915_drv.h" #include "i915_gem_stolen.h" @@ -121,6 +122,14 @@ static int i915_adjust_stolen(struct drm_i915_private *i915, } } + /* + * With stolen lmem, we don't need to check if the address range + * overlaps with the non-stolen system memory range, since lmem is local + * to the gpu. + */ + if (HAS_LMEM(i915)) + return 0; + /* * Verify that nothing else uses this physical address. 
Stolen * memory should be reserved by the BIOS and hidden from the @@ -374,8 +383,9 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915, } } -static int i915_gem_init_stolen(struct drm_i915_private *i915) +static int i915_gem_init_stolen(struct intel_memory_region *mem) { + struct drm_i915_private *i915 = mem->i915; struct intel_uncore *uncore = &i915->uncore; resource_size_t reserved_base, stolen_top; resource_size_t reserved_total, reserved_size; @@ -396,10 +406,10 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915) return 0; } - if (resource_size(&intel_graphics_stolen_res) == 0) + if (resource_size(&mem->region) == 0) return 0; - i915->dsm = intel_graphics_stolen_res; + i915->dsm = mem->region; if (i915_adjust_stolen(i915, &i915->dsm)) return 0; @@ -688,39 +698,123 @@ struct drm_i915_gem_object * i915_gem_object_create_stolen(struct drm_i915_private *i915, resource_size_t size) { - return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN_SMEM], + return i915_gem_object_create_region(i915->mm.stolen_region, size, I915_BO_ALLOC_CONTIGUOUS); } -static int init_stolen(struct intel_memory_region *mem) +static int init_stolen_smem(struct intel_memory_region *mem) { - intel_memory_region_set_name(mem, "stolen"); - /* * Initialise stolen early so that we may reserve preallocated * objects for the BIOS to KMS transition. */ - return i915_gem_init_stolen(mem->i915); + return i915_gem_init_stolen(mem); } -static void release_stolen(struct intel_memory_region *mem) +static void release_stolen_smem(struct intel_memory_region *mem) { i915_gem_cleanup_stolen(mem->i915); } -static const struct intel_memory_region_ops i915_region_stolen_ops = { - .init = init_stolen, - .release = release_stolen, +static const struct intel_memory_region_ops i915_region_stolen_smem_ops = { + .init = init_stolen_smem, + .release = release_stolen_smem, .init_object = _i915_gem_object_stolen_init, }; -struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915) +static int init_stolen_lmem(struct intel_memory_region *mem) +{ + int err; + + if (GEM_WARN_ON(resource_size(&mem->region) == 0)) + return -ENODEV; + + if (!io_mapping_init_wc(&mem->iomap, + mem->io_start, + resource_size(&mem->region))) + return -EIO; + + /* + * TODO: For stolen lmem we mostly just care about populating the dsm + * related bits and setting up the drm_mm allocator for the range. + * Perhaps split up i915_gem_init_stolen() for this. 
+ */ + err = i915_gem_init_stolen(mem); + if (err) + goto err_fini; + + return 0; + +err_fini: + io_mapping_fini(&mem->iomap); + return err; +} + +static void release_stolen_lmem(struct intel_memory_region *mem) { - return intel_memory_region_create(i915, - intel_graphics_stolen_res.start, - resource_size(&intel_graphics_stolen_res), - PAGE_SIZE, 0, - &i915_region_stolen_ops); + io_mapping_fini(&mem->iomap); + i915_gem_cleanup_stolen(mem->i915); +} + +static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = { + .init = init_stolen_lmem, + .release = release_stolen_lmem, + .init_object = _i915_gem_object_stolen_init, +}; + +struct intel_memory_region * +i915_gem_stolen_lmem_setup(struct drm_i915_private *i915) +{ + struct intel_uncore *uncore = &i915->uncore; + struct pci_dev *pdev = i915->drm.pdev; + struct intel_memory_region *mem; + resource_size_t io_start; + resource_size_t lmem_size; + u64 lmem_base; + + lmem_base = intel_uncore_read64(uncore, GEN12_DSMBASE); + if (GEM_WARN_ON(lmem_base >= pci_resource_len(pdev, 2))) + return ERR_PTR(-ENODEV); + + lmem_size = pci_resource_len(pdev, 2) - lmem_base; + io_start = pci_resource_start(pdev, 2) + lmem_base; + + mem = intel_memory_region_create(i915, lmem_base, lmem_size, + I915_GTT_PAGE_SIZE_4K, io_start, + &i915_region_stolen_lmem_ops); + if (IS_ERR(mem)) + return mem; + + /* + * TODO: consider creating common helper to just print all the + * interesting stuff from intel_memory_region, which we can use for all + * our probed regions. + */ + + drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n", + &mem->io_start); + + intel_memory_region_set_name(mem, "stolen-local"); + + return mem; +} + +struct intel_memory_region* +i915_gem_stolen_smem_setup(struct drm_i915_private *i915) +{ + struct intel_memory_region *mem; + + mem = intel_memory_region_create(i915, + intel_graphics_stolen_res.start, + resource_size(&intel_graphics_stolen_res), + PAGE_SIZE, 0, + &i915_region_stolen_smem_ops); + if (IS_ERR(mem)) + return mem; + + intel_memory_region_set_name(mem, "stolen-system"); + + return mem; } struct drm_i915_gem_object * @@ -728,7 +822,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915, resource_size_t stolen_offset, resource_size_t size) { - struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN_SMEM]; + struct intel_memory_region *mem = i915->mm.stolen_region; struct drm_i915_gem_object *obj; struct drm_mm_node *stolen; int ret; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h index b03489706796..2bec6c367b9c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h @@ -21,7 +21,8 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, u64 end); void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, struct drm_mm_node *node); -struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915); +struct intel_memory_region *i915_gem_stolen_smem_setup(struct drm_i915_private *i915); +struct intel_memory_region *i915_gem_stolen_lmem_setup(struct drm_i915_private *i915); struct drm_i915_gem_object * i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, resource_size_t size); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e20294e9227a..0b44333eb703 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -514,6 +514,13 @@ struct intel_l3_parity { }; struct 
i915_gem_mm { + /* + * Shortcut for the stolen region. This points to either + * INTEL_REGION_STOLEN_SMEM for integrated platforms, or + * INTEL_REGION_STOLEN_LMEM for discrete, or NULL if the device doesn't + * support stolen. + */ + struct intel_memory_region *stolen_region; /** Memory allocator for GTT stolen memory */ struct drm_mm stolen; /** Protects the usage of the GTT stolen memory allocator. This is diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 44e7b94db63d..d4673e43a46d 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -908,7 +908,7 @@ static const struct intel_device_info rkl_info = { }; #define DGFX_FEATURES \ - .memory_regions = REGION_SMEM | REGION_LMEM, \ + .memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \ .has_master_unit_irq = 1, \ .has_llc = 0, \ .has_snoop = 1, \ diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f41474913d27..da73dc939e58 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -12194,6 +12194,7 @@ enum skl_power_gate { #define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */ #define GEN12_GSMBASE _MMIO(0x108100) +#define GEN12_DSMBASE _MMIO(0x1080C0) /* gamt regs */ #define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4) diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c index bf837b6bb185..481a487faca6 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.c +++ b/drivers/gpu/drm/i915/intel_memory_region.c @@ -22,6 +22,10 @@ static const struct { .class = INTEL_MEMORY_STOLEN_SYSTEM, .instance = 0, }, + [INTEL_REGION_STOLEN_LMEM] = { + .class = INTEL_MEMORY_STOLEN_LOCAL, + .instance = 0, + }, }; struct intel_memory_region * @@ -278,8 +282,15 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915) case INTEL_MEMORY_SYSTEM: mem = i915_gem_shmem_setup(i915); break; + case INTEL_MEMORY_STOLEN_LOCAL: + mem = i915_gem_stolen_lmem_setup(i915); + if (!IS_ERR(mem)) + i915->mm.stolen_region = mem; + break; case INTEL_MEMORY_STOLEN_SYSTEM: - mem = i915_gem_stolen_setup(i915); + mem = i915_gem_stolen_smem_setup(i915); + if (!IS_ERR(mem)) + i915->mm.stolen_region = mem; break; default: continue; diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h index edd49067c8ca..4c8ec15af55f 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.h +++ b/drivers/gpu/drm/i915/intel_memory_region.h @@ -26,18 +26,21 @@ enum intel_memory_type { INTEL_MEMORY_SYSTEM = 0, INTEL_MEMORY_LOCAL, INTEL_MEMORY_STOLEN_SYSTEM, + INTEL_MEMORY_STOLEN_LOCAL, }; enum intel_region_id { INTEL_REGION_SMEM = 0, INTEL_REGION_LMEM, INTEL_REGION_STOLEN_SMEM, + INTEL_REGION_STOLEN_LMEM, INTEL_REGION_UNKNOWN, /* Should be last */ }; #define REGION_SMEM BIT(INTEL_REGION_SMEM) #define REGION_LMEM BIT(INTEL_REGION_LMEM) #define REGION_STOLEN_SMEM BIT(INTEL_REGION_STOLEN_SMEM) +#define REGION_STOLEN_LMEM BIT(INTEL_REGION_STOLEN_LMEM) #define I915_ALLOC_MIN_PAGE_SIZE BIT(0) #define I915_ALLOC_CONTIGUOUS BIT(1) @@ -82,7 +85,7 @@ struct intel_memory_region { u16 type; u16 instance; enum intel_region_id id; - char name[8]; + char name[16]; struct list_head reserved; -- cgit From b75947e6b29d5b33f79d7b3918747592534e3fb1 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Wed, 21 Apr 2021 11:46:56 +0100 Subject: drm/i915/stolen: treat stolen local as normal local memory Underneath it's the same stuff, so things like the PTE_LM bits for the GTT should 
just keep working as-is. Signed-off-by: Matthew Auld Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20210421104658.304142-2-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c index ce1c83c13d05..017db8f71130 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c @@ -19,7 +19,10 @@ const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = { bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj) { - return obj->ops == &i915_gem_lmem_obj_ops; + struct intel_memory_region *mr = obj->mm.region; + + return mr && (mr->type == INTEL_MEMORY_LOCAL || + mr->type == INTEL_MEMORY_STOLEN_LOCAL); } struct drm_i915_gem_object * -- cgit From f9a7b01e95d5e6c3f79f0efbbbbb2bbc52a8f3ce Mon Sep 17 00:00:00 2001 From: CQ Tang Date: Wed, 21 Apr 2021 11:46:57 +0100 Subject: drm/i915/stolen: enforce the min_page_size contract Since stolen can now be device local-memory underneath, we should try to enforce any min_page_size restrictions when allocating pages. Signed-off-by: CQ Tang Signed-off-by: Matthew Auld Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20210421104658.304142-3-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 13a7932cfe1a..b43929da8de8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -677,7 +677,8 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem, if (!stolen) return -ENOMEM; - ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096); + ret = i915_gem_stolen_insert_node(i915, stolen, size, + mem->min_page_size); if (ret) goto err_free; @@ -836,8 +837,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915, /* KISS and expect everything to be page-aligned */ if (GEM_WARN_ON(size == 0) || - GEM_WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) || - GEM_WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT))) + GEM_WARN_ON(!IS_ALIGNED(size, mem->min_page_size)) || + GEM_WARN_ON(!IS_ALIGNED(stolen_offset, mem->min_page_size))) return ERR_PTR(-EINVAL); stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); -- cgit From ae7f0dc105c4e5248e6e2e2299e52d9a289e1215 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Wed, 21 Apr 2021 11:46:58 +0100 Subject: drm/i915/stolen: actually mark as contiguous Stolen memory is always allocated as physically contiguous pages, so mark the object flags as such. It looks like the flags were previously just ignored so this had no effect. In the future we might want to add the proper plumbing for passing the flags all the way down from the caller, but for now we don't have a use for that.
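[To make the contiguity guarantee concrete, a minimal sketch (illustrative only, not part of this patch; the helper name stolen_page_dma is hypothetical) of why callers can rely on it: the whole object comes from a single drm_mm block, so the sg table built for it has one entry and any page's DMA address is plain arithmetic on the first.

static dma_addr_t stolen_page_dma(struct drm_i915_gem_object *obj,
				  pgoff_t n)
{
	/* Stolen objects are backed by one contiguous block, so the
	 * sg table has a single entry covering the whole range and
	 * page n sits at a fixed offset from page 0.
	 */
	GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
	return sg_dma_address(obj->mm.pages->sgl) +
	       ((dma_addr_t)n << PAGE_SHIFT);
}
]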
Signed-off-by: Matthew Auld Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20210421104658.304142-4-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index b43929da8de8..c5b64b2400e8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -637,10 +637,17 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem, { static struct lock_class_key lock_class; unsigned int cache_level; + unsigned int flags; int err; + /* + * Stolen objects are always physically contiguous since we just + * allocate one big block underneath using the drm_mm range allocator. + */ + flags = I915_BO_ALLOC_CONTIGUOUS; + drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size); - i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, 0); + i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags); obj->stolen = stolen; obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; @@ -699,8 +706,7 @@ struct drm_i915_gem_object * i915_gem_object_create_stolen(struct drm_i915_private *i915, resource_size_t size) { - return i915_gem_object_create_region(i915->mm.stolen_region, - size, I915_BO_ALLOC_CONTIGUOUS); + return i915_gem_object_create_region(i915->mm.stolen_region, size, 0); } static int init_stolen_smem(struct intel_memory_region *mem) -- cgit From 772f7bb75dffd4ec90eaf411f9e09dc2429f5c81 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 21 Apr 2021 14:09:38 +0200 Subject: drm/i915: Fix docbook descriptions for i915_gem_shrinker Fixes the following htmldocs warning: drivers/gpu/drm/i915/gem/i915_gem_shrinker.c:102: warning: Function parameter or member 'ww' not described in 'i915_gem_shrink' Fixes: cf41a8f1dc1e ("drm/i915: Finally remove obj->mm.lock.") Reported-by: Stephen Rothwell Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20210421120938.546076-1-maarten.lankhorst@linux.intel.com Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 3e248d3bd869..4f9c8d3021ab 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -70,6 +70,7 @@ static void try_to_writeback(struct drm_i915_gem_object *obj, /** * i915_gem_shrink - Shrink buffer object caches + * @ww: i915 gem ww acquire ctx, or NULL * @i915: i915 device * @target: amount of memory to make available, in pages * @nr_scanned: optional output for number of pages scanned (incremental) -- cgit From ed52c62d386f764194e0184fdb905d5f24194cae Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 21 Apr 2021 18:33:58 +0300 Subject: drm/i915: Avoid div-by-zero on gen2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Gen2 tiles are 2KiB in size so i915_gem_object_get_tile_row_size() can in fact return <4KiB, which leads to div-by-zero here. Avoid that. Not sure i915_gem_object_get_tile_row_size() is entirely sane anyway since it doesn't account for the different tile layouts on i8xx/i915... 
I'm not able to hit this before commit 6846895fde05 ("drm/i915: Replace PIN_NONFAULT with calls to PIN_NOEVICT") and it looks like I also need to run recent version of Mesa. With those in place xonotic trips on this quite easily on my 85x. Cc: stable@vger.kernel.org Reviewed-by: Chris Wilson Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20210421153401.13847-2-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 2561a2f1e54f..8598a1c78a4c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -189,7 +189,7 @@ compute_partial_view(const struct drm_i915_gem_object *obj, struct i915_ggtt_view view; if (i915_gem_object_is_tiled(obj)) - chunk = roundup(chunk, tile_row_pages(obj)); + chunk = roundup(chunk, tile_row_pages(obj) ?: 1); view.type = I915_GGTT_VIEW_PARTIAL; view.partial.offset = rounddown(page_offset, chunk); -- cgit From 529b9ec809a0a073192e5dc757fc383e11926661 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Tue, 27 Apr 2021 09:54:13 +0100 Subject: drm/i915/gtt: map the PD up front We need to generalise our accessor for the page directories and tables from using the simple kmap_atomic to support local memory, and this setup must be done on acquisition of the backing storage prior to entering fence execution contexts. Here we replace the kmap with the object mapping code that for simple single page shmemfs object will return a plain kmap, that is then kept for the lifetime of the page directory. Note that keeping the mapping around is a potential concern here, since while the vma is pinned the mapping remains there for the PDs underneath, or at least until the used_count reaches zero, at which point we can safely destroy the mapping. For 32b this will be even worse since the address space is more limited, but since this change mostly impacts full ppGTT platforms, the justification is that for modern platforms we shouldn't care too much about 32b. 
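[Reduced to its essentials, the change swaps a transient per-access mapping for one created when the backing store is acquired; a simplified sketch of the pattern, using only names from the diff below:

	/* Before: short-lived atomic kmap around every PTE write,
	 * no sleeping allowed while it is held.
	 */
	vaddr = kmap_atomic_px(pt);
	vaddr[idx] = pte;
	kunmap_atomic(vaddr);

	/* After: map once up front (may allocate/sleep, hence done
	 * before entering any fence execution context)...
	 */
	err = map_pt_dma(vm, pt->base);

	/* ...then later accesses just reuse the cached mapping. */
	vaddr = px_vaddr(pt);
	vaddr[idx] = pte;
]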
Signed-off-by: Matthew Auld Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20210427085417.120246-3-matthew.auld@intel.com --- .../gpu/drm/i915/gem/selftests/i915_gem_context.c | 11 +---- drivers/gpu/drm/i915/gt/gen6_ppgtt.c | 11 ++--- drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 26 +++++------- drivers/gpu/drm/i915/gt/intel_ggtt.c | 2 +- drivers/gpu/drm/i915/gt/intel_gtt.c | 48 ++++++++++------------ drivers/gpu/drm/i915/gt/intel_gtt.h | 11 ++--- drivers/gpu/drm/i915/gt/intel_ppgtt.c | 7 ++-- drivers/gpu/drm/i915/i915_vma.c | 3 +- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 10 ++--- drivers/gpu/drm/i915/selftests/i915_perf.c | 3 +- 10 files changed, 54 insertions(+), 78 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 5fef592390cb..ce70d0a3afb2 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -1740,7 +1740,6 @@ out: static int check_scratch_page(struct i915_gem_context *ctx, u32 *out) { struct i915_address_space *vm; - struct page *page; u32 *vaddr; int err = 0; @@ -1748,24 +1747,18 @@ static int check_scratch_page(struct i915_gem_context *ctx, u32 *out) if (!vm) return -ENODEV; - page = __px_page(vm->scratch[0]); - if (!page) { + if (!vm->scratch[0]) { pr_err("No scratch page!\n"); return -EINVAL; } - vaddr = kmap(page); - if (!vaddr) { - pr_err("No (mappable) scratch page!\n"); - return -EINVAL; - } + vaddr = __px_vaddr(vm->scratch[0]); memcpy(out, vaddr, sizeof(*out)); if (memchr_inv(vaddr, *out, PAGE_SIZE)) { pr_err("Inconsistent initial state of scratch page!\n"); err = -EINVAL; } - kunmap(page); return err; } diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c index e08dff376339..21b1085769be 100644 --- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c @@ -96,9 +96,8 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, * entries back to scratch. 
*/ - vaddr = kmap_atomic_px(pt); + vaddr = px_vaddr(pt); memset32(vaddr + pte, scratch_pte, count); - kunmap_atomic(vaddr); pte = 0; } @@ -120,7 +119,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, GEM_BUG_ON(!pd->entry[act_pt]); - vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt)); + vaddr = px_vaddr(i915_pt_entry(pd, act_pt)); do { GEM_BUG_ON(sg_dma_len(iter.sg) < I915_GTT_PAGE_SIZE); vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma); @@ -136,12 +135,10 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, } if (++act_pte == GEN6_PTES) { - kunmap_atomic(vaddr); - vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt)); + vaddr = px_vaddr(i915_pt_entry(pd, ++act_pt)); act_pte = 0; } } while (1); - kunmap_atomic(vaddr); vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; } @@ -235,7 +232,7 @@ static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt) goto err_scratch0; } - ret = pin_pt_dma(vm, vm->scratch[1]); + ret = map_pt_dma(vm, vm->scratch[1]); if (ret) goto err_scratch1; diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c index 74bf6fc8461f..90adfee52b41 100644 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c @@ -242,11 +242,10 @@ static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm, atomic_read(&pt->used)); GEM_BUG_ON(!count || count >= atomic_read(&pt->used)); - vaddr = kmap_atomic_px(pt); + vaddr = px_vaddr(pt); memset64(vaddr + gen8_pd_index(start, 0), vm->scratch[0]->encode, count); - kunmap_atomic(vaddr); atomic_sub(count, &pt->used); start += count; @@ -375,7 +374,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt, gen8_pte_t *vaddr; pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2)); - vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1))); + vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1))); do { GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE); vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma; @@ -402,12 +401,10 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt, } clflush_cache_range(vaddr, PAGE_SIZE); - kunmap_atomic(vaddr); - vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1))); + vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1))); } } while (1); clflush_cache_range(vaddr, PAGE_SIZE); - kunmap_atomic(vaddr); return idx; } @@ -442,7 +439,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma, encode |= GEN8_PDE_PS_2M; page_size = I915_GTT_PAGE_SIZE_2M; - vaddr = kmap_atomic_px(pd); + vaddr = px_vaddr(pd); } else { struct i915_page_table *pt = i915_pt_entry(pd, __gen8_pte_index(start, 1)); @@ -457,7 +454,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma, rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)) maybe_64K = __gen8_pte_index(start, 1); - vaddr = kmap_atomic_px(pt); + vaddr = px_vaddr(pt); } do { @@ -491,7 +488,6 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma, } while (rem >= page_size && index < I915_PDES); clflush_cache_range(vaddr, PAGE_SIZE); - kunmap_atomic(vaddr); /* * Is it safe to mark the 2M block as 64K? 
-- Either we have @@ -505,9 +501,8 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma, !iter->sg && IS_ALIGNED(vma->node.start + vma->node.size, I915_GTT_PAGE_SIZE_2M)))) { - vaddr = kmap_atomic_px(pd); + vaddr = px_vaddr(pd); vaddr[maybe_64K] |= GEN8_PDE_IPS_64K; - kunmap_atomic(vaddr); page_size = I915_GTT_PAGE_SIZE_64K; /* @@ -523,12 +518,11 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma, u16 i; encode = vma->vm->scratch[0]->encode; - vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K)); + vaddr = px_vaddr(i915_pt_entry(pd, maybe_64K)); for (i = 1; i < index; i += 16) memset64(vaddr + i, encode, 15); - kunmap_atomic(vaddr); } } @@ -602,7 +596,7 @@ static int gen8_init_scratch(struct i915_address_space *vm) if (IS_ERR(obj)) goto free_scratch; - ret = pin_pt_dma(vm, obj); + ret = map_pt_dma(vm, obj); if (ret) { i915_gem_object_put(obj); goto free_scratch; @@ -639,7 +633,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) if (IS_ERR(pde)) return PTR_ERR(pde); - err = pin_pt_dma(vm, pde->pt.base); + err = map_pt_dma(vm, pde->pt.base); if (err) { free_pd(vm, pde); return err; @@ -674,7 +668,7 @@ gen8_alloc_top_pd(struct i915_address_space *vm) goto err_pd; } - err = pin_pt_dma(vm, pd->pt.base); + err = map_pt_dma(vm, pd->pt.base); if (err) goto err_pd; diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index 670c1271e7d5..d94628b9d89e 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -657,7 +657,7 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt) goto err_ppgtt; i915_gem_object_lock(ppgtt->vm.scratch[0], NULL); - err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash); + err = i915_vm_map_pt_stash(&ppgtt->vm, &stash); i915_gem_object_unlock(ppgtt->vm.scratch[0]); if (err) goto err_stash; diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 941f8af016d6..d386b89e2758 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -25,27 +25,25 @@ struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz) return obj; } -int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj) +int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj) { - int err; + void *vaddr; - i915_gem_object_lock(obj, NULL); - err = i915_gem_object_pin_pages(obj); - i915_gem_object_unlock(obj); - if (err) - return err; + vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); i915_gem_object_make_unshrinkable(obj); return 0; } -int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj) +int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj) { - int err; + void *vaddr; - err = i915_gem_object_pin_pages(obj); - if (err) - return err; + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); i915_gem_object_make_unshrinkable(obj); return 0; @@ -155,6 +153,14 @@ void clear_pages(struct i915_vma *vma) memset(&vma->page_sizes, 0, sizeof(vma->page_sizes)); } +void *__px_vaddr(struct drm_i915_gem_object *p) +{ + enum i915_map_type type; + + GEM_BUG_ON(!i915_gem_object_has_pages(p)); + return page_unpack_bits(p->mm.mapping, &type); +} + dma_addr_t __px_dma(struct drm_i915_gem_object *p) { GEM_BUG_ON(!i915_gem_object_has_pages(p)); @@ -170,32 +176,22 @@ struct page *__px_page(struct drm_i915_gem_object *p) void fill_page_dma(struct 
drm_i915_gem_object *p, const u64 val, unsigned int count) { - struct page *page = __px_page(p); - void *vaddr; + void *vaddr = __px_vaddr(p); - vaddr = kmap(page); memset64(vaddr, val, count); clflush_cache_range(vaddr, PAGE_SIZE); - kunmap(page); } static void poison_scratch_page(struct drm_i915_gem_object *scratch) { - struct sgt_iter sgt; - struct page *page; + void *vaddr = __px_vaddr(scratch); u8 val; val = 0; if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) val = POISON_FREE; - for_each_sgt_page(page, sgt, scratch->mm.pages) { - void *vaddr; - - vaddr = kmap(page); - memset(vaddr, val, PAGE_SIZE); - kunmap(page); - } + memset(vaddr, val, scratch->base.size); } int setup_scratch_page(struct i915_address_space *vm) @@ -225,7 +221,7 @@ int setup_scratch_page(struct i915_address_space *vm) if (IS_ERR(obj)) goto skip; - if (pin_pt_dma(vm, obj)) + if (map_pt_dma(vm, obj)) goto skip_obj; /* We need a single contiguous page for our scratch */ diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index e67e34e17913..40e486704558 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -180,6 +180,9 @@ struct page *__px_page(struct drm_i915_gem_object *p); dma_addr_t __px_dma(struct drm_i915_gem_object *p); #define px_dma(px) (__px_dma(px_base(px))) +void *__px_vaddr(struct drm_i915_gem_object *p); +#define px_vaddr(px) (__px_vaddr(px_base(px))) + #define px_pt(px) \ __px_choose_expr(px, struct i915_page_table *, __x, \ __px_choose_expr(px, struct i915_page_directory *, &__x->pt, \ @@ -511,8 +514,6 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt); void i915_ggtt_suspend(struct i915_ggtt *gtt); void i915_ggtt_resume(struct i915_ggtt *ggtt); -#define kmap_atomic_px(px) kmap_atomic(__px_page(px_base(px))) - void fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count); @@ -530,8 +531,8 @@ struct i915_page_table *alloc_pt(struct i915_address_space *vm); struct i915_page_directory *alloc_pd(struct i915_address_space *vm); struct i915_page_directory *__alloc_pd(int npde); -int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj); -int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj); +int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj); +int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj); void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl); @@ -578,7 +579,7 @@ void setup_private_pat(struct intel_uncore *uncore); int i915_vm_alloc_pt_stash(struct i915_address_space *vm, struct i915_vm_pt_stash *stash, u64 size); -int i915_vm_pin_pt_stash(struct i915_address_space *vm, +int i915_vm_map_pt_stash(struct i915_address_space *vm, struct i915_vm_pt_stash *stash); void i915_vm_free_pt_stash(struct i915_address_space *vm, struct i915_vm_pt_stash *stash); diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c index 014ae8ac4480..4e3d80c2295c 100644 --- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c @@ -87,11 +87,10 @@ write_dma_entry(struct drm_i915_gem_object * const pdma, const unsigned short idx, const u64 encoded_entry) { - u64 * const vaddr = kmap_atomic(__px_page(pdma)); + u64 * const vaddr = __px_vaddr(pdma); vaddr[idx] = encoded_entry; clflush_cache_range(&vaddr[idx], sizeof(u64)); - kunmap_atomic(vaddr); } void @@ -258,7 +257,7 @@ int i915_vm_alloc_pt_stash(struct i915_address_space *vm, 
return 0; } -int i915_vm_pin_pt_stash(struct i915_address_space *vm, +int i915_vm_map_pt_stash(struct i915_address_space *vm, struct i915_vm_pt_stash *stash) { struct i915_page_table *pt; @@ -266,7 +265,7 @@ int i915_vm_pin_pt_stash(struct i915_address_space *vm, for (n = 0; n < ARRAY_SIZE(stash->pt); n++) { for (pt = stash->pt[n]; pt; pt = pt->stash) { - err = pin_pt_dma_locked(vm, pt->base); + err = map_pt_dma_locked(vm, pt->base); if (err) return err; } } diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 07490db51cdc..eb01899ac6b7 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -905,8 +905,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, if (err) goto err_fence; - err = i915_vm_pin_pt_stash(vma->vm, - &work->stash); + err = i915_vm_map_pt_stash(vma->vm, &work->stash); if (err) goto err_fence; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 2e4f06eaacc1..e060e455e9f6 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -186,7 +186,7 @@ retry: if (err) goto err_ppgtt_cleanup; - err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash); + err = i915_vm_map_pt_stash(&ppgtt->vm, &stash); if (err) { i915_vm_free_pt_stash(&ppgtt->vm, &stash); goto err_ppgtt_cleanup; } @@ -208,7 +208,7 @@ retry: if (err) goto err_ppgtt_cleanup; - err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash); + err = i915_vm_map_pt_stash(&ppgtt->vm, &stash); if (err) { i915_vm_free_pt_stash(&ppgtt->vm, &stash); goto err_ppgtt_cleanup; } @@ -325,11 +325,10 @@ retry: BIT_ULL(size))) goto alloc_vm_end; - err = i915_vm_pin_pt_stash(vm, &stash); + err = i915_vm_map_pt_stash(vm, &stash); if (!err) vm->allocate_va_range(vm, &stash, addr, BIT_ULL(size)); - i915_vm_free_pt_stash(vm, &stash); alloc_vm_end: if (err == -EDEADLK) { @@ -1967,10 +1966,9 @@ retry: if (err) goto end_ww; - err = i915_vm_pin_pt_stash(vm, &stash); + err = i915_vm_map_pt_stash(vm, &stash); if (!err) vm->allocate_va_range(vm, &stash, offset, chunk_size); - i915_vm_free_pt_stash(vm, &stash); end_ww: if (err == -EDEADLK) { diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c index e9d86dab8677..bfb0290967a1 100644 --- a/drivers/gpu/drm/i915/selftests/i915_perf.c +++ b/drivers/gpu/drm/i915/selftests/i915_perf.c @@ -307,7 +307,7 @@ static int live_noa_gpr(void *arg) } /* Poison the ce->vm so we detect writes not to the GGTT gt->scratch */ - scratch = kmap(__px_page(ce->vm->scratch[0])); + scratch = __px_vaddr(ce->vm->scratch[0]); memset(scratch, POISON_FREE, PAGE_SIZE); rq = intel_context_create_request(ce); @@ -405,7 +405,6 @@ static int live_noa_gpr(void *arg) out_rq: i915_request_put(rq); out_ce: - kunmap(__px_page(ce->vm->scratch[0])); intel_context_put(ce); out: stream_destroy(stream); -- cgit From 4bc91dbde0da2f88b1f3660e8d17dacd2e929a62 Mon Sep 17 00:00:00 2001 From: Anusha Srivatsa Date: Tue, 27 Apr 2021 09:54:16 +0100 Subject: drm/i915/lmem: Bypass aperture when lmem is available MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the scenario where local memory is available, we have to rely on CPU access via lmem directly instead of aperture. v2: gmch is only relevant for much older hw, therefore we can drop the has_aperture check since it should always be present on such platforms.
(Chris) Cc: Ville Syrjälä Cc: Dhinakaran Pandiyan Cc: Maarten Lankhorst Cc: Chris P Wilson Cc: Daniel Vetter Cc: Joonas Lahtinen Cc: Daniele Ceraolo Spurio Cc: CQ Tang Signed-off-by: Anusha Srivatsa Signed-off-by: Matthew Auld Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20210427085417.120246-6-matthew.auld@intel.com --- drivers/gpu/drm/i915/display/intel_fbdev.c | 22 +++++++++++++++------- drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 15 +++++++++++++++ drivers/gpu/drm/i915/gem/i915_gem_lmem.h | 5 +++++ drivers/gpu/drm/i915/i915_vma.c | 25 +++++++++++++++++++------ 4 files changed, 54 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index 2b37959da747..4af40229f5ec 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -139,14 +139,22 @@ static int intelfb_alloc(struct drm_fb_helper *helper, size = mode_cmd.pitches[0] * mode_cmd.height; size = PAGE_ALIGN(size); - /* If the FB is too big, just don't use it since fbdev is not very - * important and we should probably use that space with FBC or other - * features. */ obj = ERR_PTR(-ENODEV); - if (size * 2 < dev_priv->stolen_usable_size) - obj = i915_gem_object_create_stolen(dev_priv, size); - if (IS_ERR(obj)) - obj = i915_gem_object_create_shmem(dev_priv, size); + if (HAS_LMEM(dev_priv)) { + obj = i915_gem_object_create_lmem(dev_priv, size, + I915_BO_ALLOC_CONTIGUOUS); + } else { + /* + * If the FB is too big, just don't use it since fbdev is not very + * important and we should probably use that space with FBC or other + * features. + */ + if (size * 2 < dev_priv->stolen_usable_size) + obj = i915_gem_object_create_stolen(dev_priv, size); + if (IS_ERR(obj)) + obj = i915_gem_object_create_shmem(dev_priv, size); + } + if (IS_ERR(obj)) { drm_err(&dev_priv->drm, "failed to allocate framebuffer\n"); return PTR_ERR(obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c index 017db8f71130..f44bdd08f7cb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c @@ -17,6 +17,21 @@ const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = { .release = i915_gem_object_release_memory_region, }; +void __iomem * +i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj, + unsigned long n, + unsigned long size) +{ + resource_size_t offset; + + GEM_BUG_ON(!i915_gem_object_is_contiguous(obj)); + + offset = i915_gem_object_get_dma_address(obj, n); + offset -= obj->mm.region->region.start; + + return io_mapping_map_wc(&obj->mm.region->iomap, offset, size); +} + bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj) { struct intel_memory_region *mr = obj->mm.region; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h index 036d53c01de9..fac6bc5a5ebb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h @@ -14,6 +14,11 @@ struct intel_memory_region; extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops; +void __iomem * +i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj, + unsigned long n, + unsigned long size); + bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj); struct drm_i915_gem_object * diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index eb01899ac6b7..468317e3b477 100644 --- 
a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -27,6 +27,7 @@ #include "display/intel_frontbuffer.h" +#include "gem/i915_gem_lmem.h" #include "gt/intel_engine.h" #include "gt/intel_engine_heartbeat.h" #include "gt/intel_gt.h" @@ -448,9 +449,11 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) void __iomem *ptr; int err; - if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) { - err = -ENODEV; - goto err; + if (!i915_gem_object_is_lmem(vma->obj)) { + if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) { + err = -ENODEV; + goto err; + } } GEM_BUG_ON(!i915_vma_is_ggtt(vma)); @@ -458,9 +461,19 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) ptr = READ_ONCE(vma->iomap); if (ptr == NULL) { - ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap, - vma->node.start, - vma->node.size); + /* + * TODO: consider just using i915_gem_object_pin_map() for lmem + * instead, which already supports mapping non-contiguous chunks + * of pages, that way we can also drop the + * I915_BO_ALLOC_CONTIGUOUS when allocating the object. + */ + if (i915_gem_object_is_lmem(vma->obj)) + ptr = i915_gem_object_lmem_io_map(vma->obj, 0, + vma->obj->base.size); + else + ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap, + vma->node.start, + vma->node.size); if (ptr == NULL) { err = -ENOMEM; goto err; -- cgit From ff2d0ba25ad68b97e3260f16c56113493bc58656 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 27 Apr 2021 19:48:57 +0200 Subject: drm/i915/gem: Remove reference to struct drm_device.pdev MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit References to struct drm_device.pdev should not be used any longer as the field will be moved into the struct's legacy section. Add a fix for the rsp commit. v2: * fix an error in the commit description (Michael) Signed-off-by: Thomas Zimmermann Reviewed-by: Jani Nikula Fixes: d57d4a1daf5e ("drm/i915: Create stolen memory region from local memory") Cc: CQ Tang Cc: Matthew Auld Cc: Tvrtko Ursulin Cc: Xinyun Liu Cc: Tvrtko Ursulin Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Rodrigo Vivi Cc: Chris Wilson Cc: Mika Kuoppala Cc: Daniel Vetter Cc: Maarten Lankhorst Cc: "Thomas Hellström" Cc: "Gustavo A. R. Silva" Cc: Dan Carpenter Cc: intel-gfx@lists.freedesktop.org Reviewed-by: Michael J. Ruhl Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20210427174857.7862-1-tzimmermann@suse.de --- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index c5b64b2400e8..e1a32672bbe8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -773,7 +773,7 @@ struct intel_memory_region * i915_gem_stolen_lmem_setup(struct drm_i915_private *i915) { struct intel_uncore *uncore = &i915->uncore; - struct pci_dev *pdev = i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); struct intel_memory_region *mem; resource_size_t io_start; resource_size_t lmem_size; -- cgit From bc6f80cce9ae7c83a4a9082c342fd733a4492928 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Mon, 26 Apr 2021 12:23:51 +0200 Subject: drm/i915: Use trylock in shrinker for ggtt on bsw vt-d and bxt, v2. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The stop_machine() lock may allocate memory, but is called inside vm->mutex, which is taken in the shrinker. This will cause a lockdep splat, as can be seen below: <4>[ 462.585762] ====================================================== <4>[ 462.585768] WARNING: possible circular locking dependency detected <4>[ 462.585773] 5.12.0-rc5-CI-Trybot_7644+ #1 Tainted: G U <4>[ 462.585779] ------------------------------------------------------ <4>[ 462.585783] i915_selftest/5540 is trying to acquire lock: <4>[ 462.585788] ffffffff826440b0 (cpu_hotplug_lock){++++}-{0:0}, at: stop_machine+0x12/0x30 <4>[ 462.585814] but task is already holding lock: <4>[ 462.585818] ffff888125369c70 (&vm->mutex/1){+.+.}-{3:3}, at: i915_vma_pin_ww+0x38e/0xb40 [i915] <4>[ 462.586301] which lock already depends on the new lock. <4>[ 462.586305] the existing dependency chain (in reverse order) is: <4>[ 462.586309] -> #2 (&vm->mutex/1){+.+.}-{3:3}: <4>[ 462.586323] i915_gem_shrinker_taints_mutex+0x2d/0x50 [i915] <4>[ 462.586719] i915_address_space_init+0x12d/0x130 [i915] <4>[ 462.587092] ppgtt_init+0x4e/0x80 [i915] <4>[ 462.587467] gen8_ppgtt_create+0x3e/0x5c0 [i915] <4>[ 462.587828] i915_ppgtt_create+0x28/0xf0 [i915] <4>[ 462.588203] intel_gt_init+0x123/0x370 [i915] <4>[ 462.588572] i915_gem_init+0x129/0x1f0 [i915] <4>[ 462.588971] i915_driver_probe+0x753/0xd80 [i915] <4>[ 462.589320] i915_pci_probe+0x43/0x1d0 [i915] <4>[ 462.589671] pci_device_probe+0x9e/0x110 <4>[ 462.589680] really_probe+0xea/0x410 <4>[ 462.589690] driver_probe_device+0xd9/0x140 <4>[ 462.589697] device_driver_attach+0x4a/0x50 <4>[ 462.589704] __driver_attach+0x83/0x140 <4>[ 462.589711] bus_for_each_dev+0x75/0xc0 <4>[ 462.589718] bus_add_driver+0x14b/0x1f0 <4>[ 462.589724] driver_register+0x66/0xb0 <4>[ 462.589731] i915_init+0x70/0x87 [i915] <4>[ 462.590053] do_one_initcall+0x56/0x2e0 <4>[ 462.590061] do_init_module+0x55/0x200 <4>[ 462.590068] load_module+0x2703/0x2990 <4>[ 462.590074] __do_sys_finit_module+0xad/0x110 <4>[ 462.590080] do_syscall_64+0x33/0x80 <4>[ 462.590089] entry_SYSCALL_64_after_hwframe+0x44/0xae <4>[ 462.590096] -> #1 (fs_reclaim){+.+.}-{0:0}: <4>[ 462.590109] fs_reclaim_acquire+0x9f/0xd0 <4>[ 462.590118] kmem_cache_alloc_trace+0x3d/0x430 <4>[ 462.590126] intel_cpuc_prepare+0x3b/0x1b0 <4>[ 462.590133] cpuhp_invoke_callback+0x9e/0x890 <4>[ 462.590141] _cpu_up+0xa4/0x130 <4>[ 462.590147] cpu_up+0x82/0x90 <4>[ 462.590153] bringup_nonboot_cpus+0x4a/0x60 <4>[ 462.590159] smp_init+0x21/0x5c <4>[ 462.590167] kernel_init_freeable+0x8a/0x1b7 <4>[ 462.590175] kernel_init+0x5/0xff <4>[ 462.590181] ret_from_fork+0x22/0x30 <4>[ 462.590187] -> #0 (cpu_hotplug_lock){++++}-{0:0}: <4>[ 462.590199] __lock_acquire+0x1520/0x2590 <4>[ 462.590207] lock_acquire+0xd1/0x3d0 <4>[ 462.590213] cpus_read_lock+0x39/0xc0 <4>[ 462.590219] stop_machine+0x12/0x30 <4>[ 462.590226] bxt_vtd_ggtt_insert_entries__BKL+0x36/0x50 [i915] <4>[ 462.590601] ggtt_bind_vma+0x5d/0x80 [i915] <4>[ 462.590970] i915_vma_bind+0xdc/0x1c0 [i915] <4>[ 462.591374] i915_vma_pin_ww+0x435/0xb40 [i915] <4>[ 462.591779] make_obj_busy+0xcb/0x330 [i915] <4>[ 462.592170] igt_mmap_offset_exhaustion+0x45f/0x4c0 [i915] <4>[ 462.592562] __i915_subtests.cold.7+0x42/0x92 [i915] <4>[ 462.592995] __run_selftests.part.3+0x10d/0x172 [i915] <4>[ 462.593428] i915_live_selftests.cold.5+0x1f/0x47 [i915] <4>[ 462.593860] i915_pci_probe+0x93/0x1d0 [i915] <4>[ 462.594210] pci_device_probe+0x9e/0x110 <4>[ 462.594217] 
really_probe+0xea/0x410 <4>[ 462.594226] driver_probe_device+0xd9/0x140 <4>[ 462.594233] device_driver_attach+0x4a/0x50 <4>[ 462.594240] __driver_attach+0x83/0x140 <4>[ 462.594247] bus_for_each_dev+0x75/0xc0 <4>[ 462.594254] bus_add_driver+0x14b/0x1f0 <4>[ 462.594260] driver_register+0x66/0xb0 <4>[ 462.594267] i915_init+0x70/0x87 [i915] <4>[ 462.594586] do_one_initcall+0x56/0x2e0 <4>[ 462.594592] do_init_module+0x55/0x200 <4>[ 462.594599] load_module+0x2703/0x2990 <4>[ 462.594605] __do_sys_finit_module+0xad/0x110 <4>[ 462.594612] do_syscall_64+0x33/0x80 <4>[ 462.594618] entry_SYSCALL_64_after_hwframe+0x44/0xae <4>[ 462.594625] other info that might help us debug this: <4>[ 462.594629] Chain exists of: cpu_hotplug_lock --> fs_reclaim --> &vm->mutex/1 <4>[ 462.594645] Possible unsafe locking scenario: <4>[ 462.594648] CPU0 CPU1 <4>[ 462.594652] ---- ---- <4>[ 462.594655] lock(&vm->mutex/1); <4>[ 462.594664] lock(fs_reclaim); <4>[ 462.594671] lock(&vm->mutex/1); <4>[ 462.594679] lock(cpu_hotplug_lock); <4>[ 462.594686] *** DEADLOCK *** <4>[ 462.594690] 4 locks held by i915_selftest/5540: <4>[ 462.594696] #0: ffff888100fbc240 (&dev->mutex){....}-{3:3}, at: device_driver_attach+0x18/0x50 <4>[ 462.594715] #1: ffffc900006cb9a0 (reservation_ww_class_acquire){+.+.}-{0:0}, at: make_obj_busy+0x81/0x330 [i915] <4>[ 462.595118] #2: ffff88812a6081e8 (reservation_ww_class_mutex){+.+.}-{3:3}, at: make_obj_busy+0x21f/0x330 [i915] <4>[ 462.595519] #3: ffff888125369c70 (&vm->mutex/1){+.+.}-{3:3}, at: i915_vma_pin_ww+0x38e/0xb40 [i915] <4>[ 462.595934] stack backtrace: <4>[ 462.595939] CPU: 0 PID: 5540 Comm: i915_selftest Tainted: G U 5.12.0-rc5-CI-Trybot_7644+ #1 <4>[ 462.595947] Hardware name: GOOGLE Kefka/Kefka, BIOS MrChromebox 02/04/2018 <4>[ 462.595952] Call Trace: <4>[ 462.595961] dump_stack+0x7f/0xad <4>[ 462.595974] check_noncircular+0x12e/0x150 <4>[ 462.595982] ? save_stack.isra.17+0x3f/0x70 <4>[ 462.595991] ? drm_mm_insert_node_in_range+0x34a/0x5b0 <4>[ 462.596000] ? i915_vma_pin_ww+0x9ec/0xb40 [i915] <4>[ 462.596410] __lock_acquire+0x1520/0x2590 <4>[ 462.596419] ? do_init_module+0x55/0x200 <4>[ 462.596429] lock_acquire+0xd1/0x3d0 <4>[ 462.596435] ? stop_machine+0x12/0x30 <4>[ 462.596445] ? gen8_ggtt_insert_entries+0xf0/0xf0 [i915] <4>[ 462.596816] cpus_read_lock+0x39/0xc0 <4>[ 462.596824] ? stop_machine+0x12/0x30 <4>[ 462.596831] stop_machine+0x12/0x30 <4>[ 462.596839] bxt_vtd_ggtt_insert_entries__BKL+0x36/0x50 [i915] <4>[ 462.597210] ggtt_bind_vma+0x5d/0x80 [i915] <4>[ 462.597580] i915_vma_bind+0xdc/0x1c0 [i915] <4>[ 462.597986] i915_vma_pin_ww+0x435/0xb40 [i915] <4>[ 462.598395] ? make_obj_busy+0xcb/0x330 [i915] <4>[ 462.598786] make_obj_busy+0xcb/0x330 [i915] <4>[ 462.599180] ? 0xffffffff81000000 <4>[ 462.599187] ? debug_mutex_unlock+0x50/0xa0 <4>[ 462.599198] igt_mmap_offset_exhaustion+0x45f/0x4c0 [i915] <4>[ 462.599592] __i915_subtests.cold.7+0x42/0x92 [i915] <4>[ 462.600026] ? i915_perf_selftests+0x20/0x20 [i915] <4>[ 462.600422] ? __i915_nop_setup+0x10/0x10 [i915] <4>[ 462.600820] __run_selftests.part.3+0x10d/0x172 [i915] <4>[ 462.601253] i915_live_selftests.cold.5+0x1f/0x47 [i915] <4>[ 462.601686] i915_pci_probe+0x93/0x1d0 [i915] <4>[ 462.602037] ? _raw_spin_unlock_irqrestore+0x3d/0x60 <4>[ 462.602047] pci_device_probe+0x9e/0x110 <4>[ 462.602057] really_probe+0xea/0x410 <4>[ 462.602067] driver_probe_device+0xd9/0x140 <4>[ 462.602075] device_driver_attach+0x4a/0x50 <4>[ 462.602084] __driver_attach+0x83/0x140 <4>[ 462.602091] ? device_driver_attach+0x50/0x50 <4>[ 462.602099] ? 
device_driver_attach+0x50/0x50 <4>[ 462.602107] bus_for_each_dev+0x75/0xc0 <4>[ 462.602116] bus_add_driver+0x14b/0x1f0 <4>[ 462.602124] driver_register+0x66/0xb0 <4>[ 462.602133] i915_init+0x70/0x87 [i915] <4>[ 462.602453] ? 0xffffffffa0606000 <4>[ 462.602458] do_one_initcall+0x56/0x2e0 <4>[ 462.602466] ? kmem_cache_alloc_trace+0x374/0x430 <4>[ 462.602476] do_init_module+0x55/0x200 <4>[ 462.602484] load_module+0x2703/0x2990 <4>[ 462.602500] ? __do_sys_finit_module+0xad/0x110 <4>[ 462.602507] __do_sys_finit_module+0xad/0x110 <4>[ 462.602519] do_syscall_64+0x33/0x80 <4>[ 462.602527] entry_SYSCALL_64_after_hwframe+0x44/0xae <4>[ 462.602535] RIP: 0033:0x7fab69d8d89d Changes since v1: - Add lockdep annotations during init, to ensure that lockdep is primed. This also fixes a false positive when reading /proc/lockdep_stats during module reload. Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20210426102351.921874-1-maarten.lankhorst@linux.intel.com Reviewed-by: Thomas Hellström --- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 13 +++++++++---- drivers/gpu/drm/i915/gt/intel_ggtt.c | 8 +++++--- drivers/gpu/drm/i915/gt/intel_gtt.c | 17 ++++++++++++++++- drivers/gpu/drm/i915/i915_drv.h | 11 +++++++++-- drivers/gpu/drm/i915/i915_gem.c | 15 +++++++++++++-- 5 files changed, 52 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 4f9c8d3021ab..f4fb68e8955a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -38,15 +38,17 @@ static bool can_release_pages(struct drm_i915_gem_object *obj) } static bool unsafe_drop_pages(struct drm_i915_gem_object *obj, - unsigned long shrink) + unsigned long shrink, bool trylock_vm) { unsigned long flags; flags = 0; if (shrink & I915_SHRINK_ACTIVE) - flags = I915_GEM_OBJECT_UNBIND_ACTIVE; + flags |= I915_GEM_OBJECT_UNBIND_ACTIVE; if (!(shrink & I915_SHRINK_BOUND)) - flags = I915_GEM_OBJECT_UNBIND_TEST; + flags |= I915_GEM_OBJECT_UNBIND_TEST; + if (trylock_vm) + flags |= I915_GEM_OBJECT_UNBIND_VM_TRYLOCK; if (i915_gem_object_unbind(obj, flags) == 0) return true; @@ -117,6 +119,9 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww, unsigned long scanned = 0; int err; + /* CHV + VTD workaround use stop_machine(); need to trylock vm->mutex */ + bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915); + trace_i915_gem_shrink(i915, target, shrink); /* @@ -204,7 +209,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww, spin_unlock_irqrestore(&i915->mm.obj_lock, flags); err = 0; - if (unsafe_drop_pages(obj, shrink)) { + if (unsafe_drop_pages(obj, shrink, trylock_vm)) { /* May arrive from get_pages on another bo */ if (!ww) { if (!i915_gem_object_trylock(obj)) diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index d94628b9d89e..dcb3b299cf4a 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -906,9 +906,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.insert_entries = gen8_ggtt_insert_entries; - /* Serialize GTT updates with aperture access on BXT if VT-d is on. */ - if (intel_ggtt_update_needs_vtd_wa(i915) || - IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) { + /* + * Serialize GTT updates with aperture access on BXT if VT-d is on, + * and always on CHV. 
+ */ + if (intel_vm_no_concurrent_access_wa(i915)) { ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; ggtt->vm.bind_async_flags = diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 061c39d2ad51..9b98f9d9faa3 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -154,7 +154,22 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass) */ mutex_init(&vm->mutex); lockdep_set_subclass(&vm->mutex, subclass); - i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex); + + if (!intel_vm_no_concurrent_access_wa(vm->i915)) { + i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex); + } else { + /* + * CHV + BXT VTD workaround use stop_machine(), + * which is allowed to allocate memory. This means &vm->mutex + * is the outer lock, and in theory we can allocate memory inside + * it through stop_machine(). + * + * Add the annotation for this, we use trylock in shrinker. + */ + mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_); + might_alloc(GFP_KERNEL); + mutex_release(&vm->mutex.dep_map, _THIS_IP_); + } dma_resv_init(&vm->resv); GEM_BUG_ON(!vm->total); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 336b09f38aad..3cfa6effbb5f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1720,9 +1720,15 @@ static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) } static inline bool -intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv) +intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915) { - return IS_BROXTON(dev_priv) && intel_vtd_active(); + return IS_BROXTON(i915) && intel_vtd_active(); +} + +static inline bool +intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915) +{ + return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915); } /* i915_drv.c */ @@ -1802,6 +1808,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj, #define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0) #define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1) #define I915_GEM_OBJECT_UNBIND_TEST BIT(2) +#define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3) void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b23f58e94cfb..07da6a9342e3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -157,8 +157,19 @@ try_again: if (vma) { ret = -EBUSY; if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE || - !i915_vma_is_active(vma)) - ret = i915_vma_unbind(vma); + !i915_vma_is_active(vma)) { + if (i915_is_ggtt(vma->vm) && + flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK) { + if (mutex_trylock(&vma->vm->mutex)) { + ret = __i915_vma_unbind(vma); + mutex_unlock(&vma->vm->mutex); + } else { + ret = -EBUSY; + } + } else { + ret = i915_vma_unbind(vma); + } + } __i915_vma_put(vma); } -- cgit From 36150bba44329d7a96e58498bfc91fee9a2de08c Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Thu, 29 Apr 2021 11:30:49 +0100 Subject: drm/i915: mark stolen as private MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the next patch we want to expose the supported regions to userspace, which can then be fed into the gem_create_ext placement extensions. For now treat stolen memory as private from userspace pov. 
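To sketch how such a flag is meant to be consumed: a later patch in this series adds exactly this check when validating userspace-requested placements, rejecting any region marked private (names follow the i915 code; error handling abbreviated):

    mr = intel_memory_region_lookup(i915, class, instance);
    if (!mr || mr->private)
            return -EINVAL; /* e.g. stolen memory never reaches userspace */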
Signed-off-by: Matthew Auld Cc: Joonas Lahtinen Cc: Thomas Hellström Cc: Daniele Ceraolo Spurio Cc: Lionel Landwerlin Cc: Jon Bloomfield Cc: Jordan Justen Cc: Daniel Vetter Cc: Kenneth Graunke Cc: Jason Ekstrand Cc: Dave Airlie Cc: dri-devel@lists.freedesktop.org Cc: mesa-dev@lists.freedesktop.org Reviewed-by: Kenneth Graunke Link: https://patchwork.freedesktop.org/patch/msgid/20210429103056.407067-2-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 4 ++++ drivers/gpu/drm/i915/intel_memory_region.h | 1 + 2 files changed, 5 insertions(+) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index e1a32672bbe8..293f640faa0a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -803,6 +803,8 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915) intel_memory_region_set_name(mem, "stolen-local"); + mem->private = true; + return mem; } @@ -821,6 +823,8 @@ i915_gem_stolen_smem_setup(struct drm_i915_private *i915) intel_memory_region_set_name(mem, "stolen-system"); + mem->private = true; + return mem; } diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h index 4c8ec15af55f..942fc4f68764 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.h +++ b/drivers/gpu/drm/i915/intel_memory_region.h @@ -86,6 +86,7 @@ struct intel_memory_region { u16 instance; enum intel_region_id id; char name[16]; + bool private; /* not for userspace */ struct list_head reserved; -- cgit From 357814f878f97a82fd7b4146e1a50822847c4ed7 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Thu, 29 Apr 2021 11:30:51 +0100 Subject: drm/i915: rework gem_create flow for upcoming extensions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With the upcoming gem_create_ext we want to be able to create a "vanilla" object upfront and pass that directly to the extensions, before actually initialising the object. Functionally this should be the same, except that we now feed the object into the lower-level, region-specific init_object.
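Condensed from the diff below, the reworked flow splits creation into alloc, setup and publish; a sketch with error paths abbreviated:

    obj = i915_gem_object_alloc();        /* bare, uninitialised object */
    if (!obj)
            return -ENOMEM;

    ret = i915_gem_setup(obj, mr, size);  /* region-specific init_object() */
    if (ret)
            goto object_free;

    return i915_gem_publish(obj, file, &args->size, &args->handle);

    object_free:
    i915_gem_object_free(obj);
    return ret;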
Signed-off-by: Matthew Auld Cc: Joonas Lahtinen Cc: Thomas Hellström Cc: Daniele Ceraolo Spurio Cc: Lionel Landwerlin Cc: Jon Bloomfield Cc: Jordan Justen Cc: Daniel Vetter Cc: Kenneth Graunke Cc: Jason Ekstrand Cc: Dave Airlie Cc: dri-devel@lists.freedesktop.org Cc: mesa-dev@lists.freedesktop.org Reviewed-by: Kenneth Graunke Link: https://patchwork.freedesktop.org/patch/msgid/20210429103056.407067-4-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_create.c | 92 +++++++++++++++++++++--------- 1 file changed, 65 insertions(+), 27 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c index 45d60e3d98e3..409226df0dd2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_create.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c @@ -7,41 +7,51 @@ #include "gem/i915_gem_region.h" #include "i915_drv.h" +#include "i915_trace.h" + +static int i915_gem_publish(struct drm_i915_gem_object *obj, + struct drm_file *file, + u64 *size_p, + u32 *handle_p) +{ + u64 size = obj->base.size; + int ret; + + ret = drm_gem_handle_create(file, &obj->base, handle_p); + /* drop reference from allocate - handle holds it now */ + i915_gem_object_put(obj); + if (ret) + return ret; + + *size_p = size; + return 0; +} static int -i915_gem_create(struct drm_file *file, - struct intel_memory_region *mr, - u64 *size_p, - u32 *handle_p) +i915_gem_setup(struct drm_i915_gem_object *obj, + struct intel_memory_region *mr, + u64 size) { - struct drm_i915_gem_object *obj; - u32 handle; - u64 size; int ret; GEM_BUG_ON(!is_power_of_2(mr->min_page_size)); - size = round_up(*size_p, mr->min_page_size); + size = round_up(size, mr->min_page_size); if (size == 0) return -EINVAL; /* For most of the ABI (e.g. 
mmap) we think in system pages */ GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE)); - /* Allocate the new object */ - obj = i915_gem_object_create_region(mr, size, 0); - if (IS_ERR(obj)) - return PTR_ERR(obj); - - GEM_BUG_ON(size != obj->base.size); + if (i915_gem_object_size_2big(size)) + return -E2BIG; - ret = drm_gem_handle_create(file, &obj->base, &handle); - /* drop reference from allocate - handle holds it now */ - i915_gem_object_put(obj); + ret = mr->ops->init_object(mr, obj, size, 0); if (ret) return ret; - *handle_p = handle; - *size_p = size; + GEM_BUG_ON(size != obj->base.size); + + trace_i915_gem_object_create(obj); return 0; } @@ -50,9 +60,11 @@ i915_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { + struct drm_i915_gem_object *obj; enum intel_memory_type mem_type; int cpp = DIV_ROUND_UP(args->bpp, 8); u32 format; + int ret; switch (cpp) { case 1: @@ -85,10 +97,22 @@ i915_gem_dumb_create(struct drm_file *file, if (HAS_LMEM(to_i915(dev))) mem_type = INTEL_MEMORY_LOCAL; - return i915_gem_create(file, - intel_memory_region_by_type(to_i915(dev), - mem_type), - &args->size, &args->handle); + obj = i915_gem_object_alloc(); + if (!obj) + return -ENOMEM; + + ret = i915_gem_setup(obj, + intel_memory_region_by_type(to_i915(dev), + mem_type), + args->size); + if (ret) + goto object_free; + + return i915_gem_publish(obj, file, &args->size, &args->handle); + +object_free: + i915_gem_object_free(obj); + return ret; } /** @@ -103,11 +127,25 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, { struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_create *args = data; + struct drm_i915_gem_object *obj; + int ret; i915_gem_flush_free_objects(i915); - return i915_gem_create(file, - intel_memory_region_by_type(i915, - INTEL_MEMORY_SYSTEM), - &args->size, &args->handle); + obj = i915_gem_object_alloc(); + if (!obj) + return -ENOMEM; + + ret = i915_gem_setup(obj, + intel_memory_region_by_type(i915, + INTEL_MEMORY_SYSTEM), + args->size); + if (ret) + goto object_free; + + return i915_gem_publish(obj, file, &args->size, &args->handle); + +object_free: + i915_gem_object_free(obj); + return ret; } -- cgit From ebcb40298947bdb0622e53c69734e6b4fb64b348 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Thu, 29 Apr 2021 11:30:52 +0100 Subject: drm/i915/uapi: introduce drm_i915_gem_create_ext Same old gem_create, but now with extensions support. This is needed to support various upcoming use cases. v2:(Chris) - Use a separate ioctl number for gem_create_ext, instead of hijacking the existing gem_create ioctl; otherwise we cannot detect whether the kernel supports the new extension behaviour. - We now have gem_create_ext.flags, which should be zeroed. - I915_GEM_CREATE_EXT_SETPARAM value is now zero, since this is the index into our array of extensions. - Setup a "vanilla" object which we can directly apply our extensions to. v3:(Daniel & Jason) - drop I915_GEM_CREATE_EXT_SETPARAM. Instead just have each extension do one thing only, instead of a generic setparam which can cover various use cases. - add some kernel-doc.
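A minimal userspace sketch (not part of this patch) of calling the new ioctl with an empty extension chain, which keeps the classic gem_create behaviour; assumes an already-open DRM fd and the uapi additions from this patch:

    struct drm_i915_gem_create_ext create = {
            .size = 4096,
            .flags = 0,      /* MBZ */
            .extensions = 0, /* none: same behaviour as GEM_CREATE */
    };

    if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create) == 0) {
            /* create.handle is valid; create.size reflects any rounding up */
    }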
Signed-off-by: Matthew Auld Signed-off-by: CQ Tang Cc: Joonas Lahtinen Cc: Daniele Ceraolo Spurio Cc: Lionel Landwerlin Cc: Jordan Justen Cc: Daniel Vetter Cc: Kenneth Graunke Cc: Jason Ekstrand Cc: Dave Airlie Cc: dri-devel@lists.freedesktop.org Cc: mesa-dev@lists.freedesktop.org Reviewed-by: Kenneth Graunke Link: https://patchwork.freedesktop.org/patch/msgid/20210429103056.407067-5-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_create.c | 56 ++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/gem/i915_gem_ioctls.h | 2 ++ drivers/gpu/drm/i915/i915_drv.c | 1 + include/uapi/drm/i915_drm.h | 42 ++++++++++++++++++++++ 4 files changed, 101 insertions(+) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c index 409226df0dd2..1e82633a3898 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_create.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c @@ -8,6 +8,7 @@ #include "i915_drv.h" #include "i915_trace.h" +#include "i915_user_extensions.h" static int i915_gem_publish(struct drm_i915_gem_object *obj, struct drm_file *file, @@ -149,3 +150,58 @@ object_free: i915_gem_object_free(obj); return ret; } + +struct create_ext { + struct drm_i915_private *i915; + struct drm_i915_gem_object *vanilla_object; +}; + +static const i915_user_extension_fn create_extensions[] = { +}; + +/** + * Creates a new mm object and returns a handle to it. + * @dev: drm device pointer + * @data: ioctl data blob + * @file: drm file pointer + */ +int +i915_gem_create_ext_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_private *i915 = to_i915(dev); + struct drm_i915_gem_create_ext *args = data; + struct create_ext ext_data = { .i915 = i915 }; + struct drm_i915_gem_object *obj; + int ret; + + if (args->flags) + return -EINVAL; + + i915_gem_flush_free_objects(i915); + + obj = i915_gem_object_alloc(); + if (!obj) + return -ENOMEM; + + ext_data.vanilla_object = obj; + ret = i915_user_extensions(u64_to_user_ptr(args->extensions), + create_extensions, + ARRAY_SIZE(create_extensions), + &ext_data); + if (ret) + goto object_free; + + ret = i915_gem_setup(obj, + intel_memory_region_by_type(i915, + INTEL_MEMORY_SYSTEM), + args->size); + if (ret) + goto object_free; + + return i915_gem_publish(obj, file, &args->size, &args->handle); + +object_free: + i915_gem_object_free(obj); + return ret; +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h index 7fd22f3efbef..28d6526e32ab 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h @@ -14,6 +14,8 @@ int i915_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file); +int i915_gem_create_ext_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); int i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 3286bcd0a6d5..f50f7b4ff541 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1705,6 +1705,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 
DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CREATE_EXT, i915_gem_create_ext_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW), diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 582ee6f7d595..9ab609e0cabb 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -406,6 +406,7 @@ typedef struct _drm_i915_sarea { #define DRM_I915_QUERY 0x39 #define DRM_I915_GEM_VM_CREATE 0x3a #define DRM_I915_GEM_VM_DESTROY 0x3b +#define DRM_I915_GEM_CREATE_EXT 0x3c /* Must be kept compact -- no holes */ #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) @@ -438,6 +439,7 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) #define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) #define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) +#define DRM_IOCTL_I915_GEM_CREATE_EXT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext) #define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) #define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) #define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) @@ -2598,6 +2600,46 @@ struct drm_i915_query_memory_regions { struct drm_i915_memory_region_info regions[]; }; +/** + * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added + * extension support using struct i915_user_extension. + * + * Note that in the future we want to have our buffer flags here, at least for + * the stuff that is immutable. Previously we would have two ioctls, one to + * create the object with gem_create, and another to apply various parameters, + * however this creates some ambiguity for the params which are considered + * immutable. Also in general we're phasing out the various SET/GET ioctls. + */ +struct drm_i915_gem_create_ext { + /** + * @size: Requested size for the object. + * + * The (page-aligned) allocated size for the object will be returned. + * + */ + __u64 size; + /** + * @handle: Returned handle for the object. + * + * Object handles are nonzero. + */ + __u32 handle; + /** @flags: MBZ */ + __u32 flags; + /** + * @extensions: The chain of extensions to apply to this object. + * + * This will be useful in the future when we need to support several + * different extensions, and we need to apply more than one when + * creating the object. See struct i915_user_extension. + * + * If we don't supply any extensions then we get the same old gem_create + * behaviour. + * + */ + __u64 extensions; +}; + #if defined(__cplusplus) } #endif -- cgit From 2459e56fd8af0b47fcbfbdff2fdc02e4077680ec Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Thu, 29 Apr 2021 11:30:53 +0100 Subject: drm/i915/uapi: implement object placement extension Add new extension to support setting an immutable-priority-list of potential placements, at creation time. If we use the normal gem_create or gem_create_ext without the extensions/placements then we still get the old behaviour with only placing the object in system memory. 
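As a hedged userspace sketch of the priority-ordered placements this enables, request device local-memory first with system memory as a fallback (uses only the uapi structs added below in this patch):

    struct drm_i915_gem_memory_class_instance regions[] = {
            { .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
            { .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
    };
    struct drm_i915_gem_create_ext_memory_regions ext = {
            .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
            .num_regions = 2,
            .regions = (uintptr_t)regions,
    };
    struct drm_i915_gem_create_ext create = {
            .size = 2 * 1024 * 1024,
            .extensions = (uintptr_t)&ext,
    };
    int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);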
v2(Daniel & Jason): - Add a bunch of kernel-doc - Simplify design for placements extension Testcase: igt/gem_create/create-ext-placement-sanity-check Testcase: igt/gem_create/create-ext-placement-each Testcase: igt/gem_create/create-ext-placement-all Signed-off-by: Matthew Auld Signed-off-by: CQ Tang Cc: Joonas Lahtinen Cc: Daniele Ceraolo Spurio Cc: Lionel Landwerlin Cc: Jordan Justen Cc: Daniel Vetter Cc: Kenneth Graunke Cc: Jason Ekstrand Cc: Dave Airlie Cc: dri-devel@lists.freedesktop.org Cc: mesa-dev@lists.freedesktop.org Reviewed-by: Kenneth Graunke Link: https://patchwork.freedesktop.org/patch/msgid/20210429103056.407067-6-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_create.c | 218 +++++++++++++++++++-- drivers/gpu/drm/i915/gem/i915_gem_object.c | 3 + drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 6 + drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 26 +++ drivers/gpu/drm/i915/intel_memory_region.c | 16 ++ drivers/gpu/drm/i915/intel_memory_region.h | 4 + include/uapi/drm/i915_drm.h | 62 ++++++ 7 files changed, 318 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c index 1e82633a3898..957f790c644b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_create.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c @@ -4,12 +4,51 @@ */ #include "gem/i915_gem_ioctls.h" +#include "gem/i915_gem_lmem.h" #include "gem/i915_gem_region.h" #include "i915_drv.h" #include "i915_trace.h" #include "i915_user_extensions.h" +static u32 object_max_page_size(struct drm_i915_gem_object *obj) +{ + u32 max_page_size = 0; + int i; + + for (i = 0; i < obj->mm.n_placements; i++) { + struct intel_memory_region *mr = obj->mm.placements[i]; + + GEM_BUG_ON(!is_power_of_2(mr->min_page_size)); + max_page_size = max_t(u32, max_page_size, mr->min_page_size); + } + + GEM_BUG_ON(!max_page_size); + return max_page_size; +} + +static void object_set_placements(struct drm_i915_gem_object *obj, + struct intel_memory_region **placements, + unsigned int n_placements) +{ + GEM_BUG_ON(!n_placements); + + /* + * For the common case of one memory region, skip storing an + * allocated array and just point at the region directly. 
+ */ + if (n_placements == 1) { + struct intel_memory_region *mr = placements[0]; + struct drm_i915_private *i915 = mr->i915; + + obj->mm.placements = &i915->mm.regions[mr->id]; + obj->mm.n_placements = 1; + } else { + obj->mm.placements = placements; + obj->mm.n_placements = n_placements; + } +} + static int i915_gem_publish(struct drm_i915_gem_object *obj, struct drm_file *file, u64 *size_p, @@ -29,14 +68,12 @@ static int i915_gem_publish(struct drm_i915_gem_object *obj, } static int -i915_gem_setup(struct drm_i915_gem_object *obj, - struct intel_memory_region *mr, - u64 size) +i915_gem_setup(struct drm_i915_gem_object *obj, u64 size) { + struct intel_memory_region *mr = obj->mm.placements[0]; int ret; - GEM_BUG_ON(!is_power_of_2(mr->min_page_size)); - size = round_up(size, mr->min_page_size); + size = round_up(size, object_max_page_size(obj)); if (size == 0) return -EINVAL; @@ -62,6 +99,7 @@ i915_gem_dumb_create(struct drm_file *file, struct drm_mode_create_dumb *args) { struct drm_i915_gem_object *obj; + struct intel_memory_region *mr; enum intel_memory_type mem_type; int cpp = DIV_ROUND_UP(args->bpp, 8); u32 format; @@ -102,10 +140,10 @@ i915_gem_dumb_create(struct drm_file *file, if (!obj) return -ENOMEM; - ret = i915_gem_setup(obj, - intel_memory_region_by_type(to_i915(dev), - mem_type), - args->size); + mr = intel_memory_region_by_type(to_i915(dev), mem_type); + object_set_placements(obj, &mr, 1); + + ret = i915_gem_setup(obj, args->size); if (ret) goto object_free; @@ -129,6 +167,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_create *args = data; struct drm_i915_gem_object *obj; + struct intel_memory_region *mr; int ret; i915_gem_flush_free_objects(i915); @@ -137,10 +176,10 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, if (!obj) return -ENOMEM; - ret = i915_gem_setup(obj, - intel_memory_region_by_type(i915, - INTEL_MEMORY_SYSTEM), - args->size); + mr = intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM); + object_set_placements(obj, &mr, 1); + + ret = i915_gem_setup(obj, args->size); if (ret) goto object_free; @@ -156,7 +195,144 @@ struct create_ext { struct drm_i915_gem_object *vanilla_object; }; +static void repr_placements(char *buf, size_t size, + struct intel_memory_region **placements, + int n_placements) +{ + int i; + + buf[0] = '\0'; + + for (i = 0; i < n_placements; i++) { + struct intel_memory_region *mr = placements[i]; + int r; + + r = snprintf(buf, size, "\n %s -> { class: %d, inst: %d }", + mr->name, mr->type, mr->instance); + if (r >= size) + return; + + buf += r; + size -= r; + } +} + +static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args, + struct create_ext *ext_data) +{ + struct drm_i915_private *i915 = ext_data->i915; + struct drm_i915_gem_memory_class_instance __user *uregions = + u64_to_user_ptr(args->regions); + struct drm_i915_gem_object *obj = ext_data->vanilla_object; + struct intel_memory_region **placements; + u32 mask; + int i, ret = 0; + + if (args->pad) { + drm_dbg(&i915->drm, "pad should be zero\n"); + ret = -EINVAL; + } + + if (!args->num_regions) { + drm_dbg(&i915->drm, "num_regions is zero\n"); + ret = -EINVAL; + } + + if (args->num_regions > ARRAY_SIZE(i915->mm.regions)) { + drm_dbg(&i915->drm, "num_regions is too large\n"); + ret = -EINVAL; + } + + if (ret) + return ret; + + placements = kmalloc_array(args->num_regions, + sizeof(struct intel_memory_region *), + GFP_KERNEL); + if (!placements) + return -ENOMEM; + + mask = 
0; + for (i = 0; i < args->num_regions; i++) { + struct drm_i915_gem_memory_class_instance region; + struct intel_memory_region *mr; + + if (copy_from_user(&region, uregions, sizeof(region))) { + ret = -EFAULT; + goto out_free; + } + + mr = intel_memory_region_lookup(i915, + region.memory_class, + region.memory_instance); + if (!mr || mr->private) { + drm_dbg(&i915->drm, "Device is missing region { class: %d, inst: %d } at index = %d\n", + region.memory_class, region.memory_instance, i); + ret = -EINVAL; + goto out_dump; + } + + if (mask & BIT(mr->id)) { + drm_dbg(&i915->drm, "Found duplicate placement %s -> { class: %d, inst: %d } at index = %d\n", + mr->name, region.memory_class, + region.memory_instance, i); + ret = -EINVAL; + goto out_dump; + } + + placements[i] = mr; + mask |= BIT(mr->id); + + ++uregions; + } + + if (obj->mm.placements) { + ret = -EINVAL; + goto out_dump; + } + + object_set_placements(obj, placements, args->num_regions); + if (args->num_regions == 1) + kfree(placements); + + return 0; + +out_dump: + if (1) { + char buf[256]; + + if (obj->mm.placements) { + repr_placements(buf, + sizeof(buf), + obj->mm.placements, + obj->mm.n_placements); + drm_dbg(&i915->drm, + "Placements were already set in previous EXT. Existing placements: %s\n", + buf); + } + + repr_placements(buf, sizeof(buf), placements, i); + drm_dbg(&i915->drm, "New placements(so far validated): %s\n", buf); + } + +out_free: + kfree(placements); + return ret; +} + +static int ext_set_placements(struct i915_user_extension __user *base, + void *data) +{ + struct drm_i915_gem_create_ext_memory_regions ext; + + if (copy_from_user(&ext, base, sizeof(ext))) + return -EFAULT; + + return set_placements(&ext, data); +} + static const i915_user_extension_fn create_extensions[] = { + [I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements, }; /** @@ -172,6 +348,7 @@ i915_gem_create_ext_ioctl(struct drm_device *dev, void *data, struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_create_ext *args = data; struct create_ext ext_data = { .i915 = i915 }; + struct intel_memory_region **placements_ext; struct drm_i915_gem_object *obj; int ret; @@ -189,19 +366,26 @@ i915_gem_create_ext_ioctl(struct drm_device *dev, void *data, create_extensions, ARRAY_SIZE(create_extensions), &ext_data); + placements_ext = obj->mm.placements; if (ret) goto object_free; - ret = i915_gem_setup(obj, - intel_memory_region_by_type(i915, - INTEL_MEMORY_SYSTEM), - args->size); + if (!placements_ext) { + struct intel_memory_region *mr = + intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM); + + object_set_placements(obj, &mr, 1); + } + + ret = i915_gem_setup(obj, args->size); if (ret) goto object_free; return i915_gem_publish(obj, file, &args->size, &args->handle); object_free: + if (obj->mm.n_placements > 1) + kfree(placements_ext); i915_gem_object_free(obj); return ret; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index ea74cbca95be..28144410df86 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -249,6 +249,9 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, if (obj->ops->release) obj->ops->release(obj); + if (obj->mm.n_placements > 1) + kfree(obj->mm.placements); + /* But keep the pointer alive for RCU-protected lookups */ call_rcu(&obj->rcu, __i915_gem_free_object_rcu); cond_resched(); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 8e485cb3343c..69d6e54bc569 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -219,6 +219,12 @@ struct drm_i915_gem_object { atomic_t pages_pin_count; atomic_t shrink_pin; + /** + * Priority list of potential placements for this object. + */ + struct intel_memory_region **placements; + int n_placements; + /** * Memory region for this object. */ diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 5cf6df49c333..05a3b29f545e 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -842,6 +842,24 @@ static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type) return true; } +static void object_set_placements(struct drm_i915_gem_object *obj, + struct intel_memory_region **placements, + unsigned int n_placements) +{ + GEM_BUG_ON(!n_placements); + + if (n_placements == 1) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct intel_memory_region *mr = placements[0]; + + obj->mm.placements = &i915->mm.regions[mr->id]; + obj->mm.n_placements = 1; + } else { + obj->mm.placements = placements; + obj->mm.n_placements = n_placements; + } +} + #define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24)) static int __igt_mmap(struct drm_i915_private *i915, struct drm_i915_gem_object *obj, @@ -950,6 +968,8 @@ static int igt_mmap(void *arg) if (IS_ERR(obj)) return PTR_ERR(obj); + object_set_placements(obj, &mr, 1); + err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT); if (err == 0) err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC); @@ -1068,6 +1088,8 @@ static int igt_mmap_access(void *arg) if (IS_ERR(obj)) return PTR_ERR(obj); + object_set_placements(obj, &mr, 1); + err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT); if (err == 0) err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB); @@ -1211,6 +1233,8 @@ static int igt_mmap_gpu(void *arg) if (IS_ERR(obj)) return PTR_ERR(obj); + object_set_placements(obj, &mr, 1); + err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT); if (err == 0) err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC); @@ -1354,6 +1378,8 @@ static int igt_mmap_revoke(void *arg) if (IS_ERR(obj)) return PTR_ERR(obj); + object_set_placements(obj, &mr, 1); + err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT); if (err == 0) err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC); diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c index 481a487faca6..d98e8b81d322 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.c +++ b/drivers/gpu/drm/i915/intel_memory_region.c @@ -28,6 +28,22 @@ static const struct { }, }; +struct intel_memory_region * +intel_memory_region_lookup(struct drm_i915_private *i915, + u16 class, u16 instance) +{ + struct intel_memory_region *mr; + int id; + + /* XXX: consider maybe converting to an rb tree at some point */ + for_each_memory_region(mr, i915, id) { + if (mr->type == class && mr->instance == instance) + return mr; + } + + return NULL; +} + struct intel_memory_region * intel_memory_region_by_type(struct drm_i915_private *i915, enum intel_memory_type mem_type) diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h index 7cd8e3d66a7f..d24ce5a0b30b 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.h +++ b/drivers/gpu/drm/i915/intel_memory_region.h @@ -97,6 +97,10 @@ struct intel_memory_region { } objects; }; +struct 
intel_memory_region * +intel_memory_region_lookup(struct drm_i915_private *i915, + u16 class, u16 instance); + int intel_memory_region_init_buddy(struct intel_memory_region *mem); void intel_memory_region_release_buddy(struct intel_memory_region *mem); diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 9ab609e0cabb..c2c7759b7d2e 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -2616,6 +2616,11 @@ struct drm_i915_gem_create_ext { * * The (page-aligned) allocated size for the object will be returned. * + * Note that for some devices we might have further minimum + * page-size restrictions (larger than 4K), like for device local-memory. + * However in general the final size here should always reflect any + * rounding up, if for example using the I915_GEM_CREATE_EXT_MEMORY_REGIONS + * extension to place the object in device local-memory. */ __u64 size; /** @@ -2636,10 +2641,67 @@ struct drm_i915_gem_create_ext { * If we don't supply any extensions then we get the same old gem_create * behaviour. * + * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see + * struct drm_i915_gem_create_ext_memory_regions. */ +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0 __u64 extensions; }; +/** + * struct drm_i915_gem_create_ext_memory_regions - The + * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension. + * + * Set the object with the desired set of placements/regions in priority + * order. Each entry must be unique and supported by the device. + * + * This is provided as an array of struct drm_i915_gem_memory_class_instance, or + * an equivalent layout of class:instance pair encodings. See struct + * drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to + * query the supported regions for a device. + * + * As an example, on discrete devices, if we wish to set the placement as + * device local-memory we can do something like: + * + * .. code-block:: C + * + * struct drm_i915_gem_memory_class_instance region_lmem = { + * .memory_class = I915_MEMORY_CLASS_DEVICE, + * .memory_instance = 0, + * }; + * struct drm_i915_gem_create_ext_memory_regions regions = { + * .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS }, + * .regions = (uintptr_t)&region_lmem, + * .num_regions = 1, + * }; + * struct drm_i915_gem_create_ext create_ext = { + * .size = 16 * PAGE_SIZE, + * .extensions = (uintptr_t)&regions, + * }; + * + * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext); + * if (err) ... + * + * At which point we get the object handle in &drm_i915_gem_create_ext.handle, + * along with the final object size in &drm_i915_gem_create_ext.size, which + * should account for any rounding up, if required. */ +struct drm_i915_gem_create_ext_memory_regions { + /** @base: Extension link. See struct i915_user_extension. */ + struct i915_user_extension base; + + /** @pad: MBZ */ + __u32 pad; + /** @num_regions: Number of elements in the @regions array. */ + __u32 num_regions; + /** + * @regions: The regions/placements array. + * + * An array of struct drm_i915_gem_memory_class_instance.
+ */ + __u64 regions; +}; + #if defined(__cplusplus) } #endif -- cgit From 4f869f1dbd7eac42f3d4b129b8c941de54de77bf Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Thu, 29 Apr 2021 11:30:54 +0100 Subject: drm/i915/lmem: support optional CPU clearing for special internal use MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For some internal device local-memory objects it would be useful to have an option to CPU clear the pages upon gathering the backing store. Note that this might be before the blitter is useable, which is the case for some internal GuC objects. Signed-off-by: Matthew Auld Cc: Joonas Lahtinen Cc: Thomas Hellström Cc: Daniele Ceraolo Spurio Cc: Lionel Landwerlin Cc: Jon Bloomfield Cc: Jordan Justen Cc: Daniel Vetter Cc: Kenneth Graunke Cc: Jason Ekstrand Cc: Dave Airlie Cc: dri-devel@lists.freedesktop.org Cc: mesa-dev@lists.freedesktop.org Acked-by: Kenneth Graunke Link: https://patchwork.freedesktop.org/patch/msgid/20210429103056.407067-7-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 8 +- drivers/gpu/drm/i915/gem/i915_gem_region.c | 22 ++++++ .../gpu/drm/i915/selftests/intel_memory_region.c | 87 +++++++++++++++++++++- 3 files changed, 113 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 69d6e54bc569..0727d0c76aa0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -172,11 +172,13 @@ struct drm_i915_gem_object { #define I915_BO_ALLOC_CONTIGUOUS BIT(0) #define I915_BO_ALLOC_VOLATILE BIT(1) #define I915_BO_ALLOC_STRUCT_PAGE BIT(2) +#define I915_BO_ALLOC_CPU_CLEAR BIT(3) #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \ I915_BO_ALLOC_VOLATILE | \ - I915_BO_ALLOC_STRUCT_PAGE) -#define I915_BO_READONLY BIT(3) -#define I915_TILING_QUIRK_BIT 4 /* unknown swizzling; do not release! */ + I915_BO_ALLOC_STRUCT_PAGE | \ + I915_BO_ALLOC_CPU_CLEAR) +#define I915_BO_READONLY BIT(4) +#define I915_TILING_QUIRK_BIT 5 /* unknown swizzling; do not release! 
*/ /* * Is the object to be mapped as read-only to the GPU diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c index 6a84fb6dde24..ce8fcfc54079 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_region.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c @@ -95,6 +95,28 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj) sg_mark_end(sg); i915_sg_trim(st); + /* Intended for kernel internal use only */ + if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) { + struct scatterlist *sg; + unsigned long i; + + for_each_sg(st->sgl, sg, st->nents, i) { + unsigned int length; + void __iomem *vaddr; + dma_addr_t daddr; + + daddr = sg_dma_address(sg); + daddr -= mem->region.start; + length = sg_dma_len(sg); + + vaddr = io_mapping_map_wc(&mem->iomap, daddr, length); + memset64((void __force *)vaddr, 0, length / sizeof(u64)); + io_mapping_unmap(vaddr); + } + + wmb(); + } + __i915_gem_object_set_pages(obj, st, sg_page_sizes); return 0; diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index a5fc0bf3feb9..f85fd8cbfbf5 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -513,7 +513,7 @@ static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) if (err) return err; - ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC); + ptr = i915_gem_object_pin_map(obj, I915_MAP_WC); if (IS_ERR(ptr)) return PTR_ERR(ptr); @@ -593,7 +593,9 @@ static int igt_gpu_write(struct i915_gem_context *ctx, if (err) break; + i915_gem_object_lock(obj, NULL); err = igt_cpu_check(obj, dword, rng); + i915_gem_object_unlock(obj); if (err) break; } while (!__igt_timeout(end_time, NULL)); @@ -629,6 +631,88 @@ out_put: return err; } +static int igt_lmem_create_cleared_cpu(void *arg) +{ + struct drm_i915_private *i915 = arg; + I915_RND_STATE(prng); + IGT_TIMEOUT(end_time); + u32 size, i; + int err; + + i915_gem_drain_freed_objects(i915); + + size = max_t(u32, PAGE_SIZE, i915_prandom_u32_max_state(SZ_32M, &prng)); + size = round_up(size, PAGE_SIZE); + i = 0; + + do { + struct drm_i915_gem_object *obj; + unsigned int flags; + u32 dword, val; + void *vaddr; + + /* + * Alternate between cleared and uncleared allocations, while + * also dirtying the pages each time to check that the pages are + * always cleared if requested, since we should get some overlap + * of the underlying pages, if not all, since we are the only + * user. 
+ */ + + flags = I915_BO_ALLOC_CPU_CLEAR; + if (i & 1) + flags = 0; + + obj = i915_gem_object_create_lmem(i915, size, flags); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + i915_gem_object_lock(obj, NULL); + err = i915_gem_object_pin_pages(obj); + if (err) + goto out_put; + + dword = i915_prandom_u32_max_state(PAGE_SIZE / sizeof(u32), + &prng); + + if (flags & I915_BO_ALLOC_CPU_CLEAR) { + err = igt_cpu_check(obj, dword, 0); + if (err) { + pr_err("%s failed with size=%u, flags=%u\n", + __func__, size, flags); + goto out_unpin; + } + } + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto out_unpin; + } + + val = prandom_u32_state(&prng); + + memset32(vaddr, val, obj->base.size / sizeof(u32)); + + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); +out_unpin: + i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj); +out_put: + i915_gem_object_unlock(obj); + i915_gem_object_put(obj); + + if (err) + break; + ++i; + } while (!__igt_timeout(end_time, NULL)); + + pr_info("%s completed (%u) iterations\n", __func__, i); + + return err; +} + static int igt_lmem_write_gpu(void *arg) { struct drm_i915_private *i915 = arg; @@ -1043,6 +1127,7 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_lmem_create), + SUBTEST(igt_lmem_create_cleared_cpu), SUBTEST(igt_lmem_write_cpu), SUBTEST(igt_lmem_write_gpu), }; -- cgit From 0e997a36ecb61b161a22980ec9240ee7537f3f62 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Thu, 29 Apr 2021 11:30:55 +0100 Subject: drm/i915/gem: clear userspace buffers for LMEM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All userspace objects must be cleared when allocating the backing store, before they are potentially visible to userspace. For now use simple CPU based clearing to do this for device local-memory objects, note that in the near future this will instead use the blitter engine. Signed-off-by: Matthew Auld Cc: Joonas Lahtinen Cc: Thomas Hellström Cc: Daniele Ceraolo Spurio Cc: Lionel Landwerlin Cc: Jon Bloomfield Cc: Jordan Justen Cc: Daniel Vetter Cc: Kenneth Graunke Cc: Jason Ekstrand Cc: Dave Airlie Cc: dri-devel@lists.freedesktop.org Cc: mesa-dev@lists.freedesktop.org Reviewed-by: Kenneth Graunke Link: https://patchwork.freedesktop.org/patch/msgid/20210429103056.407067-8-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_create.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c index 957f790c644b..f6729feae582 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_create.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c @@ -71,6 +71,7 @@ static int i915_gem_setup(struct drm_i915_gem_object *obj, u64 size) { struct intel_memory_region *mr = obj->mm.placements[0]; + unsigned int flags; int ret; size = round_up(size, object_max_page_size(obj)); @@ -83,7 +84,16 @@ i915_gem_setup(struct drm_i915_gem_object *obj, u64 size) if (i915_gem_object_size_2big(size)) return -E2BIG; - ret = mr->ops->init_object(mr, obj, size, 0); + /* + * For now resort to CPU based clearing for device local-memory, in the + * near future this will use the blitter engine for accelerated, GPU + * based clearing. 
+ */ + flags = 0; + if (mr->type == INTEL_MEMORY_LOCAL) + flags = I915_BO_ALLOC_CPU_CLEAR; + + ret = mr->ops->init_object(mr, obj, size, flags); if (ret) return ret; -- cgit From 0a46be95c282956a9d3229a46e33ba701c26594c Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Thu, 29 Apr 2021 11:30:56 +0100 Subject: drm/i915/gem: hide new uAPI behind CONFIG_BROKEN MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Treat it the same as the fake local-memory stuff, where it is disabled for normal kernels, in case some random UMD is tempted to use this. Once we have all the other bits and pieces in place, like the TTM conversion, we can turn this on for real. Signed-off-by: Matthew Auld Cc: Joonas Lahtinen Cc: Thomas Hellström Cc: Daniele Ceraolo Spurio Cc: Lionel Landwerlin Cc: Jon Bloomfield Cc: Jordan Justen Cc: Daniel Vetter Cc: Kenneth Graunke Cc: Jason Ekstrand Cc: Dave Airlie Cc: dri-devel@lists.freedesktop.org Cc: mesa-dev@lists.freedesktop.org Reviewed-by: Kenneth Graunke Link: https://patchwork.freedesktop.org/patch/msgid/20210429103056.407067-9-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_create.c | 3 +++ drivers/gpu/drm/i915/i915_query.c | 3 +++ 2 files changed, 6 insertions(+) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c index f6729feae582..548ddf39d853 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_create.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c @@ -335,6 +335,9 @@ static int ext_set_placements(struct i915_user_extension __user *base, { struct drm_i915_gem_create_ext_memory_regions ext; + if (!IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) + return -ENODEV; + if (copy_from_user(&ext, base, sizeof(ext))) return -EFAULT; diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index 5e2b909827f4..e49da36c62fb 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -432,6 +432,9 @@ static int query_memregion_info(struct drm_i915_private *i915, u32 total_length; int ret, id, i; + if (!IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) + return -ENODEV; + if (query_item->flags != 0) return -EINVAL; -- cgit From c3b147604f279051865ebef59d971fde1d2cd457 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Tue, 4 May 2021 17:41:36 +0100 Subject: drm/i915: drop the __i915_active_call pointer packing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We use some of the lower bits of the retire function pointer for potential flags, which is quite thorny, since the caller needs to remember to give the function the correct alignment with __i915_active_call, otherwise we might incorrectly unpack the pointer and jump to some garbage address later. Instead of all this let's just pass the flags along as a separate parameter. 
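At a call site the change looks like this (condensed from the intel_frontbuffer.c hunk in the diff below):

    /* before: flags packed into the low bits of the retire pointer */
    i915_active_init(&front->write, frontbuffer_active,
                     i915_active_may_sleep(frontbuffer_retire));

    /* after: flags passed explicitly, with no pointer packing and no
     * __i915_active_call alignment requirement on the retire function */
    i915_active_init(&front->write, frontbuffer_active,
                     frontbuffer_retire, I915_ACTIVE_RETIRE_SLEEPS);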
Suggested-by: Ville Syrjälä Suggested-by: Daniel Vetter References: ca419f407b43 ("drm/i915: Fix crash in auto_retire") References: d8e44e4dd221 ("drm/i915/overlay: Fix active retire callback alignment") References: fd5f262db118 ("drm/i915/selftests: Fix active retire callback alignment") Signed-off-by: Matthew Auld Reviewed-by: Matthew Brost Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20210504164136.96456-1-matthew.auld@intel.com --- drivers/gpu/drm/i915/display/intel_frontbuffer.c | 4 ++-- drivers/gpu/drm/i915/display/intel_overlay.c | 5 ++--- drivers/gpu/drm/i915/gem/i915_gem_context.c | 3 +-- drivers/gpu/drm/i915/gt/gen6_ppgtt.c | 2 +- drivers/gpu/drm/i915/gt/intel_context.c | 3 +-- drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c | 2 +- drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c | 3 +-- drivers/gpu/drm/i915/gt/intel_timeline.c | 4 ++-- drivers/gpu/drm/i915/gt/mock_engine.c | 2 +- drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c | 4 ++-- drivers/gpu/drm/i915/i915_active.c | 14 +++++--------- drivers/gpu/drm/i915/i915_active.h | 11 ++++++----- drivers/gpu/drm/i915/i915_active_types.h | 5 ----- drivers/gpu/drm/i915/i915_vma.c | 3 +-- drivers/gpu/drm/i915/selftests/i915_active.c | 4 ++-- 15 files changed, 28 insertions(+), 41 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index 6fc6965b6133..2a25f7bf7e5c 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -206,7 +206,6 @@ static int frontbuffer_active(struct i915_active *ref) return 0; } -__i915_active_call static void frontbuffer_retire(struct i915_active *ref) { struct intel_frontbuffer *front = @@ -261,7 +260,8 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj) atomic_set(&front->bits, 0); i915_active_init(&front->write, frontbuffer_active, - i915_active_may_sleep(frontbuffer_retire)); + frontbuffer_retire, + I915_ACTIVE_RETIRE_SLEEPS); spin_lock(&i915->fb_tracking.lock); if (rcu_access_pointer(obj->frontbuffer)) { diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 8fa4313569a9..addc83a92315 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -383,8 +383,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay) i830_overlay_clock_gating(dev_priv, true); } -__i915_active_call static void -intel_overlay_last_flip_retire(struct i915_active *active) +static void intel_overlay_last_flip_retire(struct i915_active *active) { struct intel_overlay *overlay = container_of(active, typeof(*overlay), last_flip); @@ -1399,7 +1398,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv) overlay->saturation = 146; i915_active_init(&overlay->last_flip, - NULL, intel_overlay_last_flip_retire); + NULL, intel_overlay_last_flip_retire, 0); ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv)); if (ret) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index fd8ee52e17a4..188dee13e017 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -1046,7 +1046,6 @@ struct context_barrier_task { void *data; }; -__i915_active_call static void cb_retire(struct i915_active *base) { struct context_barrier_task *cb = container_of(base, typeof(*cb), base); @@ -1080,7 +1079,7 @@ static int 
context_barrier_task(struct i915_gem_context *ctx, if (!cb) return -ENOMEM; - i915_active_init(&cb->base, NULL, cb_retire); + i915_active_init(&cb->base, NULL, cb_retire, 0); err = i915_active_acquire(&cb->base); if (err) { kfree(cb); diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c index 21b1085769be..1aee5e6b1b23 100644 --- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c @@ -343,7 +343,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) if (!vma) return ERR_PTR(-ENOMEM); - i915_active_init(&vma->active, NULL, NULL); + i915_active_init(&vma->active, NULL, NULL, 0); kref_init(&vma->ref); mutex_init(&vma->pages_mutex); diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 17cf2640b082..4033184f13b9 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -326,7 +326,6 @@ void intel_context_unpin(struct intel_context *ce) intel_context_put(ce); } -__i915_active_call static void __intel_context_retire(struct i915_active *active) { struct intel_context *ce = container_of(active, typeof(*ce), active); @@ -385,7 +384,7 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine) mutex_init(&ce->pin_mutex); i915_active_init(&ce->active, - __intel_context_active, __intel_context_retire); + __intel_context_active, __intel_context_retire, 0); } void intel_context_fini(struct intel_context *ce) diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c index 0fa6c38893f7..7bf84cd21543 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c @@ -867,7 +867,7 @@ void intel_ggtt_init_fences(struct i915_ggtt *ggtt) for (i = 0; i < num_fences; i++) { struct i915_fence_reg *fence = &ggtt->fence_regs[i]; - i915_active_init(&fence->active, NULL, NULL); + i915_active_init(&fence->active, NULL, NULL, 0); fence->ggtt = ggtt; fence->id = i; list_add_tail(&fence->link, &ggtt->fence_list); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c index c59468107598..aa0a59c5b614 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c @@ -98,7 +98,6 @@ static void pool_free_work(struct work_struct *wrk) round_jiffies_up_relative(HZ)); } -__i915_active_call static void pool_retire(struct i915_active *ref) { struct intel_gt_buffer_pool_node *node = @@ -154,7 +153,7 @@ node_create(struct intel_gt_buffer_pool *pool, size_t sz, node->age = 0; node->pool = pool; node->pinned = false; - i915_active_init(&node->active, NULL, pool_retire); + i915_active_init(&node->active, NULL, pool_retire, 0); obj = i915_gem_object_create_internal(gt->i915, sz); if (IS_ERR(obj)) { diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index f19cf6d2fa85..c4a126c8caef 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -32,7 +32,6 @@ static struct i915_vma *hwsp_alloc(struct intel_gt *gt) return vma; } -__i915_active_call static void __timeline_retire(struct i915_active *active) { struct intel_timeline *tl = @@ -104,7 +103,8 @@ static int intel_timeline_init(struct intel_timeline *timeline, INIT_LIST_HEAD(&timeline->requests); i915_syncmap_init(&timeline->sync); - i915_active_init(&timeline->active, __timeline_active, __timeline_retire); 
+ i915_active_init(&timeline->active, __timeline_active, + __timeline_retire, 0); return 0; } diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index e1ba03b93ffa..32589c6625e1 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -55,7 +55,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) kfree(ring); return NULL; } - i915_active_init(&ring->vma->active, NULL, NULL); + i915_active_init(&ring->vma->active, NULL, NULL, 0); __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(ring->vma)); __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &ring->vma->node.flags); ring->vma->node.size = sz; diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c index fcde223e26ff..4896e4ccad50 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c @@ -63,7 +63,7 @@ static void pulse_put(struct pulse *p) kref_put(&p->kref, pulse_free); } -__i915_active_call static void pulse_retire(struct i915_active *active) +static void pulse_retire(struct i915_active *active) { pulse_put(container_of(active, struct pulse, active)); } @@ -77,7 +77,7 @@ static struct pulse *pulse_create(void) return p; kref_init(&p->kref); - i915_active_init(&p->active, pulse_active, pulse_retire); + i915_active_init(&p->active, pulse_active, pulse_retire, 0); return p; } diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index aa573b078ae7..b1aa1c482c32 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -343,18 +343,15 @@ out: void __i915_active_init(struct i915_active *ref, int (*active)(struct i915_active *ref), void (*retire)(struct i915_active *ref), + unsigned long flags, struct lock_class_key *mkey, struct lock_class_key *wkey) { - unsigned long bits; - debug_active_init(ref); - ref->flags = 0; + ref->flags = flags; ref->active = active; - ref->retire = ptr_unpack_bits(retire, &bits, 2); - if (bits & I915_ACTIVE_MAY_SLEEP) - ref->flags |= I915_ACTIVE_RETIRE_SLEEPS; + ref->retire = retire; spin_lock_init(&ref->tree_lock); ref->tree = RB_ROOT; @@ -1156,8 +1153,7 @@ static int auto_active(struct i915_active *ref) return 0; } -__i915_active_call static void -auto_retire(struct i915_active *ref) +static void auto_retire(struct i915_active *ref) { i915_active_put(ref); } @@ -1171,7 +1167,7 @@ struct i915_active *i915_active_create(void) return NULL; kref_init(&aa->ref); - i915_active_init(&aa->base, auto_active, auto_retire); + i915_active_init(&aa->base, auto_active, auto_retire, 0); return &aa->base; } diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h index fb165d3f01cf..d0feda68b874 100644 --- a/drivers/gpu/drm/i915/i915_active.h +++ b/drivers/gpu/drm/i915/i915_active.h @@ -152,15 +152,16 @@ i915_active_fence_isset(const struct i915_active_fence *active) void __i915_active_init(struct i915_active *ref, int (*active)(struct i915_active *ref), void (*retire)(struct i915_active *ref), + unsigned long flags, struct lock_class_key *mkey, struct lock_class_key *wkey); /* Specialise each class of i915_active to avoid impossible lockdep cycles. 
*/ -#define i915_active_init(ref, active, retire) do { \ - static struct lock_class_key __mkey; \ - static struct lock_class_key __wkey; \ - \ - __i915_active_init(ref, active, retire, &__mkey, &__wkey); \ +#define i915_active_init(ref, active, retire, flags) do { \ + static struct lock_class_key __mkey; \ + static struct lock_class_key __wkey; \ + \ + __i915_active_init(ref, active, retire, flags, &__mkey, &__wkey); \ } while (0) struct dma_fence * diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h index 6360c3e4b765..c149f348a972 100644 --- a/drivers/gpu/drm/i915/i915_active_types.h +++ b/drivers/gpu/drm/i915/i915_active_types.h @@ -24,11 +24,6 @@ struct i915_active_fence { struct active_node; -#define I915_ACTIVE_MAY_SLEEP BIT(0) - -#define __i915_active_call __aligned(4) -#define i915_active_may_sleep(fn) ptr_pack_bits(&(fn), I915_ACTIVE_MAY_SLEEP, 2) - struct i915_active { atomic_t count; struct mutex mutex; diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 468317e3b477..a6cd0fa62847 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -94,7 +94,6 @@ static int __i915_vma_active(struct i915_active *ref) return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT; } -__i915_active_call static void __i915_vma_retire(struct i915_active *ref) { i915_vma_put(active_to_vma(ref)); @@ -125,7 +124,7 @@ vma_create(struct drm_i915_gem_object *obj, vma->size = obj->base.size; vma->display_alignment = I915_GTT_MIN_ALIGNMENT; - i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire); + i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0); /* Declare ourselves safe for use inside shrinkers */ if (IS_ENABLED(CONFIG_LOCKDEP)) { diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c index 1aa52b5cc488..61bf4560d8af 100644 --- a/drivers/gpu/drm/i915/selftests/i915_active.c +++ b/drivers/gpu/drm/i915/selftests/i915_active.c @@ -51,7 +51,7 @@ static int __live_active(struct i915_active *base) return 0; } -__i915_active_call static void __live_retire(struct i915_active *base) +static void __live_retire(struct i915_active *base) { struct live_active *active = container_of(base, typeof(*active), base); @@ -68,7 +68,7 @@ static struct live_active *__live_alloc(struct drm_i915_private *i915) return NULL; kref_init(&active->ref); - i915_active_init(&active->base, __live_active, __live_retire); + i915_active_init(&active->base, __live_active, __live_retire, 0); return active; } -- cgit From 00d6dc3da1d76bdd735cd47d026b5a0d673559f7 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Fri, 7 May 2021 10:59:48 +0100 Subject: drm/i915/stolen: shuffle around init_memory_region MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We generally want to first call i915_gem_object_init_memory_region() before calling into get_pages(), since this sets up various bits of state which might be needed there. Currently for stolen this doesn't matter much, but it might in the future, and at the very least this makes things consistent with the other backends. 
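The resulting ordering, condensed from the diff below: region state is published first, then the pages are pinned, and the region state is unwound if pinning fails:

    i915_gem_object_init_memory_region(obj, mem);

    err = i915_gem_object_pin_pages(obj);
    if (err)
            i915_gem_object_release_memory_region(obj);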
Signed-off-by: Matthew Auld Cc: Thomas Hellström Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20210507095948.384230-1-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 293f640faa0a..b5553fc3ac4d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -657,9 +657,11 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem, if (WARN_ON(!i915_gem_object_trylock(obj))) return -EBUSY; + i915_gem_object_init_memory_region(obj, mem); + err = i915_gem_object_pin_pages(obj); - if (!err) - i915_gem_object_init_memory_region(obj, mem); + if (err) + i915_gem_object_release_memory_region(obj); i915_gem_object_unlock(obj); return err; -- cgit From 8777d17b68dcfbfbd4d524f444adefae56f41225 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 17 May 2021 09:46:40 +0100 Subject: drm/i915/gem: Pin the L-shape quirked object as unshrinkable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When instantiating a tiled object on an L-shaped memory machine, we mark the object as unshrinkable to prevent the shrinker from trying to swap out the pages. We have to do this as we do not know the swizzling on the individual pages, and so the data will be scrambled across swap out/in. Not only do we need to move the object off the shrinker list, we need to mark the object with shrink_pin so that the counter is consistent across calls to madvise. v2: in the madvise ioctl we need to check if the object is currently shrinkable/purgeable, not if the object type supports shrinking Fixes: 0175969e489a ("drm/i915/gem: Use shrinkable status for unknown swizzle quirks") References: https://gitlab.freedesktop.org/drm/intel/-/issues/3293 References: https://gitlab.freedesktop.org/drm/intel/-/issues/3450 Reported-by: Ville Syrjälä Tested-by: Ville Syrjälä Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Signed-off-by: Matthew Auld Cc: # v5.12+ Link: https://patchwork.freedesktop.org/patch/msgid/20210517084640.18862-1-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 2 ++ drivers/gpu/drm/i915/i915_gem.c | 11 +++++------ 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index aed8a37ccdc9..7361971c177d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -63,6 +63,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj)); i915_gem_object_set_tiling_quirk(obj); + GEM_BUG_ON(!list_empty(&obj->mm.link)); + atomic_inc(&obj->mm.shrink_pin); shrinkable = false; } diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d0018c5f88bd..cffd7f4f87dc 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1009,12 +1009,11 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, obj->mm.madv = args->madv; if (i915_gem_object_has_pages(obj)) { - struct list_head *list; + unsigned long flags; - if (i915_gem_object_is_shrinkable(obj)) { - unsigned long flags; - - spin_lock_irqsave(&i915->mm.obj_lock, 
flags); + spin_lock_irqsave(&i915->mm.obj_lock, flags); + if (!list_empty(&obj->mm.link)) { + struct list_head *list; if (obj->mm.madv != I915_MADV_WILLNEED) list = &i915->mm.purge_list; @@ -1022,8 +1021,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, list = &i915->mm.shrink_list; list_move_tail(&obj->mm.link, list); - spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } /* if the object is no longer attached, discard its backing storage */ -- cgit From f36709216e9b65872e43035dfcf8a5f7d45041f1 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Sun, 16 May 2021 20:59:35 +0200 Subject: drm/i915: Don't include drm_legacy.h i915 does not use DRM legacy code. Remove the respective include statements. Signed-off-by: Thomas Zimmermann Acked-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20210516185937.5644-2-tzimmermann@suse.de --- drivers/gpu/drm/i915/gem/i915_gem_phys.c | 1 - drivers/gpu/drm/i915/i915_drv.h | 1 - 2 files changed, 2 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c index 81dc2bf59bc3..51a05e62875d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -8,7 +8,6 @@ #include #include -#include /* for drm_legacy.h! */ #include #include "gt/intel_gt.h" diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9ec9277539ec..d1c9f0fb0a21 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -51,7 +51,6 @@ #include #include -#include /* for struct drm_dma_handle */ #include #include #include -- cgit From 4d8151ae5329cf50781a02fd2298a909589a5bab Mon Sep 17 00:00:00 2001 From: Thomas Hellström Date: Tue, 1 Jun 2021 09:46:41 +0200 Subject: drm/i915: Don't free shared locks while shared MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We are currently sharing the VM reservation locks across a number of gem objects with page-table memory. Since TTM will individualize the reservation locks when freeing objects, including accessing the shared locks, make sure that the shared locks are not freed until that is done.
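The lifetime rule, sketched here with the helpers this patch introduces (i915_vm_resv_get()/i915_vm_resv_put(), see the diff below): an object that adopts a vm's reservation lock pins it with a kref and only unpins it once the object itself is freed:

	/* At allocation: adopt the vm's lock and pin it via its kref. */
	obj->base.resv = i915_vm_resv_get(vm);
	obj->shares_resv_from = vm;

	/* At final object free: now the shared lock may safely go away. */
	if (obj->shares_resv_from)
		i915_vm_resv_put(obj->shares_resv_from);
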
For PPGTT we add an additional refcount, for GGTT we take additional measures to make sure objects sharing the GGTT reservation lock are freed at GGTT takedown. Signed-off-by: Thomas Hellström Reviewed-by: Maarten Lankhorst Signed-off-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20210601074654.3103-3-thomas.hellstrom@linux.intel.com --- drivers/gpu/drm/i915/gem/i915_gem_object.c | 3 ++ drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 4 +++ drivers/gpu/drm/i915/gt/intel_ggtt.c | 19 ++++++++-- drivers/gpu/drm/i915/gt/intel_gtt.c | 45 +++++++++++++++++++----- drivers/gpu/drm/i915/gt/intel_gtt.h | 28 ++++++++++++++- drivers/gpu/drm/i915/gt/intel_ppgtt.c | 2 +- drivers/gpu/drm/i915/i915_drv.c | 5 +++ 7 files changed, 93 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 28144410df86..2be6109d0093 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -252,6 +252,9 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, if (obj->mm.n_placements > 1) kfree(obj->mm.placements); + if (obj->shares_resv_from) + i915_vm_resv_put(obj->shares_resv_from); + /* But keep the pointer alive for RCU-protected lookups */ call_rcu(&obj->rcu, __i915_gem_free_object_rcu); cond_resched(); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 0727d0c76aa0..0415f99b6b95 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -149,6 +149,10 @@ struct drm_i915_gem_object { * when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() are called. */ struct list_head obj_link; + /** + * @shares_resv_from: The object shares the resv from this vm. + */ + struct i915_address_space *shares_resv_from; union { struct rcu_head rcu; diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index dcb3b299cf4a..efcd5aeac936 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -745,7 +745,6 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) mutex_unlock(&ggtt->vm.mutex); i915_address_space_fini(&ggtt->vm); - dma_resv_fini(&ggtt->vm.resv); arch_phys_wc_del(ggtt->mtrr); @@ -767,6 +766,19 @@ void i915_ggtt_driver_release(struct drm_i915_private *i915) ggtt_cleanup_hw(ggtt); } +/** + * i915_ggtt_driver_late_release - Cleanup of GGTT that needs to be done after + * all free objects have been drained.
+ * @i915: i915 device + */ +void i915_ggtt_driver_late_release(struct drm_i915_private *i915) +{ + struct i915_ggtt *ggtt = &i915->ggtt; + + GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1); + dma_resv_fini(&ggtt->vm._resv); +} + static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) { snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; @@ -828,6 +840,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) return -ENOMEM; } + kref_init(&ggtt->vm.resv_ref); ret = setup_scratch_page(&ggtt->vm); if (ret) { drm_err(&i915->drm, "Scratch setup failed\n"); @@ -1134,7 +1147,7 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt) ggtt->vm.gt = gt; ggtt->vm.i915 = i915; ggtt->vm.dma = i915->drm.dev; - dma_resv_init(&ggtt->vm.resv); + dma_resv_init(&ggtt->vm._resv); if (INTEL_GEN(i915) <= 5) ret = i915_gmch_probe(ggtt); @@ -1143,7 +1156,7 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt) else ret = gen8_gmch_probe(ggtt); if (ret) { - dma_resv_fini(&ggtt->vm.resv); + dma_resv_fini(&ggtt->vm._resv); return ret; } diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 9b98f9d9faa3..94849567143d 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -22,8 +22,11 @@ struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz) * object underneath, with the idea that one object_lock() will lock * them all at once. */ - if (!IS_ERR(obj)) - obj->base.resv = &vm->resv; + if (!IS_ERR(obj)) { + obj->base.resv = i915_vm_resv_get(vm); + obj->shares_resv_from = vm; + } + return obj; } @@ -40,8 +43,11 @@ struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz) * object underneath, with the idea that one object_lock() will lock * them all at once. */ - if (!IS_ERR(obj)) - obj->base.resv = &vm->resv; + if (!IS_ERR(obj)) { + obj->base.resv = i915_vm_resv_get(vm); + obj->shares_resv_from = vm; + } + return obj; } @@ -102,7 +108,7 @@ void __i915_vm_close(struct i915_address_space *vm) int i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww) { - if (vm->scratch[0]->base.resv == &vm->resv) { + if (vm->scratch[0]->base.resv == &vm->_resv) { return i915_gem_object_lock(vm->scratch[0], ww); } else { struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); @@ -118,6 +124,22 @@ void i915_address_space_fini(struct i915_address_space *vm) mutex_destroy(&vm->mutex); } +/** + * i915_vm_resv_release - Final struct i915_address_space destructor + * @kref: Pointer to the &i915_address_space.resv_ref member. + * + * This function is called when the last lock sharer no longer shares the + * &i915_address_space._resv lock. + */ +void i915_vm_resv_release(struct kref *kref) +{ + struct i915_address_space *vm = + container_of(kref, typeof(*vm), resv_ref); + + dma_resv_fini(&vm->_resv); + kfree(vm); +} + static void __i915_vm_release(struct work_struct *work) { struct i915_address_space *vm = @@ -125,9 +147,8 @@ static void __i915_vm_release(struct work_struct *work) vm->cleanup(vm); i915_address_space_fini(vm); - dma_resv_fini(&vm->resv); - kfree(vm); + i915_vm_resv_put(vm); } void i915_vm_release(struct kref *kref) @@ -144,6 +165,14 @@ void i915_vm_release(struct kref *kref) void i915_address_space_init(struct i915_address_space *vm, int subclass) { kref_init(&vm->ref); + + /* + * Special case for GGTT that has already done an early + * kref_init here. 
+ */ + if (!kref_read(&vm->resv_ref)) + kref_init(&vm->resv_ref); + INIT_RCU_WORK(&vm->rcu, __i915_vm_release); atomic_set(&vm->open, 1); @@ -170,7 +199,7 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass) might_alloc(GFP_KERNEL); mutex_release(&vm->mutex.dep_map, _THIS_IP_); } - dma_resv_init(&vm->resv); + dma_resv_init(&vm->_resv); GEM_BUG_ON(!vm->total); drm_mm_init(&vm->mm, 0, vm->total); diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index 44ce27c51631..86879af7b4a9 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -245,7 +245,9 @@ struct i915_address_space { atomic_t open; struct mutex mutex; /* protects vma and our lists */ - struct dma_resv resv; /* reservation lock for all pd objects, and buffer pool */ + + struct kref resv_ref; /* kref to keep the reservation lock alive. */ + struct dma_resv _resv; /* reservation lock for all pd objects, and buffer pool */ #define VM_CLASS_GGTT 0 #define VM_CLASS_PPGTT 1 @@ -399,13 +401,36 @@ i915_vm_get(struct i915_address_space *vm) return vm; } +/** + * i915_vm_resv_get - Obtain a reference on the vm's reservation lock + * @vm: The vm whose reservation lock we want to share. + * + * Return: A pointer to the vm's reservation lock. + */ +static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm) +{ + kref_get(&vm->resv_ref); + return &vm->_resv; +} + void i915_vm_release(struct kref *kref); +void i915_vm_resv_release(struct kref *kref); + static inline void i915_vm_put(struct i915_address_space *vm) { kref_put(&vm->ref, i915_vm_release); } +/** + * i915_vm_resv_put - Release a reference on the vm's reservation lock + * @resv: Pointer to a reservation lock obtained from i915_vm_resv_get() + */ +static inline void i915_vm_resv_put(struct i915_address_space *vm) +{ + kref_put(&vm->resv_ref, i915_vm_resv_release); +} + static inline struct i915_address_space * i915_vm_open(struct i915_address_space *vm) { @@ -501,6 +526,7 @@ void i915_ggtt_enable_guc(struct i915_ggtt *ggtt); void i915_ggtt_disable_guc(struct i915_ggtt *ggtt); int i915_init_ggtt(struct drm_i915_private *i915); void i915_ggtt_driver_release(struct drm_i915_private *i915); +void i915_ggtt_driver_late_release(struct drm_i915_private *i915); static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt) { diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c index 4e3d80c2295c..aee3a8929245 100644 --- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c @@ -307,7 +307,7 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt) ppgtt->vm.dma = i915->drm.dev; ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size); - dma_resv_init(&ppgtt->vm.resv); + dma_resv_init(&ppgtt->vm._resv); i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f50f7b4ff541..2db9d8f59eef 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -630,6 +630,8 @@ err_mem_regions: intel_memory_regions_driver_release(dev_priv); err_ggtt: i915_ggtt_driver_release(dev_priv); + i915_gem_drain_freed_objects(dev_priv); + i915_ggtt_driver_late_release(dev_priv); err_perf: i915_perf_fini(dev_priv); return ret; @@ -880,6 +882,8 @@ out_cleanup_hw: i915_driver_hw_remove(i915); intel_memory_regions_driver_release(i915); i915_ggtt_driver_release(i915); + 
i915_gem_drain_freed_objects(i915); + i915_ggtt_driver_late_release(i915); out_cleanup_mmio: i915_driver_mmio_release(i915); out_runtime_pm_put: @@ -936,6 +940,7 @@ static void i915_driver_release(struct drm_device *dev) intel_memory_regions_driver_release(dev_priv); i915_ggtt_driver_release(dev_priv); i915_gem_drain_freed_objects(dev_priv); + i915_ggtt_driver_late_release(dev_priv); i915_driver_mmio_release(dev_priv); -- cgit From 62445a97c5fa85c4c6966c936155fc6a0dcfac5b Mon Sep 17 00:00:00 2001 From: Thomas Hellström Date: Tue, 1 Jun 2021 09:46:42 +0200 Subject: drm/i915: Fix i915_sg_page_sizes to record dma segments rather than physical pages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All users of this function actually want the dma segment sizes, but that's not what's calculated. Fix that and rename the function to i915_sg_dma_sizes to reflect what's calculated. Signed-off-by: Thomas Hellström Reviewed-by: Matthew Auld Signed-off-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20210601074654.3103-4-thomas.hellstrom@linux.intel.com --- drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_phys.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 2 +- drivers/gpu/drm/i915/i915_scatterlist.h | 16 ++++++++++++---- 4 files changed, 15 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index ccede73c6465..616c3a2f1baf 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -209,7 +209,7 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) if (IS_ERR(pages)) return PTR_ERR(pages); - sg_page_sizes = i915_sg_page_sizes(pages->sgl); + sg_page_sizes = i915_sg_dma_sizes(pages->sgl); __i915_gem_object_set_pages(obj, pages, sg_page_sizes); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c index 81dc2bf59bc3..36f373dc493c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -208,7 +208,7 @@ static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj) err_xfer: if (!IS_ERR_OR_NULL(pages)) { - unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl); + unsigned int sg_page_sizes = i915_sg_dma_sizes(pages->sgl); __i915_gem_object_set_pages(obj, pages, sg_page_sizes); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index a657b99ec760..602f0ed983ec 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -173,7 +173,7 @@ alloc_table: goto err; } - sg_page_sizes = i915_sg_page_sizes(st->sgl); + sg_page_sizes = i915_sg_dma_sizes(st->sgl); __i915_gem_object_set_pages(obj, st, sg_page_sizes); diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h index 9cb26a224034..b96baad66a3a 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.h +++ b/drivers/gpu/drm/i915/i915_scatterlist.h @@ -101,15 +101,23 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg) (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? 
\ (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0) -static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg) +/** + * i915_sg_dma_sizes - Record the dma segment sizes of a scatterlist + * @sg: The scatterlist + * + * Return: An unsigned int with segment sizes logically or'ed together. + * A caller can use this information to determine what hardware page table + * entry sizes can be used to map the memory represented by the scatterlist. + */ +static inline unsigned int i915_sg_dma_sizes(struct scatterlist *sg) { unsigned int page_sizes; page_sizes = 0; - while (sg) { + while (sg && sg_dma_len(sg)) { GEM_BUG_ON(sg->offset); - GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE)); - page_sizes |= sg->length; + GEM_BUG_ON(!IS_ALIGNED(sg_dma_len(sg), PAGE_SIZE)); + page_sizes |= sg_dma_len(sg); sg = __sg_next(sg); } -- cgit From 35cbd91eb541e001f6e2648c56abb5361e3d6774 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Tue, 1 Jun 2021 09:46:52 +0200 Subject: drm/i915: Disable mmap ioctl for gen12+ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The platform should exclusively use mmap_offset, one less path to worry about for discrete. Signed-off-by: Maarten Lankhorst Reviewed-by: Thomas Hellström Signed-off-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20210601074654.3103-14-thomas.hellstrom@linux.intel.com --- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 8598a1c78a4c..65db290efd16 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -56,10 +56,17 @@ int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { + struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_mmap *args = data; struct drm_i915_gem_object *obj; unsigned long addr; + /* mmap ioctl is disallowed for all platforms after TGL-LP. This also + * covers all platforms with local memory. + */ + if (INTEL_GEN(i915) >= 12 && !IS_TIGERLAKE(i915)) + return -EOPNOTSUPP; + if (args->flags & ~(I915_MMAP_WC)) return -EINVAL; -- cgit From 0e4fe0c9f2f981f26e01b73f3c465ca314c4f9c0 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Thu, 27 May 2021 19:51:45 +0100 Subject: Revert "i915: use io_mapping_map_user" This reverts commit b739f125e4ebd73d10ed30a856574e13649119ed. We are unfortunately seeing more issues like we did in 293837b9ac8d ("Revert "i915: fix remap_io_sg to verify the pgprot""), except this is now for the vm_fault_gtt path, where we are now hitting the same BUG_ON(!pte_none(*pte)): [10887.466150] kernel BUG at mm/memory.c:2183! [10887.466162] invalid opcode: 0000 [#1] PREEMPT SMP PTI [10887.466168] CPU: 0 PID: 7775 Comm: ffmpeg Tainted: G U 5.13.0-rc3-CI-Nightly #1 [10887.466174] Hardware name: To Be Filled By O.E.M. 
To Be Filled By O.E.M./J4205-ITX, BIOS P1.40 07/14/2017 [10887.466177] RIP: 0010:remap_pfn_range_notrack+0x30f/0x440 [10887.466188] Code: e8 96 d7 e0 ff 84 c0 0f 84 27 01 00 00 48 ba 00 f0 ff ff ff ff 0f 00 4c 89 e0 48 c1 e0 0c 4d 85 ed 75 96 48 21 d0 31 f6 eb a9 <0f> 0b 48 39 37 0f 85 0e 01 00 00 48 8b 0c 24 48 39 4f 08 0f 85 00 [10887.466193] RSP: 0018:ffffc90006e33c50 EFLAGS: 00010286 [10887.466198] RAX: 800000000000002f RBX: 00007f5e01800000 RCX: 0000000000000028 [10887.466201] RDX: 0000000000000001 RSI: ffffea0000000000 RDI: 0000000000000000 [10887.466204] RBP: ffffea000033fea8 R08: 800000000000002f R09: ffff8881072256e0 [10887.466207] R10: ffffc9000b84fff8 R11: 0000000017dab000 R12: 0000000000089f9f [10887.466210] R13: 800000000000002f R14: 00007f5e017e4000 R15: ffff88800cffaf20 [10887.466213] FS: 00007f5e04849640(0000) GS:ffff888278000000(0000) knlGS:0000000000000000 [10887.466216] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [10887.466220] CR2: 00007fd9b191a2ac CR3: 00000001829ac000 CR4: 00000000003506f0 [10887.466223] Call Trace: [10887.466233] vm_fault_gtt+0x1ca/0x5d0 [i915] [10887.466381] ? ktime_get+0x38/0x90 [10887.466389] __do_fault+0x37/0x90 [10887.466395] __handle_mm_fault+0xc46/0x1200 [10887.466402] handle_mm_fault+0xce/0x2a0 [10887.466407] do_user_addr_fault+0x1c5/0x660 Reverting this commit is reported to fix the issue. Reported-by: Eero Tamminen References: https://gitlab.freedesktop.org/drm/intel/-/issues/3519 Fixes: b739f125e4eb ("i915: use io_mapping_map_user") Cc: Christoph Hellwig Cc: Daniel Vetter Signed-off-by: Matthew Auld Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20210527185145.458021-1-matthew.auld@intel.com --- drivers/gpu/drm/i915/Kconfig | 1 - drivers/gpu/drm/i915/gem/i915_gem_mman.c | 9 ++++--- drivers/gpu/drm/i915/i915_drv.h | 3 +++ drivers/gpu/drm/i915/i915_mm.c | 44 ++++++++++++++++++++++++++++++++ 4 files changed, 52 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 93f4d059fc89..1e1cb245fca7 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -20,7 +20,6 @@ config DRM_I915 select INPUT if ACPI select ACPI_VIDEO if ACPI select ACPI_BUTTON if ACPI - select IO_MAPPING select SYNC_FILE select IOSF_MBI select CRC32 diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index fd1c9714f8d8..65db290efd16 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -374,10 +374,11 @@ retry: goto err_unpin; /* Finally, remap it using the new GTT offset */ - ret = io_mapping_map_user(&ggtt->iomap, area, area->vm_start + - (vma->ggtt_view.partial.offset << PAGE_SHIFT), - (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT, - min_t(u64, vma->size, area->vm_end - area->vm_start)); + ret = remap_io_mapping(area, + area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT), + (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT, + min_t(u64, vma->size, area->vm_end - area->vm_start), + &ggtt->iomap); if (ret) goto err_fence; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9aee6a045590..8e490fad8d63 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1936,6 +1936,9 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); /* i915_mm.c */ +int remap_io_mapping(struct vm_area_struct *vma, + unsigned long addr, unsigned 
long pfn, unsigned long size, + struct io_mapping *iomap); int remap_io_sg(struct vm_area_struct *vma, unsigned long addr, unsigned long size, struct scatterlist *sgl, resource_size_t iobase); diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c index 9a777b0ff59b..666808cb3a32 100644 --- a/drivers/gpu/drm/i915/i915_mm.c +++ b/drivers/gpu/drm/i915/i915_mm.c @@ -37,6 +37,17 @@ struct remap_pfn { resource_size_t iobase; }; +static int remap_pfn(pte_t *pte, unsigned long addr, void *data) +{ + struct remap_pfn *r = data; + + /* Special PTEs are not associated with any struct page */ + set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot))); + r->pfn++; + + return 0; +} + #define use_dma(io) ((io) != -1) static inline unsigned long sgt_pfn(const struct remap_pfn *r) @@ -66,7 +77,40 @@ static int remap_sg(pte_t *pte, unsigned long addr, void *data) return 0; } +/** + * remap_io_mapping - remap an IO mapping to userspace + * @vma: user vma to map to + * @addr: target user address to start at + * @pfn: physical address of kernel memory + * @size: size of map area + * @iomap: the source io_mapping + * + * Note: this is only safe if the mm semaphore is held when called. */ +int remap_io_mapping(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, unsigned long size, + struct io_mapping *iomap) +{ + struct remap_pfn r; + int err; + #define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP) + GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS); + + /* We rely on prevalidation of the io-mapping to skip track_pfn(). */ + r.mm = vma->vm_mm; + r.pfn = pfn; + r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) | + (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK)); + + err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r); + if (unlikely(err)) { + zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT); + return err; + } + + return 0; +} /** * remap_io_sg - remap an IO mapping to userspace -- cgit From d148738923fdb5077089e48ec15555e6008100d0 Mon Sep 17 00:00:00 2001 From: Thomas Hellström Date: Wed, 2 Jun 2021 10:38:08 +0200 Subject: drm/i915/ttm: Initialize the ttm device and memory managers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Temporarily remove the buddy allocator and related selftests and hook up the TTM range manager for i915 regions. Also modify the mock region selftests somewhat to account for a fragmenting manager.
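The shape of the replacement backend, sketched from the lmem hooks below (an opaque manager node takes the place of the buddy block list, and the object's sg_table is built from that node):

	/* get_pages: allocate space from the TTM-managed region... */
	node = intel_region_ttm_node_alloc(mem, size, flags);

	/* ...and convert the node into an sg_table for the object. */
	pages = intel_region_ttm_node_to_st(mem, node);
	__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));

	/* put_pages: hand the node back to the region's manager. */
	intel_region_ttm_node_free(mem, node);
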
Signed-off-by: Thomas Hellström Reviewed-by: Matthew Auld Signed-off-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20210602083818.241793-2-thomas.hellstrom@linux.intel.com --- drivers/gpu/drm/i915/Kconfig | 1 + drivers/gpu/drm/i915/Makefile | 2 +- drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 59 +- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 6 +- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 3 +- drivers/gpu/drm/i915/gem/i915_gem_region.c | 120 ---- drivers/gpu/drm/i915/gem/i915_gem_region.h | 4 - drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 4 +- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 10 +- drivers/gpu/drm/i915/gem/i915_gem_stolen.h | 9 +- drivers/gpu/drm/i915/gt/intel_gt.c | 2 - drivers/gpu/drm/i915/gt/intel_region_lmem.c | 27 +- drivers/gpu/drm/i915/i915_buddy.c | 435 ------------ drivers/gpu/drm/i915/i915_buddy.h | 131 ---- drivers/gpu/drm/i915/i915_drv.c | 8 + drivers/gpu/drm/i915/i915_drv.h | 8 +- drivers/gpu/drm/i915/i915_gem.c | 1 + drivers/gpu/drm/i915/i915_globals.c | 1 - drivers/gpu/drm/i915/i915_globals.h | 1 - drivers/gpu/drm/i915/i915_scatterlist.c | 70 ++ drivers/gpu/drm/i915/i915_scatterlist.h | 4 + drivers/gpu/drm/i915/intel_memory_region.c | 180 ++--- drivers/gpu/drm/i915/intel_memory_region.h | 44 +- drivers/gpu/drm/i915/intel_region_ttm.c | 220 ++++++ drivers/gpu/drm/i915/intel_region_ttm.h | 32 + drivers/gpu/drm/i915/selftests/i915_buddy.c | 789 --------------------- .../gpu/drm/i915/selftests/i915_mock_selftests.h | 1 - .../gpu/drm/i915/selftests/intel_memory_region.c | 133 ++-- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 10 + drivers/gpu/drm/i915/selftests/mock_region.c | 70 +- 30 files changed, 631 insertions(+), 1754 deletions(-) delete mode 100644 drivers/gpu/drm/i915/i915_buddy.c delete mode 100644 drivers/gpu/drm/i915/i915_buddy.h create mode 100644 drivers/gpu/drm/i915/intel_region_ttm.c create mode 100644 drivers/gpu/drm/i915/intel_region_ttm.h delete mode 100644 drivers/gpu/drm/i915/selftests/i915_buddy.c (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 1e1cb245fca7..b63d374dff23 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -26,6 +26,7 @@ config DRM_I915 select SND_HDA_I915 if SND_HDA_CORE select CEC_CORE if CEC_NOTIFIER select VMAP_PFN + select DRM_TTM help Choose this option if you have a system that has "Intel Graphics Media Accelerator" or "HD Graphics" integrated graphics, diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index d0d936d9137b..cb8823570996 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -50,6 +50,7 @@ i915-y += i915_drv.o \ intel_memory_region.o \ intel_pch.o \ intel_pm.o \ + intel_region_ttm.o \ intel_runtime_pm.o \ intel_sideband.o \ intel_step.o \ @@ -160,7 +161,6 @@ gem-y += \ i915-y += \ $(gem-y) \ i915_active.o \ - i915_buddy.o \ i915_cmd_parser.o \ i915_gem_evict.o \ i915_gem_gtt.o \ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c index f44bdd08f7cb..3b4aa28a076d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c @@ -4,16 +4,71 @@ */ #include "intel_memory_region.h" +#include "intel_region_ttm.h" #include "gem/i915_gem_region.h" #include "gem/i915_gem_lmem.h" #include "i915_drv.h" +static void lmem_put_pages(struct drm_i915_gem_object *obj, + struct sg_table *pages) +{ + intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node); + 
obj->mm.dirty = false; + sg_free_table(pages); + kfree(pages); +} + +static int lmem_get_pages(struct drm_i915_gem_object *obj) +{ + unsigned int flags; + struct sg_table *pages; + + flags = I915_ALLOC_MIN_PAGE_SIZE; + if (obj->flags & I915_BO_ALLOC_CONTIGUOUS) + flags |= I915_ALLOC_CONTIGUOUS; + + obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region, + obj->base.size, + flags); + if (IS_ERR(obj->mm.st_mm_node)) + return PTR_ERR(obj->mm.st_mm_node); + + /* Range manager is always contiguous */ + if (obj->mm.region->is_range_manager) + obj->flags |= I915_BO_ALLOC_CONTIGUOUS; + pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node); + if (IS_ERR(pages)) { + intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node); + return PTR_ERR(pages); + } + + __i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl)); + + if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) { + void __iomem *vaddr = + i915_gem_object_lmem_io_map(obj, 0, obj->base.size); + + if (!vaddr) { + struct sg_table *pages = + __i915_gem_object_unset_pages(obj); + + if (!IS_ERR_OR_NULL(pages)) + lmem_put_pages(obj, pages); + } + + memset_io(vaddr, 0, obj->base.size); + io_mapping_unmap(vaddr); + } + + return 0; +} + const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = { .name = "i915_gem_object_lmem", .flags = I915_GEM_OBJECT_HAS_IOMEM, - .get_pages = i915_gem_object_get_pages_buddy, - .put_pages = i915_gem_object_put_pages_buddy, + .get_pages = lmem_get_pages, + .put_pages = lmem_put_pages, .release = i915_gem_object_release_memory_region, }; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 0415f99b6b95..f5b46d11e6e6 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -235,10 +235,12 @@ struct drm_i915_gem_object { * Memory region for this object. */ struct intel_memory_region *region; + /** - * List of memory region blocks allocated for this object. + * Memory manager node allocated for this object. */ - struct list_head blocks; + void *st_mm_node; + /** * Element within memory_region->objects or region->purgeable * if the object is marked as DONTNEED. Access is protected by diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 7361971c177d..6444e097016d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -475,7 +475,8 @@ __i915_gem_object_get_sg(struct drm_i915_gem_object *obj, might_sleep(); GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT); - GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + if (!i915_gem_object_has_pinned_pages(obj)) + assert_object_held(obj); /* As we iterate forward through the sg, we record each entry in a * radixtree for quick repeated (backwards) lookups.
If we have seen diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c index ce8fcfc54079..f25e6646c5b7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_region.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c @@ -8,129 +8,9 @@ #include "i915_drv.h" #include "i915_trace.h" -void -i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj, - struct sg_table *pages) -{ - __intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks); - - obj->mm.dirty = false; - sg_free_table(pages); - kfree(pages); -} - -int -i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj) -{ - const u64 max_segment = i915_sg_segment_size(); - struct intel_memory_region *mem = obj->mm.region; - struct list_head *blocks = &obj->mm.blocks; - resource_size_t size = obj->base.size; - resource_size_t prev_end; - struct i915_buddy_block *block; - unsigned int flags; - struct sg_table *st; - struct scatterlist *sg; - unsigned int sg_page_sizes; - int ret; - - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) - return -ENOMEM; - - if (sg_alloc_table(st, size >> PAGE_SHIFT, GFP_KERNEL)) { - kfree(st); - return -ENOMEM; - } - - flags = I915_ALLOC_MIN_PAGE_SIZE; - if (obj->flags & I915_BO_ALLOC_CONTIGUOUS) - flags |= I915_ALLOC_CONTIGUOUS; - - ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks); - if (ret) - goto err_free_sg; - - GEM_BUG_ON(list_empty(blocks)); - - sg = st->sgl; - st->nents = 0; - sg_page_sizes = 0; - prev_end = (resource_size_t)-1; - - list_for_each_entry(block, blocks, link) { - u64 block_size, offset; - - block_size = min_t(u64, size, - i915_buddy_block_size(&mem->mm, block)); - offset = i915_buddy_block_offset(block); - - while (block_size) { - u64 len; - - if (offset != prev_end || sg->length >= max_segment) { - if (st->nents) { - sg_page_sizes |= sg->length; - sg = __sg_next(sg); - } - - sg_dma_address(sg) = mem->region.start + offset; - sg_dma_len(sg) = 0; - sg->length = 0; - st->nents++; - } - - len = min(block_size, max_segment - sg->length); - sg->length += len; - sg_dma_len(sg) += len; - - offset += len; - block_size -= len; - - prev_end = offset; - } - } - - sg_page_sizes |= sg->length; - sg_mark_end(sg); - i915_sg_trim(st); - - /* Intended for kernel internal use only */ - if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) { - struct scatterlist *sg; - unsigned long i; - - for_each_sg(st->sgl, sg, st->nents, i) { - unsigned int length; - void __iomem *vaddr; - dma_addr_t daddr; - - daddr = sg_dma_address(sg); - daddr -= mem->region.start; - length = sg_dma_len(sg); - - vaddr = io_mapping_map_wc(&mem->iomap, daddr, length); - memset64((void __force *)vaddr, 0, length / sizeof(u64)); - io_mapping_unmap(vaddr); - } - - wmb(); - } - - __i915_gem_object_set_pages(obj, st, sg_page_sizes); - - return 0; - -err_free_sg: - sg_free_table(st); - kfree(st); - return ret; -} - void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj, struct intel_memory_region *mem) { - INIT_LIST_HEAD(&obj->mm.blocks); obj->mm.region = intel_memory_region_get(mem); if (obj->base.size <= mem->min_page_size) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h index ebddc86d78f7..84fcb3297400 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_region.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h @@ -12,10 +12,6 @@ struct intel_memory_region; struct drm_i915_gem_object; struct sg_table; -int i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj); -void 
i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj, - struct sg_table *pages); - void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj, struct intel_memory_region *mem); void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index a9bfa66c8da1..5d16c4462fda 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -628,11 +628,13 @@ static const struct intel_memory_region_ops shmem_region_ops = { .init_object = shmem_object_init, }; -struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915) +struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915, + u16 type, u16 instance) { return intel_memory_region_create(i915, 0, totalram_pages() << PAGE_SHIFT, PAGE_SIZE, 0, + type, instance, &shmem_region_ops); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index b5553fc3ac4d..092d7a21de82 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -772,7 +772,8 @@ static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = { }; struct intel_memory_region * -i915_gem_stolen_lmem_setup(struct drm_i915_private *i915) +i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type, + u16 instance) { struct intel_uncore *uncore = &i915->uncore; struct pci_dev *pdev = to_pci_dev(i915->drm.dev); @@ -790,6 +791,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915) mem = intel_memory_region_create(i915, lmem_base, lmem_size, I915_GTT_PAGE_SIZE_4K, io_start, + type, instance, &i915_region_stolen_lmem_ops); if (IS_ERR(mem)) return mem; @@ -811,14 +813,15 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915) } struct intel_memory_region* -i915_gem_stolen_smem_setup(struct drm_i915_private *i915) +i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type, + u16 instance) { struct intel_memory_region *mem; mem = intel_memory_region_create(i915, intel_graphics_stolen_res.start, resource_size(&intel_graphics_stolen_res), - PAGE_SIZE, 0, + PAGE_SIZE, 0, type, instance, &i915_region_stolen_smem_ops); if (IS_ERR(mem)) return mem; @@ -826,7 +829,6 @@ i915_gem_stolen_smem_setup(struct drm_i915_private *i915) intel_memory_region_set_name(mem, "stolen-system"); mem->private = true; - return mem; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h index 2bec6c367b9c..ccdf7befc571 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h @@ -21,8 +21,13 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, u64 end); void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, struct drm_mm_node *node); -struct intel_memory_region *i915_gem_stolen_smem_setup(struct drm_i915_private *i915); -struct intel_memory_region *i915_gem_stolen_lmem_setup(struct drm_i915_private *i915); +struct intel_memory_region * +i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type, + u16 instance); +struct intel_memory_region * +i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type, + u16 instance); + struct drm_i915_gem_object * i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, resource_size_t size); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 
8d77dcbad059..3f88ecdee031 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -68,8 +68,6 @@ int intel_gt_probe_lmem(struct intel_gt *gt) id = INTEL_REGION_LMEM; mem->id = id; - mem->type = INTEL_MEMORY_LOCAL; - mem->instance = 0; intel_memory_region_set_name(mem, "local%u", mem->instance); diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c index 73fceb0c25fc..f7366b054f8e 100644 --- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c +++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c @@ -5,6 +5,8 @@ #include "i915_drv.h" #include "intel_memory_region.h" +#include "intel_region_lmem.h" +#include "intel_region_ttm.h" #include "gem/i915_gem_lmem.h" #include "gem/i915_gem_region.h" #include "intel_region_lmem.h" @@ -66,9 +68,9 @@ static void release_fake_lmem_bar(struct intel_memory_region *mem) static void region_lmem_release(struct intel_memory_region *mem) { - release_fake_lmem_bar(mem); + intel_region_ttm_fini(mem); io_mapping_fini(&mem->iomap); - intel_memory_region_release_buddy(mem); + release_fake_lmem_bar(mem); } static int @@ -83,12 +85,21 @@ region_lmem_init(struct intel_memory_region *mem) if (!io_mapping_init_wc(&mem->iomap, mem->io_start, - resource_size(&mem->region))) - return -EIO; + resource_size(&mem->region))) { + ret = -EIO; + goto out_no_io; + } - ret = intel_memory_region_init_buddy(mem); + ret = intel_region_ttm_init(mem); if (ret) - io_mapping_fini(&mem->iomap); + goto out_no_buddy; + + return 0; + +out_no_buddy: + io_mapping_fini(&mem->iomap); +out_no_io: + release_fake_lmem_bar(mem); return ret; } @@ -127,6 +138,8 @@ intel_gt_setup_fake_lmem(struct intel_gt *gt) mappable_end, PAGE_SIZE, io_start, + INTEL_MEMORY_LOCAL, + 0, &intel_region_lmem_ops); if (!IS_ERR(mem)) { drm_info(&i915->drm, "Intel graphics fake LMEM: %pR\n", @@ -198,6 +211,8 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt) lmem_size, I915_GTT_PAGE_SIZE_4K, io_start, + INTEL_MEMORY_LOCAL, + 0, &intel_region_lmem_ops); if (IS_ERR(mem)) return mem; diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c deleted file mode 100644 index 3a2f6eecb2fc..000000000000 --- a/drivers/gpu/drm/i915/i915_buddy.c +++ /dev/null @@ -1,435 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2019 Intel Corporation - */ - -#include -#include - -#include "i915_buddy.h" - -#include "i915_gem.h" -#include "i915_globals.h" -#include "i915_utils.h" - -static struct i915_global_block { - struct i915_global base; - struct kmem_cache *slab_blocks; -} global; - -static void i915_global_buddy_shrink(void) -{ - kmem_cache_shrink(global.slab_blocks); -} - -static void i915_global_buddy_exit(void) -{ - kmem_cache_destroy(global.slab_blocks); -} - -static struct i915_global_block global = { { - .shrink = i915_global_buddy_shrink, - .exit = i915_global_buddy_exit, -} }; - -int __init i915_global_buddy_init(void) -{ - global.slab_blocks = KMEM_CACHE(i915_buddy_block, SLAB_HWCACHE_ALIGN); - if (!global.slab_blocks) - return -ENOMEM; - - i915_global_register(&global.base); - return 0; -} - -static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_block *parent, - unsigned int order, - u64 offset) -{ - struct i915_buddy_block *block; - - GEM_BUG_ON(order > I915_BUDDY_MAX_ORDER); - - block = kmem_cache_zalloc(global.slab_blocks, GFP_KERNEL); - if (!block) - return NULL; - - block->header = offset; - block->header |= order; - block->parent = parent; - - GEM_BUG_ON(block->header & 
I915_BUDDY_HEADER_UNUSED); - return block; -} - -static void i915_block_free(struct i915_buddy_block *block) -{ - kmem_cache_free(global.slab_blocks, block); -} - -static void mark_allocated(struct i915_buddy_block *block) -{ - block->header &= ~I915_BUDDY_HEADER_STATE; - block->header |= I915_BUDDY_ALLOCATED; - - list_del(&block->link); -} - -static void mark_free(struct i915_buddy_mm *mm, - struct i915_buddy_block *block) -{ - block->header &= ~I915_BUDDY_HEADER_STATE; - block->header |= I915_BUDDY_FREE; - - list_add(&block->link, - &mm->free_list[i915_buddy_block_order(block)]); -} - -static void mark_split(struct i915_buddy_block *block) -{ - block->header &= ~I915_BUDDY_HEADER_STATE; - block->header |= I915_BUDDY_SPLIT; - - list_del(&block->link); -} - -int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size) -{ - unsigned int i; - u64 offset; - - if (size < chunk_size) - return -EINVAL; - - if (chunk_size < PAGE_SIZE) - return -EINVAL; - - if (!is_power_of_2(chunk_size)) - return -EINVAL; - - size = round_down(size, chunk_size); - - mm->size = size; - mm->chunk_size = chunk_size; - mm->max_order = ilog2(size) - ilog2(chunk_size); - - GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER); - - mm->free_list = kmalloc_array(mm->max_order + 1, - sizeof(struct list_head), - GFP_KERNEL); - if (!mm->free_list) - return -ENOMEM; - - for (i = 0; i <= mm->max_order; ++i) - INIT_LIST_HEAD(&mm->free_list[i]); - - mm->n_roots = hweight64(size); - - mm->roots = kmalloc_array(mm->n_roots, - sizeof(struct i915_buddy_block *), - GFP_KERNEL); - if (!mm->roots) - goto out_free_list; - - offset = 0; - i = 0; - - /* - * Split into power-of-two blocks, in case we are given a size that is - * not itself a power-of-two. - */ - do { - struct i915_buddy_block *root; - unsigned int order; - u64 root_size; - - root_size = rounddown_pow_of_two(size); - order = ilog2(root_size) - ilog2(chunk_size); - - root = i915_block_alloc(NULL, order, offset); - if (!root) - goto out_free_roots; - - mark_free(mm, root); - - GEM_BUG_ON(i > mm->max_order); - GEM_BUG_ON(i915_buddy_block_size(mm, root) < chunk_size); - - mm->roots[i] = root; - - offset += root_size; - size -= root_size; - i++; - } while (size); - - return 0; - -out_free_roots: - while (i--) - i915_block_free(mm->roots[i]); - kfree(mm->roots); -out_free_list: - kfree(mm->free_list); - return -ENOMEM; -} - -void i915_buddy_fini(struct i915_buddy_mm *mm) -{ - int i; - - for (i = 0; i < mm->n_roots; ++i) { - GEM_WARN_ON(!i915_buddy_block_is_free(mm->roots[i])); - i915_block_free(mm->roots[i]); - } - - kfree(mm->roots); - kfree(mm->free_list); -} - -static int split_block(struct i915_buddy_mm *mm, - struct i915_buddy_block *block) -{ - unsigned int block_order = i915_buddy_block_order(block) - 1; - u64 offset = i915_buddy_block_offset(block); - - GEM_BUG_ON(!i915_buddy_block_is_free(block)); - GEM_BUG_ON(!i915_buddy_block_order(block)); - - block->left = i915_block_alloc(block, block_order, offset); - if (!block->left) - return -ENOMEM; - - block->right = i915_block_alloc(block, block_order, - offset + (mm->chunk_size << block_order)); - if (!block->right) { - i915_block_free(block->left); - return -ENOMEM; - } - - mark_free(mm, block->left); - mark_free(mm, block->right); - - mark_split(block); - - return 0; -} - -static struct i915_buddy_block * -get_buddy(struct i915_buddy_block *block) -{ - struct i915_buddy_block *parent; - - parent = block->parent; - if (!parent) - return NULL; - - if (parent->left == block) - return parent->right; - - return 
parent->left; -} - -static void __i915_buddy_free(struct i915_buddy_mm *mm, - struct i915_buddy_block *block) -{ - struct i915_buddy_block *parent; - - while ((parent = block->parent)) { - struct i915_buddy_block *buddy; - - buddy = get_buddy(block); - - if (!i915_buddy_block_is_free(buddy)) - break; - - list_del(&buddy->link); - - i915_block_free(block); - i915_block_free(buddy); - - block = parent; - } - - mark_free(mm, block); -} - -void i915_buddy_free(struct i915_buddy_mm *mm, - struct i915_buddy_block *block) -{ - GEM_BUG_ON(!i915_buddy_block_is_allocated(block)); - __i915_buddy_free(mm, block); -} - -void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects) -{ - struct i915_buddy_block *block, *on; - - list_for_each_entry_safe(block, on, objects, link) { - i915_buddy_free(mm, block); - cond_resched(); - } - INIT_LIST_HEAD(objects); -} - -/* - * Allocate power-of-two block. The order value here translates to: - * - * 0 = 2^0 * mm->chunk_size - * 1 = 2^1 * mm->chunk_size - * 2 = 2^2 * mm->chunk_size - * ... - */ -struct i915_buddy_block * -i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order) -{ - struct i915_buddy_block *block = NULL; - unsigned int i; - int err; - - for (i = order; i <= mm->max_order; ++i) { - block = list_first_entry_or_null(&mm->free_list[i], - struct i915_buddy_block, - link); - if (block) - break; - } - - if (!block) - return ERR_PTR(-ENOSPC); - - GEM_BUG_ON(!i915_buddy_block_is_free(block)); - - while (i != order) { - err = split_block(mm, block); - if (unlikely(err)) - goto out_free; - - /* Go low */ - block = block->left; - i--; - } - - mark_allocated(block); - kmemleak_update_trace(block); - return block; - -out_free: - if (i != order) - __i915_buddy_free(mm, block); - return ERR_PTR(err); -} - -static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2) -{ - return s1 <= e2 && e1 >= s2; -} - -static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2) -{ - return s1 <= s2 && e1 >= e2; -} - -/* - * Allocate range. Note that it's safe to chain together multiple alloc_ranges - * with the same blocks list. - * - * Intended for pre-allocating portions of the address space, for example to - * reserve a block for the initial framebuffer or similar, hence the expectation - * here is that i915_buddy_alloc() is still the main vehicle for - * allocations, so if that's not the case then the drm_mm range allocator is - * probably a much better fit, and so you should probably go use that instead. 
- */ -int i915_buddy_alloc_range(struct i915_buddy_mm *mm, - struct list_head *blocks, - u64 start, u64 size) -{ - struct i915_buddy_block *block; - struct i915_buddy_block *buddy; - LIST_HEAD(allocated); - LIST_HEAD(dfs); - u64 end; - int err; - int i; - - if (size < mm->chunk_size) - return -EINVAL; - - if (!IS_ALIGNED(size | start, mm->chunk_size)) - return -EINVAL; - - if (range_overflows(start, size, mm->size)) - return -EINVAL; - - for (i = 0; i < mm->n_roots; ++i) - list_add_tail(&mm->roots[i]->tmp_link, &dfs); - - end = start + size - 1; - - do { - u64 block_start; - u64 block_end; - - block = list_first_entry_or_null(&dfs, - struct i915_buddy_block, - tmp_link); - if (!block) - break; - - list_del(&block->tmp_link); - - block_start = i915_buddy_block_offset(block); - block_end = block_start + i915_buddy_block_size(mm, block) - 1; - - if (!overlaps(start, end, block_start, block_end)) - continue; - - if (i915_buddy_block_is_allocated(block)) { - err = -ENOSPC; - goto err_free; - } - - if (contains(start, end, block_start, block_end)) { - if (!i915_buddy_block_is_free(block)) { - err = -ENOSPC; - goto err_free; - } - - mark_allocated(block); - list_add_tail(&block->link, &allocated); - continue; - } - - if (!i915_buddy_block_is_split(block)) { - err = split_block(mm, block); - if (unlikely(err)) - goto err_undo; - } - - list_add(&block->right->tmp_link, &dfs); - list_add(&block->left->tmp_link, &dfs); - } while (1); - - list_splice_tail(&allocated, blocks); - return 0; - -err_undo: - /* - * We really don't want to leave around a bunch of split blocks, since - * bigger is better, so make sure we merge everything back before we - * free the allocated blocks. - */ - buddy = get_buddy(block); - if (buddy && - (i915_buddy_block_is_free(block) && - i915_buddy_block_is_free(buddy))) - __i915_buddy_free(mm, block); - -err_free: - i915_buddy_free_list(mm, &allocated); - return err; -} - -#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) -#include "selftests/i915_buddy.c" -#endif diff --git a/drivers/gpu/drm/i915/i915_buddy.h b/drivers/gpu/drm/i915/i915_buddy.h deleted file mode 100644 index 9ce5200f4001..000000000000 --- a/drivers/gpu/drm/i915/i915_buddy.h +++ /dev/null @@ -1,131 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2019 Intel Corporation - */ - -#ifndef __I915_BUDDY_H__ -#define __I915_BUDDY_H__ - -#include -#include - -struct i915_buddy_block { -#define I915_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12) -#define I915_BUDDY_HEADER_STATE GENMASK_ULL(11, 10) -#define I915_BUDDY_ALLOCATED (1 << 10) -#define I915_BUDDY_FREE (2 << 10) -#define I915_BUDDY_SPLIT (3 << 10) -/* Free to be used, if needed in the future */ -#define I915_BUDDY_HEADER_UNUSED GENMASK_ULL(9, 6) -#define I915_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0) - u64 header; - - struct i915_buddy_block *left; - struct i915_buddy_block *right; - struct i915_buddy_block *parent; - - void *private; /* owned by creator */ - - /* - * While the block is allocated by the user through i915_buddy_alloc*, - * the user has ownership of the link, for example to maintain within - * a list, if so desired. As soon as the block is freed with - * i915_buddy_free* ownership is given back to the mm. - */ - struct list_head link; - struct list_head tmp_link; -}; - -/* Order-zero must be at least PAGE_SIZE */ -#define I915_BUDDY_MAX_ORDER (63 - PAGE_SHIFT) - -/* - * Binary Buddy System. - * - * Locking should be handled by the user, a simple mutex around - * i915_buddy_alloc* and i915_buddy_free* should suffice. 
- */ -struct i915_buddy_mm { - /* Maintain a free list for each order. */ - struct list_head *free_list; - - /* - * Maintain explicit binary tree(s) to track the allocation of the - * address space. This gives us a simple way of finding a buddy block - * and performing the potentially recursive merge step when freeing a - * block. Nodes are either allocated or free, in which case they will - * also exist on the respective free list. - */ - struct i915_buddy_block **roots; - - /* - * Anything from here is public, and remains static for the lifetime of - * the mm. Everything above is considered do-not-touch. - */ - unsigned int n_roots; - unsigned int max_order; - - /* Must be at least PAGE_SIZE */ - u64 chunk_size; - u64 size; -}; - -static inline u64 -i915_buddy_block_offset(struct i915_buddy_block *block) -{ - return block->header & I915_BUDDY_HEADER_OFFSET; -} - -static inline unsigned int -i915_buddy_block_order(struct i915_buddy_block *block) -{ - return block->header & I915_BUDDY_HEADER_ORDER; -} - -static inline unsigned int -i915_buddy_block_state(struct i915_buddy_block *block) -{ - return block->header & I915_BUDDY_HEADER_STATE; -} - -static inline bool -i915_buddy_block_is_allocated(struct i915_buddy_block *block) -{ - return i915_buddy_block_state(block) == I915_BUDDY_ALLOCATED; -} - -static inline bool -i915_buddy_block_is_free(struct i915_buddy_block *block) -{ - return i915_buddy_block_state(block) == I915_BUDDY_FREE; -} - -static inline bool -i915_buddy_block_is_split(struct i915_buddy_block *block) -{ - return i915_buddy_block_state(block) == I915_BUDDY_SPLIT; -} - -static inline u64 -i915_buddy_block_size(struct i915_buddy_mm *mm, - struct i915_buddy_block *block) -{ - return mm->chunk_size << i915_buddy_block_order(block); -} - -int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size); - -void i915_buddy_fini(struct i915_buddy_mm *mm); - -struct i915_buddy_block * -i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order); - -int i915_buddy_alloc_range(struct i915_buddy_mm *mm, - struct list_head *blocks, - u64 start, u64 size); - -void i915_buddy_free(struct i915_buddy_mm *mm, struct i915_buddy_block *block); - -void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects); - -#endif diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 92bccc5623a8..122dd297b6af 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -84,6 +84,7 @@ #include "intel_gvt.h" #include "intel_memory_region.h" #include "intel_pm.h" +#include "intel_region_ttm.h" #include "intel_sideband.h" #include "vlv_suspend.h" @@ -335,6 +336,10 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) if (ret < 0) goto err_workqueues; + ret = intel_region_ttm_device_init(dev_priv); + if (ret) + goto err_ttm; + intel_wopcm_init_early(&dev_priv->wopcm); intel_gt_init_early(&dev_priv->gt, dev_priv); @@ -359,6 +364,8 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) err_gem: i915_gem_cleanup_early(dev_priv); intel_gt_driver_late_release(&dev_priv->gt); + intel_region_ttm_device_fini(dev_priv); +err_ttm: vlv_suspend_cleanup(dev_priv); err_workqueues: i915_workqueues_cleanup(dev_priv); @@ -376,6 +383,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv) intel_power_domains_cleanup(dev_priv); i915_gem_cleanup_early(dev_priv); intel_gt_driver_late_release(&dev_priv->gt); + intel_region_ttm_device_fini(dev_priv); vlv_suspend_cleanup(dev_priv); 
i915_workqueues_cleanup(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8e490fad8d63..bcb415157971 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -59,6 +59,7 @@ #include #include #include +#include #include "i915_params.h" #include "i915_reg.h" @@ -776,6 +777,7 @@ struct intel_cdclk_config { struct i915_selftest_stash { atomic_t counter; + struct ida mock_region_instances; }; struct drm_i915_private { @@ -1165,6 +1167,9 @@ struct drm_i915_private { /* Mutex to protect the above hdcp component related values. */ struct mutex hdcp_comp_mutex; + /* The TTM device structure. */ + struct ttm_device bdev; + I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;) /* @@ -1758,7 +1763,8 @@ void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv); void i915_gem_init_early(struct drm_i915_private *dev_priv); void i915_gem_cleanup_early(struct drm_i915_private *dev_priv); -struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915); +struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915, + u16 type, u16 instance); static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index cffd7f4f87dc..0993d706f067 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1108,6 +1108,7 @@ err_unlock: } i915_gem_drain_freed_objects(dev_priv); + return ret; } diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c index 3aa213684293..77f1911c463b 100644 --- a/drivers/gpu/drm/i915/i915_globals.c +++ b/drivers/gpu/drm/i915/i915_globals.c @@ -87,7 +87,6 @@ static void __i915_globals_cleanup(void) static __initconst int (* const initfn[])(void) = { i915_global_active_init, - i915_global_buddy_init, i915_global_context_init, i915_global_gem_context_init, i915_global_objects_init, diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h index b2f5cd9b9b1a..2d199f411a4a 100644 --- a/drivers/gpu/drm/i915/i915_globals.h +++ b/drivers/gpu/drm/i915/i915_globals.h @@ -27,7 +27,6 @@ void i915_globals_exit(void); /* constructors */ int i915_global_active_init(void); -int i915_global_buddy_init(void); int i915_global_context_init(void); int i915_global_gem_context_init(void); int i915_global_objects_init(void); diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c index cc6b3846a8c7..69e9e6c3135e 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.c +++ b/drivers/gpu/drm/i915/i915_scatterlist.c @@ -6,6 +6,10 @@ #include "i915_scatterlist.h" +#include + +#include + bool i915_sg_trim(struct sg_table *orig_st) { struct sg_table new_st; @@ -34,6 +38,72 @@ bool i915_sg_trim(struct sg_table *orig_st) return true; } +/** + * i915_sg_from_mm_node - Create an sg_table from a struct drm_mm_node + * @node: The drm_mm_node. + * @region_start: An offset to add to the dma addresses of the sg list. + * + * Create a struct sg_table, initializing it from a struct drm_mm_node, + * taking a maximum segment length into account, splitting into segments + * if necessary. + * + * Return: A pointer to a kmalloced struct sg_table on success, negative + * error code cast to an error pointer on failure. + */ +struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node, + u64 region_start) +{ + const u64 max_segment = SZ_1G; /* Do we have a limit on this? 
*/ + u64 segment_pages = max_segment >> PAGE_SHIFT; + u64 block_size, offset, prev_end; + struct sg_table *st; + struct scatterlist *sg; + + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return ERR_PTR(-ENOMEM); + + if (sg_alloc_table(st, DIV_ROUND_UP(node->size, segment_pages), + GFP_KERNEL)) { + kfree(st); + return ERR_PTR(-ENOMEM); + } + + sg = st->sgl; + st->nents = 0; + prev_end = (resource_size_t)-1; + block_size = node->size << PAGE_SHIFT; + offset = node->start << PAGE_SHIFT; + + while (block_size) { + u64 len; + + if (offset != prev_end || sg->length >= max_segment) { + if (st->nents) + sg = __sg_next(sg); + + sg_dma_address(sg) = region_start + offset; + sg_dma_len(sg) = 0; + sg->length = 0; + st->nents++; + } + + len = min(block_size, max_segment - sg->length); + sg->length += len; + sg_dma_len(sg) += len; + + offset += len; + block_size -= len; + + prev_end = offset; + } + + sg_mark_end(sg); + i915_sg_trim(st); + + return st; +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/scatterlist.c" #endif diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h index b96baad66a3a..5acca45ea981 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.h +++ b/drivers/gpu/drm/i915/i915_scatterlist.h @@ -13,6 +13,8 @@ #include "i915_gem.h" +struct drm_mm_node; + /* * Optimised SGL iterator for GEM objects */ @@ -141,4 +143,6 @@ static inline unsigned int i915_sg_segment_size(void) bool i915_sg_trim(struct sg_table *orig_st); +struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node, + u64 region_start); #endif diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c index d98e8b81d322..4092cc987679 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.c +++ b/drivers/gpu/drm/i915/intel_memory_region.c @@ -28,6 +28,11 @@ static const struct { }, }; +struct intel_region_reserve { + struct list_head link; + void *node; +}; + struct intel_memory_region * intel_memory_region_lookup(struct drm_i915_private *i915, u16 class, u16 instance) @@ -58,146 +63,61 @@ intel_memory_region_by_type(struct drm_i915_private *i915, return NULL; } -static u64 -intel_memory_region_free_pages(struct intel_memory_region *mem, - struct list_head *blocks) +/** + * intel_memory_region_unreserve - Unreserve all previously reserved + * ranges + * @mem: The region containing the reserved ranges. 
+ */ +void intel_memory_region_unreserve(struct intel_memory_region *mem) { - struct i915_buddy_block *block, *on; - u64 size = 0; + struct intel_region_reserve *reserve, *next; - list_for_each_entry_safe(block, on, blocks, link) { - size += i915_buddy_block_size(&mem->mm, block); - i915_buddy_free(&mem->mm, block); - } - INIT_LIST_HEAD(blocks); + if (!mem->priv_ops || !mem->priv_ops->free) + return; - return size; -} - -void -__intel_memory_region_put_pages_buddy(struct intel_memory_region *mem, - struct list_head *blocks) -{ mutex_lock(&mem->mm_lock); - mem->avail += intel_memory_region_free_pages(mem, blocks); - mutex_unlock(&mem->mm_lock); -} - -void -__intel_memory_region_put_block_buddy(struct i915_buddy_block *block) -{ - struct list_head blocks; - - INIT_LIST_HEAD(&blocks); - list_add(&block->link, &blocks); - __intel_memory_region_put_pages_buddy(block->private, &blocks); -} - -int -__intel_memory_region_get_pages_buddy(struct intel_memory_region *mem, - resource_size_t size, - unsigned int flags, - struct list_head *blocks) -{ - unsigned int min_order = 0; - unsigned long n_pages; - - GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size)); - GEM_BUG_ON(!list_empty(blocks)); - - if (flags & I915_ALLOC_MIN_PAGE_SIZE) { - min_order = ilog2(mem->min_page_size) - - ilog2(mem->mm.chunk_size); - } - - if (flags & I915_ALLOC_CONTIGUOUS) { - size = roundup_pow_of_two(size); - min_order = ilog2(size) - ilog2(mem->mm.chunk_size); + list_for_each_entry_safe(reserve, next, &mem->reserved, link) { + list_del(&reserve->link); + mem->priv_ops->free(mem, reserve->node); + kfree(reserve); } - - if (size > mem->mm.size) - return -E2BIG; - - n_pages = size >> ilog2(mem->mm.chunk_size); - - mutex_lock(&mem->mm_lock); - - do { - struct i915_buddy_block *block; - unsigned int order; - - order = fls(n_pages) - 1; - GEM_BUG_ON(order > mem->mm.max_order); - GEM_BUG_ON(order < min_order); - - do { - block = i915_buddy_alloc(&mem->mm, order); - if (!IS_ERR(block)) - break; - - if (order-- == min_order) - goto err_free_blocks; - } while (1); - - n_pages -= BIT(order); - - block->private = mem; - list_add_tail(&block->link, blocks); - - if (!n_pages) - break; - } while (1); - - mem->avail -= size; mutex_unlock(&mem->mm_lock); - return 0; - -err_free_blocks: - intel_memory_region_free_pages(mem, blocks); - mutex_unlock(&mem->mm_lock); - return -ENXIO; } -struct i915_buddy_block * -__intel_memory_region_get_block_buddy(struct intel_memory_region *mem, - resource_size_t size, - unsigned int flags) +/** + * intel_memory_region_reserve - Reserve a memory range + * @mem: The region for which we want to reserve a range. + * @offset: Start of the range to reserve. + * @size: The size of the range to reserve. + * + * Return: 0 on success, negative error code on failure. 
+ */ +int intel_memory_region_reserve(struct intel_memory_region *mem, + resource_size_t offset, + resource_size_t size) { - struct i915_buddy_block *block; - LIST_HEAD(blocks); int ret; + struct intel_region_reserve *reserve; - ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks); - if (ret) - return ERR_PTR(ret); + if (!mem->priv_ops || !mem->priv_ops->reserve) + return -EINVAL; - block = list_first_entry(&blocks, typeof(*block), link); - list_del_init(&block->link); - return block; -} + reserve = kzalloc(sizeof(*reserve), GFP_KERNEL); + if (!reserve) + return -ENOMEM; -int intel_memory_region_init_buddy(struct intel_memory_region *mem) -{ - return i915_buddy_init(&mem->mm, resource_size(&mem->region), - PAGE_SIZE); -} - -void intel_memory_region_release_buddy(struct intel_memory_region *mem) -{ - i915_buddy_free_list(&mem->mm, &mem->reserved); - i915_buddy_fini(&mem->mm); -} - -int intel_memory_region_reserve(struct intel_memory_region *mem, - u64 offset, u64 size) -{ - int ret; + reserve->node = mem->priv_ops->reserve(mem, offset, size); + if (IS_ERR(reserve->node)) { + ret = PTR_ERR(reserve->node); + kfree(reserve); + return ret; + } mutex_lock(&mem->mm_lock); - ret = i915_buddy_alloc_range(&mem->mm, &mem->reserved, offset, size); + list_add_tail(&reserve->link, &mem->reserved); mutex_unlock(&mem->mm_lock); - return ret; + return 0; } struct intel_memory_region * @@ -206,6 +126,8 @@ intel_memory_region_create(struct drm_i915_private *i915, resource_size_t size, resource_size_t min_page_size, resource_size_t io_start, + u16 type, + u16 instance, const struct intel_memory_region_ops *ops) { struct intel_memory_region *mem; @@ -222,6 +144,8 @@ intel_memory_region_create(struct drm_i915_private *i915, mem->ops = ops; mem->total = size; mem->avail = mem->total; + mem->type = type; + mem->instance = instance; mutex_init(&mem->objects.lock); INIT_LIST_HEAD(&mem->objects.list); @@ -259,6 +183,7 @@ static void __intel_memory_region_destroy(struct kref *kref) struct intel_memory_region *mem = container_of(kref, typeof(*mem), kref); + intel_memory_region_unreserve(mem); if (mem->ops->release) mem->ops->release(mem); @@ -296,15 +221,15 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915) instance = intel_region_map[i].instance; switch (type) { case INTEL_MEMORY_SYSTEM: - mem = i915_gem_shmem_setup(i915); + mem = i915_gem_shmem_setup(i915, type, instance); break; case INTEL_MEMORY_STOLEN_LOCAL: - mem = i915_gem_stolen_lmem_setup(i915); + mem = i915_gem_stolen_lmem_setup(i915, type, instance); if (!IS_ERR(mem)) i915->mm.stolen_region = mem; break; case INTEL_MEMORY_STOLEN_SYSTEM: - mem = i915_gem_stolen_smem_setup(i915); + mem = i915_gem_stolen_smem_setup(i915, type, instance); if (!IS_ERR(mem)) i915->mm.stolen_region = mem; break; @@ -321,9 +246,6 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915) } mem->id = i; - mem->type = type; - mem->instance = instance; - i915->mm.regions[i] = mem; } diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h index d24ce5a0b30b..e69cde13daf2 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.h +++ b/drivers/gpu/drm/i915/intel_memory_region.h @@ -13,8 +13,6 @@ #include #include -#include "i915_buddy.h" - struct drm_i915_private; struct drm_i915_gem_object; struct intel_memory_region; @@ -25,6 +23,7 @@ enum intel_memory_type { INTEL_MEMORY_LOCAL = I915_MEMORY_CLASS_DEVICE, INTEL_MEMORY_STOLEN_SYSTEM, INTEL_MEMORY_STOLEN_LOCAL, + INTEL_MEMORY_MOCK, }; enum 
intel_region_id {
@@ -59,10 +58,19 @@ struct intel_memory_region_ops {
			   unsigned int flags);
 };
 
+struct intel_memory_region_private_ops {
+	void *(*reserve)(struct intel_memory_region *mem,
+			 resource_size_t offset,
+			 resource_size_t size);
+	void (*free)(struct intel_memory_region *mem,
+		     void *node);
+};
+
 struct intel_memory_region {
 	struct drm_i915_private *i915;
 
 	const struct intel_memory_region_ops *ops;
+	const struct intel_memory_region_private_ops *priv_ops;
 
 	struct io_mapping iomap;
 	struct resource region;
@@ -70,7 +78,6 @@ struct intel_memory_region {
 	/* For fake LMEM */
 	struct drm_mm_node fake_mappable;
 
-	struct i915_buddy_mm mm;
 	struct mutex mm_lock;
 
 	struct kref kref;
@@ -95,36 +102,26 @@ struct intel_memory_region {
 		struct list_head list;
 		struct list_head purgeable;
 	} objects;
+
+	size_t chunk_size;
+	unsigned int max_order;
+	bool is_range_manager;
+
+	void *region_private;
 };
 
 struct intel_memory_region *
 intel_memory_region_lookup(struct drm_i915_private *i915,
			   u16 class, u16 instance);
 
-int intel_memory_region_init_buddy(struct intel_memory_region *mem);
-void intel_memory_region_release_buddy(struct intel_memory_region *mem);
-
-int __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
-					  resource_size_t size,
-					  unsigned int flags,
-					  struct list_head *blocks);
-struct i915_buddy_block *
-__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
-				      resource_size_t size,
-				      unsigned int flags);
-void __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
-					   struct list_head *blocks);
-void __intel_memory_region_put_block_buddy(struct i915_buddy_block *block);
-
-int intel_memory_region_reserve(struct intel_memory_region *mem,
-				u64 offset, u64 size);
-
 struct intel_memory_region *
 intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
+			   u16 type,
+			   u16 instance,
			   const struct intel_memory_region_ops *ops);
 
 struct intel_memory_region *
@@ -141,4 +138,9 @@ __printf(2, 3) void
 intel_memory_region_set_name(struct intel_memory_region *mem,
			     const char *fmt, ...);
 
+void intel_memory_region_unreserve(struct intel_memory_region *mem);
+
+int intel_memory_region_reserve(struct intel_memory_region *mem,
+				resource_size_t offset,
+				resource_size_t size);
 #endif
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c
new file mode 100644
index 000000000000..c8ac118c21f6
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+#include
+#include
+
+#include "i915_drv.h"
+#include "i915_scatterlist.h"
+
+#include "intel_region_ttm.h"
+
+/**
+ * DOC: TTM support structure
+ *
+ * The code in this file deals with setting up memory managers for TTM
+ * LMEM and MOCK regions and converting the output from
+ * the managers to struct sg_table, basically providing the mapping from
+ * i915 GEM regions to TTM memory types and resource managers.
+ */
+
+/* A zero-initialized driver for now. We don't have a TTM backend yet. */
+static struct ttm_device_funcs i915_ttm_bo_driver;
+
+/**
+ * intel_region_ttm_device_init - Initialize a TTM device
+ * @dev_priv: Pointer to an i915 device private structure.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *drm = &dev_priv->drm;
+
+	return ttm_device_init(&dev_priv->bdev, &i915_ttm_bo_driver,
+			       drm->dev, drm->anon_inode->i_mapping,
+			       drm->vma_offset_manager, false, false);
+}
+
+/**
+ * intel_region_ttm_device_fini - Finalize a TTM device
+ * @dev_priv: Pointer to an i915 device private structure.
+ */
+void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv)
+{
+	ttm_device_fini(&dev_priv->bdev);
+}
+
+/*
+ * Map the i915 memory regions to TTM memory types. We use the
+ * driver-private types for now, reserving TTM_PL_VRAM for stolen
+ * memory and TTM_PL_TT for GGTT use if we decide to implement this.
+ */
+static int intel_region_to_ttm_type(struct intel_memory_region *mem)
+{
+	int type;
+
+	GEM_BUG_ON(mem->type != INTEL_MEMORY_LOCAL &&
+		   mem->type != INTEL_MEMORY_MOCK);
+
+	type = mem->instance + TTM_PL_PRIV;
+	GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);
+
+	return type;
+}
+
+static void *intel_region_ttm_node_reserve(struct intel_memory_region *mem,
+					   resource_size_t offset,
+					   resource_size_t size)
+{
+	struct ttm_resource_manager *man = mem->region_private;
+	struct ttm_place place = {};
+	struct ttm_resource res = {};
+	struct ttm_buffer_object mock_bo = {};
+	int ret;
+
+	/*
+	 * Having to use a mock_bo is unfortunate but stems from some
+	 * drivers having private managers that insist on knowing what the
+	 * allocated memory is intended for, using it to send private
+	 * data to the manager. Also recently the bo has been used to send
+	 * alignment info to the manager. Assume that apart from the latter,
+	 * none of the managers we use will ever access the buffer object
+	 * members, hoping we can pass the alignment info in the
+	 * struct ttm_place in the future.
+	 */
+
+	place.fpfn = offset >> PAGE_SHIFT;
+	place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
+	res.num_pages = size >> PAGE_SHIFT;
+	ret = man->func->alloc(man, &mock_bo, &place, &res);
+	if (ret == -ENOSPC)
+		ret = -ENXIO;
+
+	return ret ? ERR_PTR(ret) : res.mm_node;
+}
+
+/**
+ * intel_region_ttm_node_free - Free a node allocated from a resource manager
+ * @mem: The region the node was allocated from.
+ * @node: The opaque node representing an allocation.
+ */
+void intel_region_ttm_node_free(struct intel_memory_region *mem,
+				void *node)
+{
+	struct ttm_resource_manager *man = mem->region_private;
+	struct ttm_resource res = {};
+
+	res.mm_node = node;
+	man->func->free(man, &res);
+}
+
+static const struct intel_memory_region_private_ops priv_ops = {
+	.reserve = intel_region_ttm_node_reserve,
+	.free = intel_region_ttm_node_free,
+};
+
+int intel_region_ttm_init(struct intel_memory_region *mem)
+{
+	struct ttm_device *bdev = &mem->i915->bdev;
+	int mem_type = intel_region_to_ttm_type(mem);
+	int ret;
+
+	ret = ttm_range_man_init(bdev, mem_type, false,
+				 resource_size(&mem->region) >> PAGE_SHIFT);
+	if (ret)
+		return ret;
+
+	mem->chunk_size = PAGE_SIZE;
+	mem->max_order =
+		get_order(rounddown_pow_of_two(resource_size(&mem->region)));
+	mem->is_range_manager = true;
+	mem->priv_ops = &priv_ops;
+	mem->region_private = ttm_manager_type(bdev, mem_type);
+
+	return 0;
+}
+
+/**
+ * intel_region_ttm_fini - Finalize a TTM region.
+ * @mem: The memory region
+ *
+ * This function takes down the TTM resource manager associated with the
+ * memory region, and if it was registered with the TTM device,
+ * removes that registration.
+ */
+void intel_region_ttm_fini(struct intel_memory_region *mem)
+{
+	int ret;
+
+	ret = ttm_range_man_fini(&mem->i915->bdev,
+				 intel_region_to_ttm_type(mem));
+	GEM_WARN_ON(ret);
+	mem->region_private = NULL;
+}
+
+/**
+ * intel_region_ttm_node_to_st - Convert an opaque TTM resource manager node
+ * to an sg_table.
+ * @mem: The memory region.
+ * @node: The resource manager node obtained from the TTM resource manager.
+ *
+ * The gem backends typically use sg-tables for operations on the underlying
+ * io_memory. So provide a way for the backends to translate the
+ * nodes they are handed from TTM to sg-tables.
+ *
+ * Return: A malloced sg_table on success, an error pointer on failure.
+ */
+struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
+					     void *node)
+{
+	return i915_sg_from_mm_node(node, mem->region.start);
+}
+
+/**
+ * intel_region_ttm_node_alloc - Allocate memory resources from a region
+ * @mem: The memory region.
+ * @size: The requested size in bytes
+ * @flags: Allocation flags
+ *
+ * This functionality is provided only for callers that need to allocate
+ * memory from standalone TTM range managers, without the TTM eviction
+ * functionality. Don't use if you are not completely sure that's the
+ * case. The returned opaque node can be converted to an sg_table using
+ * intel_region_ttm_node_to_st(), and can be freed using
+ * intel_region_ttm_node_free().
+ *
+ * Return: A valid pointer on success, an error pointer on failure.
+ */
+void *intel_region_ttm_node_alloc(struct intel_memory_region *mem,
+				  resource_size_t size,
+				  unsigned int flags)
+{
+	struct ttm_resource_manager *man = mem->region_private;
+	struct ttm_place place = {};
+	struct ttm_resource res = {};
+	struct ttm_buffer_object mock_bo = {};
+	int ret;
+
+	/*
+	 * We ignore the flags for now since we're using the range
+	 * manager and contiguous and min page size would be fulfilled
+	 * by default if size is min page size aligned.
+	 */
+	res.num_pages = size >> PAGE_SHIFT;
+
+	if (mem->is_range_manager) {
+		if (size >= SZ_1G)
+			mock_bo.page_alignment = SZ_1G >> PAGE_SHIFT;
+		else if (size >= SZ_2M)
+			mock_bo.page_alignment = SZ_2M >> PAGE_SHIFT;
+		else if (size >= SZ_64K)
+			mock_bo.page_alignment = SZ_64K >> PAGE_SHIFT;
+	}
+
+	ret = man->func->alloc(man, &mock_bo, &place, &res);
+	if (ret == -ENOSPC)
+		ret = -ENXIO;
+	return ret ?
ERR_PTR(ret) : res.mm_node; +} diff --git a/drivers/gpu/drm/i915/intel_region_ttm.h b/drivers/gpu/drm/i915/intel_region_ttm.h new file mode 100644 index 000000000000..1c82c6c3429d --- /dev/null +++ b/drivers/gpu/drm/i915/intel_region_ttm.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ +#ifndef _INTEL_REGION_TTM_H_ +#define _INTEL_REGION_TTM_H_ + +#include + +#include "i915_selftest.h" + +struct drm_i915_private; +struct intel_memory_region; + +int intel_region_ttm_device_init(struct drm_i915_private *dev_priv); + +void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv); + +int intel_region_ttm_init(struct intel_memory_region *mem); + +void intel_region_ttm_fini(struct intel_memory_region *mem); + +struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem, + void *node); + +void *intel_region_ttm_node_alloc(struct intel_memory_region *mem, + resource_size_t size, + unsigned int flags); + +void intel_region_ttm_node_free(struct intel_memory_region *mem, + void *node); +#endif /* _INTEL_REGION_TTM_H_ */ diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c deleted file mode 100644 index f0f5c4df8dbc..000000000000 --- a/drivers/gpu/drm/i915/selftests/i915_buddy.c +++ /dev/null @@ -1,789 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2019 Intel Corporation - */ - -#include - -#include "../i915_selftest.h" -#include "i915_random.h" - -static void __igt_dump_block(struct i915_buddy_mm *mm, - struct i915_buddy_block *block, - bool buddy) -{ - pr_err("block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%s buddy=%s\n", - block->header, - i915_buddy_block_state(block), - i915_buddy_block_order(block), - i915_buddy_block_offset(block), - i915_buddy_block_size(mm, block), - yesno(!block->parent), - yesno(buddy)); -} - -static void igt_dump_block(struct i915_buddy_mm *mm, - struct i915_buddy_block *block) -{ - struct i915_buddy_block *buddy; - - __igt_dump_block(mm, block, false); - - buddy = get_buddy(block); - if (buddy) - __igt_dump_block(mm, buddy, true); -} - -static int igt_check_block(struct i915_buddy_mm *mm, - struct i915_buddy_block *block) -{ - struct i915_buddy_block *buddy; - unsigned int block_state; - u64 block_size; - u64 offset; - int err = 0; - - block_state = i915_buddy_block_state(block); - - if (block_state != I915_BUDDY_ALLOCATED && - block_state != I915_BUDDY_FREE && - block_state != I915_BUDDY_SPLIT) { - pr_err("block state mismatch\n"); - err = -EINVAL; - } - - block_size = i915_buddy_block_size(mm, block); - offset = i915_buddy_block_offset(block); - - if (block_size < mm->chunk_size) { - pr_err("block size smaller than min size\n"); - err = -EINVAL; - } - - if (!is_power_of_2(block_size)) { - pr_err("block size not power of two\n"); - err = -EINVAL; - } - - if (!IS_ALIGNED(block_size, mm->chunk_size)) { - pr_err("block size not aligned to min size\n"); - err = -EINVAL; - } - - if (!IS_ALIGNED(offset, mm->chunk_size)) { - pr_err("block offset not aligned to min size\n"); - err = -EINVAL; - } - - if (!IS_ALIGNED(offset, block_size)) { - pr_err("block offset not aligned to block size\n"); - err = -EINVAL; - } - - buddy = get_buddy(block); - - if (!buddy && block->parent) { - pr_err("buddy has gone fishing\n"); - err = -EINVAL; - } - - if (buddy) { - if (i915_buddy_block_offset(buddy) != (offset ^ block_size)) { - pr_err("buddy has wrong offset\n"); - err = -EINVAL; - } - - if (i915_buddy_block_size(mm, buddy) != 
block_size) { - pr_err("buddy size mismatch\n"); - err = -EINVAL; - } - - if (i915_buddy_block_state(buddy) == block_state && - block_state == I915_BUDDY_FREE) { - pr_err("block and its buddy are free\n"); - err = -EINVAL; - } - } - - return err; -} - -static int igt_check_blocks(struct i915_buddy_mm *mm, - struct list_head *blocks, - u64 expected_size, - bool is_contiguous) -{ - struct i915_buddy_block *block; - struct i915_buddy_block *prev; - u64 total; - int err = 0; - - block = NULL; - prev = NULL; - total = 0; - - list_for_each_entry(block, blocks, link) { - err = igt_check_block(mm, block); - - if (!i915_buddy_block_is_allocated(block)) { - pr_err("block not allocated\n"), - err = -EINVAL; - } - - if (is_contiguous && prev) { - u64 prev_block_size; - u64 prev_offset; - u64 offset; - - prev_offset = i915_buddy_block_offset(prev); - prev_block_size = i915_buddy_block_size(mm, prev); - offset = i915_buddy_block_offset(block); - - if (offset != (prev_offset + prev_block_size)) { - pr_err("block offset mismatch\n"); - err = -EINVAL; - } - } - - if (err) - break; - - total += i915_buddy_block_size(mm, block); - prev = block; - } - - if (!err) { - if (total != expected_size) { - pr_err("size mismatch, expected=%llx, found=%llx\n", - expected_size, total); - err = -EINVAL; - } - return err; - } - - if (prev) { - pr_err("prev block, dump:\n"); - igt_dump_block(mm, prev); - } - - if (block) { - pr_err("bad block, dump:\n"); - igt_dump_block(mm, block); - } - - return err; -} - -static int igt_check_mm(struct i915_buddy_mm *mm) -{ - struct i915_buddy_block *root; - struct i915_buddy_block *prev; - unsigned int i; - u64 total; - int err = 0; - - if (!mm->n_roots) { - pr_err("n_roots is zero\n"); - return -EINVAL; - } - - if (mm->n_roots != hweight64(mm->size)) { - pr_err("n_roots mismatch, n_roots=%u, expected=%lu\n", - mm->n_roots, hweight64(mm->size)); - return -EINVAL; - } - - root = NULL; - prev = NULL; - total = 0; - - for (i = 0; i < mm->n_roots; ++i) { - struct i915_buddy_block *block; - unsigned int order; - - root = mm->roots[i]; - if (!root) { - pr_err("root(%u) is NULL\n", i); - err = -EINVAL; - break; - } - - err = igt_check_block(mm, root); - - if (!i915_buddy_block_is_free(root)) { - pr_err("root not free\n"); - err = -EINVAL; - } - - order = i915_buddy_block_order(root); - - if (!i) { - if (order != mm->max_order) { - pr_err("max order root missing\n"); - err = -EINVAL; - } - } - - if (prev) { - u64 prev_block_size; - u64 prev_offset; - u64 offset; - - prev_offset = i915_buddy_block_offset(prev); - prev_block_size = i915_buddy_block_size(mm, prev); - offset = i915_buddy_block_offset(root); - - if (offset != (prev_offset + prev_block_size)) { - pr_err("root offset mismatch\n"); - err = -EINVAL; - } - } - - block = list_first_entry_or_null(&mm->free_list[order], - struct i915_buddy_block, - link); - if (block != root) { - pr_err("root mismatch at order=%u\n", order); - err = -EINVAL; - } - - if (err) - break; - - prev = root; - total += i915_buddy_block_size(mm, root); - } - - if (!err) { - if (total != mm->size) { - pr_err("expected mm size=%llx, found=%llx\n", mm->size, - total); - err = -EINVAL; - } - return err; - } - - if (prev) { - pr_err("prev root(%u), dump:\n", i - 1); - igt_dump_block(mm, prev); - } - - if (root) { - pr_err("bad root(%u), dump:\n", i); - igt_dump_block(mm, root); - } - - return err; -} - -static void igt_mm_config(u64 *size, u64 *chunk_size) -{ - I915_RND_STATE(prng); - u32 s, ms; - - /* Nothing fancy, just try to get an interesting bit pattern */ - - 
prandom_seed_state(&prng, i915_selftest.random_seed); - - /* Let size be a random number of pages up to 8 GB (2M pages) */ - s = 1 + i915_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng); - /* Let the chunk size be a random power of 2 less than size */ - ms = BIT(i915_prandom_u32_max_state(ilog2(s), &prng)); - /* Round size down to the chunk size */ - s &= -ms; - - /* Convert from pages to bytes */ - *chunk_size = (u64)ms << 12; - *size = (u64)s << 12; -} - -static int igt_buddy_alloc_smoke(void *arg) -{ - struct i915_buddy_mm mm; - IGT_TIMEOUT(end_time); - I915_RND_STATE(prng); - u64 chunk_size; - u64 mm_size; - int *order; - int err, i; - - igt_mm_config(&mm_size, &chunk_size); - - pr_info("buddy_init with size=%llx, chunk_size=%llx\n", mm_size, chunk_size); - - err = i915_buddy_init(&mm, mm_size, chunk_size); - if (err) { - pr_err("buddy_init failed(%d)\n", err); - return err; - } - - order = i915_random_order(mm.max_order + 1, &prng); - if (!order) - goto out_fini; - - for (i = 0; i <= mm.max_order; ++i) { - struct i915_buddy_block *block; - int max_order = order[i]; - bool timeout = false; - LIST_HEAD(blocks); - int order; - u64 total; - - err = igt_check_mm(&mm); - if (err) { - pr_err("pre-mm check failed, abort\n"); - break; - } - - pr_info("filling from max_order=%u\n", max_order); - - order = max_order; - total = 0; - - do { -retry: - block = i915_buddy_alloc(&mm, order); - if (IS_ERR(block)) { - err = PTR_ERR(block); - if (err == -ENOMEM) { - pr_info("buddy_alloc hit -ENOMEM with order=%d\n", - order); - } else { - if (order--) { - err = 0; - goto retry; - } - - pr_err("buddy_alloc with order=%d failed(%d)\n", - order, err); - } - - break; - } - - list_add_tail(&block->link, &blocks); - - if (i915_buddy_block_order(block) != order) { - pr_err("buddy_alloc order mismatch\n"); - err = -EINVAL; - break; - } - - total += i915_buddy_block_size(&mm, block); - - if (__igt_timeout(end_time, NULL)) { - timeout = true; - break; - } - } while (total < mm.size); - - if (!err) - err = igt_check_blocks(&mm, &blocks, total, false); - - i915_buddy_free_list(&mm, &blocks); - - if (!err) { - err = igt_check_mm(&mm); - if (err) - pr_err("post-mm check failed\n"); - } - - if (err || timeout) - break; - - cond_resched(); - } - - if (err == -ENOMEM) - err = 0; - - kfree(order); -out_fini: - i915_buddy_fini(&mm); - - return err; -} - -static int igt_buddy_alloc_pessimistic(void *arg) -{ - const unsigned int max_order = 16; - struct i915_buddy_block *block, *bn; - struct i915_buddy_mm mm; - unsigned int order; - LIST_HEAD(blocks); - int err; - - /* - * Create a pot-sized mm, then allocate one of each possible - * order within. This should leave the mm with exactly one - * page left. - */ - - err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE); - if (err) { - pr_err("buddy_init failed(%d)\n", err); - return err; - } - GEM_BUG_ON(mm.max_order != max_order); - - for (order = 0; order < max_order; order++) { - block = i915_buddy_alloc(&mm, order); - if (IS_ERR(block)) { - pr_info("buddy_alloc hit -ENOMEM with order=%d\n", - order); - err = PTR_ERR(block); - goto err; - } - - list_add_tail(&block->link, &blocks); - } - - /* And now the last remaining block available */ - block = i915_buddy_alloc(&mm, 0); - if (IS_ERR(block)) { - pr_info("buddy_alloc hit -ENOMEM on final alloc\n"); - err = PTR_ERR(block); - goto err; - } - list_add_tail(&block->link, &blocks); - - /* Should be completely full! 
*/ - for (order = max_order; order--; ) { - block = i915_buddy_alloc(&mm, order); - if (!IS_ERR(block)) { - pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!", - order); - list_add_tail(&block->link, &blocks); - err = -EINVAL; - goto err; - } - } - - block = list_last_entry(&blocks, typeof(*block), link); - list_del(&block->link); - i915_buddy_free(&mm, block); - - /* As we free in increasing size, we make available larger blocks */ - order = 1; - list_for_each_entry_safe(block, bn, &blocks, link) { - list_del(&block->link); - i915_buddy_free(&mm, block); - - block = i915_buddy_alloc(&mm, order); - if (IS_ERR(block)) { - pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n", - order); - err = PTR_ERR(block); - goto err; - } - i915_buddy_free(&mm, block); - order++; - } - - /* To confirm, now the whole mm should be available */ - block = i915_buddy_alloc(&mm, max_order); - if (IS_ERR(block)) { - pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n", - max_order); - err = PTR_ERR(block); - goto err; - } - i915_buddy_free(&mm, block); - -err: - i915_buddy_free_list(&mm, &blocks); - i915_buddy_fini(&mm); - return err; -} - -static int igt_buddy_alloc_optimistic(void *arg) -{ - const int max_order = 16; - struct i915_buddy_block *block; - struct i915_buddy_mm mm; - LIST_HEAD(blocks); - int order; - int err; - - /* - * Create a mm with one block of each order available, and - * try to allocate them all. - */ - - err = i915_buddy_init(&mm, - PAGE_SIZE * ((1 << (max_order + 1)) - 1), - PAGE_SIZE); - if (err) { - pr_err("buddy_init failed(%d)\n", err); - return err; - } - GEM_BUG_ON(mm.max_order != max_order); - - for (order = 0; order <= max_order; order++) { - block = i915_buddy_alloc(&mm, order); - if (IS_ERR(block)) { - pr_info("buddy_alloc hit -ENOMEM with order=%d\n", - order); - err = PTR_ERR(block); - goto err; - } - - list_add_tail(&block->link, &blocks); - } - - /* Should be completely full! */ - block = i915_buddy_alloc(&mm, 0); - if (!IS_ERR(block)) { - pr_info("buddy_alloc unexpectedly succeeded, it should be full!"); - list_add_tail(&block->link, &blocks); - err = -EINVAL; - goto err; - } - -err: - i915_buddy_free_list(&mm, &blocks); - i915_buddy_fini(&mm); - return err; -} - -static int igt_buddy_alloc_pathological(void *arg) -{ - const int max_order = 16; - struct i915_buddy_block *block; - struct i915_buddy_mm mm; - LIST_HEAD(blocks); - LIST_HEAD(holes); - int order, top; - int err; - - /* - * Create a pot-sized mm, then allocate one of each possible - * order within. This should leave the mm with exactly one - * page left. Free the largest block, then whittle down again. - * Eventually we will have a fully 50% fragmented mm. 
- */ - - err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE); - if (err) { - pr_err("buddy_init failed(%d)\n", err); - return err; - } - GEM_BUG_ON(mm.max_order != max_order); - - for (top = max_order; top; top--) { - /* Make room by freeing the largest allocated block */ - block = list_first_entry_or_null(&blocks, typeof(*block), link); - if (block) { - list_del(&block->link); - i915_buddy_free(&mm, block); - } - - for (order = top; order--; ) { - block = i915_buddy_alloc(&mm, order); - if (IS_ERR(block)) { - pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n", - order, top); - err = PTR_ERR(block); - goto err; - } - list_add_tail(&block->link, &blocks); - } - - /* There should be one final page for this sub-allocation */ - block = i915_buddy_alloc(&mm, 0); - if (IS_ERR(block)) { - pr_info("buddy_alloc hit -ENOMEM for hole\n"); - err = PTR_ERR(block); - goto err; - } - list_add_tail(&block->link, &holes); - - block = i915_buddy_alloc(&mm, top); - if (!IS_ERR(block)) { - pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!", - top, max_order); - list_add_tail(&block->link, &blocks); - err = -EINVAL; - goto err; - } - } - - i915_buddy_free_list(&mm, &holes); - - /* Nothing larger than blocks of chunk_size now available */ - for (order = 1; order <= max_order; order++) { - block = i915_buddy_alloc(&mm, order); - if (!IS_ERR(block)) { - pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!", - order); - list_add_tail(&block->link, &blocks); - err = -EINVAL; - goto err; - } - } - -err: - list_splice_tail(&holes, &blocks); - i915_buddy_free_list(&mm, &blocks); - i915_buddy_fini(&mm); - return err; -} - -static int igt_buddy_alloc_range(void *arg) -{ - struct i915_buddy_mm mm; - unsigned long page_num; - LIST_HEAD(blocks); - u64 chunk_size; - u64 offset; - u64 size; - u64 rem; - int err; - - igt_mm_config(&size, &chunk_size); - - pr_info("buddy_init with size=%llx, chunk_size=%llx\n", size, chunk_size); - - err = i915_buddy_init(&mm, size, chunk_size); - if (err) { - pr_err("buddy_init failed(%d)\n", err); - return err; - } - - err = igt_check_mm(&mm); - if (err) { - pr_err("pre-mm check failed, abort, abort, abort!\n"); - goto err_fini; - } - - rem = mm.size; - offset = 0; - - for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) { - struct i915_buddy_block *block; - LIST_HEAD(tmp); - - size = min(page_num * mm.chunk_size, rem); - - err = i915_buddy_alloc_range(&mm, &tmp, offset, size); - if (err) { - if (err == -ENOMEM) { - pr_info("alloc_range hit -ENOMEM with size=%llx\n", - size); - } else { - pr_err("alloc_range with offset=%llx, size=%llx failed(%d)\n", - offset, size, err); - } - - break; - } - - block = list_first_entry_or_null(&tmp, - struct i915_buddy_block, - link); - if (!block) { - pr_err("alloc_range has no blocks\n"); - err = -EINVAL; - break; - } - - if (i915_buddy_block_offset(block) != offset) { - pr_err("alloc_range start offset mismatch, found=%llx, expected=%llx\n", - i915_buddy_block_offset(block), offset); - err = -EINVAL; - } - - if (!err) - err = igt_check_blocks(&mm, &tmp, size, true); - - list_splice_tail(&tmp, &blocks); - - if (err) - break; - - offset += size; - - rem -= size; - if (!rem) - break; - - cond_resched(); - } - - if (err == -ENOMEM) - err = 0; - - i915_buddy_free_list(&mm, &blocks); - - if (!err) { - err = igt_check_mm(&mm); - if (err) - pr_err("post-mm check failed\n"); - } - -err_fini: - i915_buddy_fini(&mm); - - return err; -} - -static int igt_buddy_alloc_limit(void *arg) -{ - 
struct i915_buddy_block *block; - struct i915_buddy_mm mm; - const u64 size = U64_MAX; - int err; - - err = i915_buddy_init(&mm, size, PAGE_SIZE); - if (err) - return err; - - if (mm.max_order != I915_BUDDY_MAX_ORDER) { - pr_err("mm.max_order(%d) != %d\n", - mm.max_order, I915_BUDDY_MAX_ORDER); - err = -EINVAL; - goto out_fini; - } - - block = i915_buddy_alloc(&mm, mm.max_order); - if (IS_ERR(block)) { - err = PTR_ERR(block); - goto out_fini; - } - - if (i915_buddy_block_order(block) != mm.max_order) { - pr_err("block order(%d) != %d\n", - i915_buddy_block_order(block), mm.max_order); - err = -EINVAL; - goto out_free; - } - - if (i915_buddy_block_size(&mm, block) != - BIT_ULL(mm.max_order) * PAGE_SIZE) { - pr_err("block size(%llu) != %llu\n", - i915_buddy_block_size(&mm, block), - BIT_ULL(mm.max_order) * PAGE_SIZE); - err = -EINVAL; - goto out_free; - } - -out_free: - i915_buddy_free(&mm, block); -out_fini: - i915_buddy_fini(&mm); - return err; -} - -int i915_buddy_mock_selftests(void) -{ - static const struct i915_subtest tests[] = { - SUBTEST(igt_buddy_alloc_pessimistic), - SUBTEST(igt_buddy_alloc_optimistic), - SUBTEST(igt_buddy_alloc_pathological), - SUBTEST(igt_buddy_alloc_smoke), - SUBTEST(igt_buddy_alloc_range), - SUBTEST(igt_buddy_alloc_limit), - }; - - return i915_subtests(tests, NULL); -} diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h index 3db34d3eea58..34e5caf38093 100644 --- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h @@ -33,5 +33,4 @@ selftest(evict, i915_gem_evict_mock_selftests) selftest(gtt, i915_gem_gtt_mock_selftests) selftest(hugepages, i915_gem_huge_page_mock_selftests) selftest(contexts, i915_gem_context_mock_selftests) -selftest(buddy, i915_buddy_mock_selftests) selftest(memory_region, intel_memory_region_mock_selftests) diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index f85fd8cbfbf5..c85d516b85cd 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -57,9 +57,10 @@ static int igt_mock_fill(void *arg) LIST_HEAD(objects); int err = 0; - page_size = mem->mm.chunk_size; - max_pages = div64_u64(total, page_size); + page_size = mem->chunk_size; rem = total; +retry: + max_pages = div64_u64(rem, page_size); for_each_prime_number_from(page_num, 1, max_pages) { resource_size_t size = page_num * page_size; @@ -85,6 +86,11 @@ static int igt_mock_fill(void *arg) err = 0; if (err == -ENXIO) { if (page_num * page_size <= rem) { + if (mem->is_range_manager && max_pages > 1) { + max_pages >>= 1; + goto retry; + } + pr_err("%s failed, space still left in region\n", __func__); err = -EINVAL; @@ -199,12 +205,18 @@ static int igt_mock_reserve(void *arg) do { u32 size = i915_prandom_u32_max_state(cur_avail, &prng); +retry: size = max_t(u32, round_up(size, PAGE_SIZE), PAGE_SIZE); obj = igt_object_create(mem, &objects, size, 0); if (IS_ERR(obj)) { - if (PTR_ERR(obj) == -ENXIO) + if (PTR_ERR(obj) == -ENXIO) { + if (mem->is_range_manager && + size > mem->chunk_size) { + size >>= 1; + goto retry; + } break; - + } err = PTR_ERR(obj); goto out_close; } @@ -220,7 +232,7 @@ static int igt_mock_reserve(void *arg) out_close: kfree(order); close_objects(mem, &objects); - i915_buddy_free_list(&mem->mm, &mem->reserved); + intel_memory_region_unreserve(mem); return err; } @@ -240,7 +252,7 @@ static int 
igt_mock_contiguous(void *arg) total = resource_size(&mem->region); /* Min size */ - obj = igt_object_create(mem, &objects, mem->mm.chunk_size, + obj = igt_object_create(mem, &objects, mem->chunk_size, I915_BO_ALLOC_CONTIGUOUS); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -321,14 +333,16 @@ static int igt_mock_contiguous(void *arg) min = target; target = total >> 1; - /* Make sure we can still allocate all the fragmented space */ - obj = igt_object_create(mem, &objects, target, 0); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto err_close_objects; - } + if (!mem->is_range_manager) { + /* Make sure we can still allocate all the fragmented space */ + obj = igt_object_create(mem, &objects, target, 0); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto err_close_objects; + } - igt_object_release(obj); + igt_object_release(obj); + } /* * Even though we have enough free space, we don't have a big enough @@ -348,7 +362,7 @@ static int igt_mock_contiguous(void *arg) } target >>= 1; - } while (target >= mem->mm.chunk_size); + } while (target >= mem->chunk_size); err_close_objects: list_splice_tail(&holes, &objects); @@ -368,7 +382,7 @@ static int igt_mock_splintered_region(void *arg) /* * Sanity check we can still allocate everything even if the - * mm.max_order != mm.size. i.e our starting address space size is not a + * max_order != mm.size. i.e our starting address space size is not a * power-of-two. */ @@ -377,17 +391,10 @@ static int igt_mock_splintered_region(void *arg) if (IS_ERR(mem)) return PTR_ERR(mem); - if (mem->mm.size != size) { - pr_err("%s size mismatch(%llu != %llu)\n", - __func__, mem->mm.size, size); - err = -EINVAL; - goto out_put; - } - expected_order = get_order(rounddown_pow_of_two(size)); - if (mem->mm.max_order != expected_order) { + if (mem->max_order != expected_order) { pr_err("%s order mismatch(%u != %u)\n", - __func__, mem->mm.max_order, expected_order); + __func__, mem->max_order, expected_order); err = -EINVAL; goto out_put; } @@ -408,12 +415,15 @@ static int igt_mock_splintered_region(void *arg) * sure that does indeed hold true. */ - obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS); - if (!IS_ERR(obj)) { - pr_err("%s too large contiguous allocation was not rejected\n", - __func__); - err = -EINVAL; - goto out_close; + if (!mem->is_range_manager) { + obj = igt_object_create(mem, &objects, size, + I915_BO_ALLOC_CONTIGUOUS); + if (!IS_ERR(obj)) { + pr_err("%s too large contiguous allocation was not rejected\n", + __func__); + err = -EINVAL; + goto out_close; + } } obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size), @@ -432,68 +442,6 @@ out_put: return err; } -#ifndef SZ_8G -#define SZ_8G BIT_ULL(33) -#endif - -static int igt_mock_max_segment(void *arg) -{ - const unsigned int max_segment = i915_sg_segment_size(); - struct intel_memory_region *mem = arg; - struct drm_i915_private *i915 = mem->i915; - struct drm_i915_gem_object *obj; - struct i915_buddy_block *block; - struct scatterlist *sg; - LIST_HEAD(objects); - u64 size; - int err = 0; - - /* - * While we may create very large contiguous blocks, we may need - * to break those down for consumption elsewhere. In particular, - * dma-mapping with scatterlist elements have an implicit limit of - * UINT_MAX on each element. 
- */ - - size = SZ_8G; - mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0); - if (IS_ERR(mem)) - return PTR_ERR(mem); - - obj = igt_object_create(mem, &objects, size, 0); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto out_put; - } - - size = 0; - list_for_each_entry(block, &obj->mm.blocks, link) { - if (i915_buddy_block_size(&mem->mm, block) > size) - size = i915_buddy_block_size(&mem->mm, block); - } - if (size < max_segment) { - pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n", - __func__, max_segment, size); - err = -EINVAL; - goto out_close; - } - - for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) { - if (sg->length > max_segment) { - pr_err("%s: Created an oversized scatterlist entry, %u > %u\n", - __func__, sg->length, max_segment); - err = -EINVAL; - goto out_close; - } - } - -out_close: - close_objects(mem, &objects); -out_put: - intel_memory_region_put(mem); - return err; -} - static int igt_gpu_write_dw(struct intel_context *ce, struct i915_vma *vma, u32 dword, @@ -1098,7 +1046,6 @@ int intel_memory_region_mock_selftests(void) SUBTEST(igt_mock_fill), SUBTEST(igt_mock_contiguous), SUBTEST(igt_mock_splintered_region), - SUBTEST(igt_mock_max_segment), }; struct intel_memory_region *mem; struct drm_i915_private *i915; diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index cf40004bc92a..d189c4bd4bef 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -32,6 +32,7 @@ #include "gt/intel_gt_requests.h" #include "gt/mock_engine.h" #include "intel_memory_region.h" +#include "intel_region_ttm.h" #include "mock_request.h" #include "mock_gem_device.h" @@ -70,6 +71,7 @@ static void mock_device_release(struct drm_device *dev) mock_fini_ggtt(&i915->ggtt); destroy_workqueue(i915->wq); + intel_region_ttm_device_fini(i915); intel_gt_driver_late_release(&i915->gt); intel_memory_regions_driver_release(i915); @@ -116,6 +118,7 @@ struct drm_i915_private *mock_gem_device(void) #endif struct drm_i915_private *i915; struct pci_dev *pdev; + int ret; pdev = kzalloc(sizeof(*pdev), GFP_KERNEL); if (!pdev) @@ -178,6 +181,10 @@ struct drm_i915_private *mock_gem_device(void) atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */ i915->gt.awake = -ENODEV; + ret = intel_region_ttm_device_init(i915); + if (ret) + goto err_ttm; + i915->wq = alloc_ordered_workqueue("mock", 0); if (!i915->wq) goto err_drv; @@ -201,6 +208,7 @@ struct drm_i915_private *mock_gem_device(void) intel_engines_driver_register(i915); i915->do_release = true; + ida_init(&i915->selftest.mock_region_instances); return i915; @@ -209,6 +217,8 @@ err_context: err_unlock: destroy_workqueue(i915->wq); err_drv: + intel_region_ttm_device_fini(i915); +err_ttm: intel_gt_driver_late_release(&i915->gt); intel_memory_regions_driver_release(i915); drm_mode_config_cleanup(&i915->drm); diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c index 5d2d010a1e22..eafc5a04975c 100644 --- a/drivers/gpu/drm/i915/selftests/mock_region.c +++ b/drivers/gpu/drm/i915/selftests/mock_region.c @@ -1,17 +1,56 @@ // SPDX-License-Identifier: MIT /* - * Copyright © 2019 Intel Corporation + * Copyright © 2019-2021 Intel Corporation */ +#include + +#include + #include "gem/i915_gem_region.h" #include "intel_memory_region.h" +#include "intel_region_ttm.h" #include "mock_region.h" +static void mock_region_put_pages(struct drm_i915_gem_object *obj, + 
struct sg_table *pages)
+{
+	intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
+	sg_free_table(pages);
+	kfree(pages);
+}
+
+static int mock_region_get_pages(struct drm_i915_gem_object *obj)
+{
+	unsigned int flags;
+	struct sg_table *pages;
+
+	flags = I915_ALLOC_MIN_PAGE_SIZE;
+	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
+		flags |= I915_ALLOC_CONTIGUOUS;
+
+	obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region,
+							 obj->base.size,
+							 flags);
+	if (IS_ERR(obj->mm.st_mm_node))
+		return PTR_ERR(obj->mm.st_mm_node);
+
+	pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node);
+	if (IS_ERR(pages)) {
+		intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
+		return PTR_ERR(pages);
+	}
+
+	__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));
+
+	return 0;
+}
+
 static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
 	.name = "mock-region",
-	.get_pages = i915_gem_object_get_pages_buddy,
-	.put_pages = i915_gem_object_put_pages_buddy,
+	.get_pages = mock_region_get_pages,
+	.put_pages = mock_region_put_pages,
 	.release = i915_gem_object_release_memory_region,
 };
 
@@ -23,7 +62,7 @@ static int mock_object_init(struct intel_memory_region *mem,
 	static struct lock_class_key lock_class;
 	struct drm_i915_private *i915 = mem->i915;
 
-	if (size > mem->mm.size)
+	if (size > resource_size(&mem->region))
 		return -E2BIG;
 
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
@@ -38,9 +77,18 @@ static int mock_object_init(struct intel_memory_region *mem,
 	return 0;
 }
 
+static void mock_region_fini(struct intel_memory_region *mem)
+{
+	struct drm_i915_private *i915 = mem->i915;
+	int instance = mem->instance;
+
+	intel_region_ttm_fini(mem);
+	ida_free(&i915->selftest.mock_region_instances, instance);
+}
+
 static const struct intel_memory_region_ops mock_region_ops = {
-	.init = intel_memory_region_init_buddy,
-	.release = intel_memory_region_release_buddy,
+	.init = intel_region_ttm_init,
+	.release = mock_region_fini,
 	.init_object = mock_object_init,
 };
 
@@ -51,6 +99,14 @@ mock_region_create(struct drm_i915_private *i915,
		   resource_size_t min_page_size,
		   resource_size_t io_start)
 {
+	int instance = ida_alloc_max(&i915->selftest.mock_region_instances,
+				     TTM_NUM_MEM_TYPES - TTM_PL_PRIV - 1,
+				     GFP_KERNEL);
+
+	if (instance < 0)
+		return ERR_PTR(instance);
+
 	return intel_memory_region_create(i915, start, size, min_page_size,
-					  io_start, &mock_region_ops);
+					  io_start, INTEL_MEMORY_MOCK, instance,
+					  &mock_region_ops);
 }
-- 
cgit 

From f4db23f2c0d18c3a41746326e3eda0402b5c6b93 Mon Sep 17 00:00:00 2001
From: Thomas Hellström 
Date: Wed, 2 Jun 2021 10:38:09 +0200
Subject: drm/i915/ttm: Embed a ttm buffer object in the i915 gem object
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Embed a struct ttm_buffer_object into the i915 gem object, making sure
we alias the gem object part. It's a bit unfortunate that the
struct ttm_buffer_object embeds a gem object since we otherwise could
make the TTM part private to the TTM backend, and use the usual i915
gem object for the other backends.
To make this a bit more storage efficient for the other backends, we'd
have to use a pointer for the gem object which would require a lot of
changes in the driver. We postpone that for later.
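As a rough sketch of the aliasing contract being enforced here (using
simplified stand-in types rather than the real driver structures), the
point is that both union members must place the embedded gem object at
the same offset, so either view can be used to reach it:

	/* Hypothetical, minimal userspace model of the union aliasing. */
	#include <stddef.h>
	#include <assert.h>

	struct gem_base { unsigned int handle; };  /* stand-in for drm_gem_object */
	struct ttm_bo { struct gem_base base; };   /* stand-in for ttm_buffer_object */

	struct gem_obj {
		union {
			struct gem_base base;          /* view used by non-TTM backends */
			struct ttm_bo __do_not_access; /* view used by the TTM backend */
		};
	};

	/* Same check as the BUILD_BUG_ON in the diff below, at file scope. */
	static_assert(offsetof(struct gem_obj, base) ==
		      offsetof(struct gem_obj, __do_not_access.base),
		      "gem object and TTM bo views must alias");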
Signed-off-by: Thomas Hellström 
Acked-by: Maarten Lankhorst 
Signed-off-by: Matthew Auld 
Link: https://patchwork.freedesktop.org/patch/msgid/20210602083818.241793-3-thomas.hellstrom@linux.intel.com
---
 drivers/gpu/drm/i915/gem/i915_gem_object.c       |  7 +++++++
 drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 12 +++++++++++-
 2 files changed, 18 insertions(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/i915/gem')

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 2be6109d0093..5706d471692d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -62,6 +62,13 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key, unsigned flags)
 {
+	/*
+	 * A gem object is embedded both in a struct ttm_buffer_object :/ and
+	 * in a drm_i915_gem_object. Make sure they are aliased.
+	 */
+	BUILD_BUG_ON(offsetof(typeof(*obj), base) !=
+		     offsetof(typeof(*obj), __do_not_access.base));
+
 	spin_lock_init(&obj->vma.lock);
 	INIT_LIST_HEAD(&obj->vma.list);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index f5b46d11e6e6..d047ea126029 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -10,6 +10,7 @@
 #include
 #include
+#include
 #include
 
 #include "i915_active.h"
@@ -99,7 +100,16 @@ struct i915_gem_object_page_iter {
 };
 
 struct drm_i915_gem_object {
-	struct drm_gem_object base;
+	/*
+	 * We might have reason to revisit the below since it wastes
+	 * a lot of space for non-ttm gem objects.
+	 * In any case, always use the accessors for the ttm_buffer_object
+	 * when accessing it.
+	 */
+	union {
+		struct drm_gem_object base;
+		struct ttm_buffer_object __do_not_access;
+	};
 
 	const struct drm_i915_gem_object_ops *ops;
-- 
cgit 

From 40e1956ec505a1aba96f9d202308a1ece87b6b93 Mon Sep 17 00:00:00 2001
From: Lucas De Marchi 
Date: Sat, 5 Jun 2021 08:53:54 -0700
Subject: drm/i915/gem: replace IS_GEN and friends with GRAPHICS_VER

This was done by the following semantic patch:

	@@ expression i915; @@
	- INTEL_GEN(i915)
	+ GRAPHICS_VER(i915)

	@@ expression i915; expression E; @@
	- INTEL_GEN(i915) >= E
	+ GRAPHICS_VER(i915) >= E

	@@ expression dev_priv; expression E; @@
	- !IS_GEN(dev_priv, E)
	+ GRAPHICS_VER(dev_priv) != E

	@@ expression dev_priv; expression E; @@
	- IS_GEN(dev_priv, E)
	+ GRAPHICS_VER(dev_priv) == E

	@@ expression dev_priv; expression from, until; @@
	- IS_GEN_RANGE(dev_priv, from, until)
	+ IS_GRAPHICS_VER(dev_priv, from, until)

	@def@ expression E; identifier id =~ "^gen$"; @@
	- id = GRAPHICS_VER(E)
	+ ver = GRAPHICS_VER(E)

	@@ identifier def.id; @@
	- id
	+ ver

It also takes care of renaming the variable we assign to GRAPHICS_VER()
so that it uses "ver" rather than "gen".
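For reference, semantic patches like the one above are applied with
Coccinelle's spatch tool. Assuming the rules were saved to a file named
graphics_ver.cocci (an illustrative name, not part of this commit), a
run over this subdirectory would look roughly like:

	spatch --sp-file graphics_ver.cocci --in-place --dir drivers/gpu/drm/i915/gem

which rewrites the matching call sites in place, producing the purely
mechanical conversions seen in the diff below.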
Signed-off-by: Lucas De Marchi Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20210605155356.4183026-4-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 6 +++--- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 10 +++++----- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_object_blt.c | 8 ++++---- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 16 ++++++++-------- drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 12 ++++++------ drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 10 +++++----- drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c | 4 ++-- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 16 ++++++++-------- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 14 +++++++------- drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c | 10 +++++----- 11 files changed, 54 insertions(+), 54 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 188dee13e017..7720b8c22c81 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -1190,7 +1190,7 @@ static void set_ppgtt_barrier(void *data) { struct i915_address_space *old = data; - if (INTEL_GEN(old->i915) < 8) + if (GRAPHICS_VER(old->i915) < 8) gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old)); i915_vm_close(old); @@ -1436,7 +1436,7 @@ i915_gem_user_to_context_sseu(struct intel_gt *gt, context->max_eus_per_subslice = user->max_eus_per_subslice; /* Part specific restrictions. */ - if (IS_GEN(i915, 11)) { + if (GRAPHICS_VER(i915) == 11) { unsigned int hw_s = hweight8(device->slice_mask); unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]); unsigned int req_s = hweight8(context->slice_mask); @@ -1503,7 +1503,7 @@ static int set_sseu(struct i915_gem_context *ctx, if (args->size < sizeof(user_sseu)) return -EINVAL; - if (!IS_GEN(i915, 11)) + if (GRAPHICS_VER(i915) != 11) return -ENODEV; if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 297143511f99..24c0582e46fb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -500,7 +500,7 @@ eb_validate_vma(struct i915_execbuffer *eb, * also covers all platforms with local memory. */ if (entry->relocation_count && - INTEL_GEN(eb->i915) >= 12 && !IS_TIGERLAKE(eb->i915)) + GRAPHICS_VER(eb->i915) >= 12 && !IS_TIGERLAKE(eb->i915)) return -EINVAL; if (unlikely(entry->flags & eb->invalid_flags)) @@ -1439,7 +1439,7 @@ err_pool: static bool reloc_can_use_engine(const struct intel_engine_cs *engine) { - return engine->class != VIDEO_DECODE_CLASS || !IS_GEN(engine->i915, 6); + return engine->class != VIDEO_DECODE_CLASS || GRAPHICS_VER(engine->i915) != 6; } static u32 *reloc_gpu(struct i915_execbuffer *eb, @@ -1671,7 +1671,7 @@ eb_relocate_entry(struct i915_execbuffer *eb, * batchbuffers. 
*/ if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && - IS_GEN(eb->i915, 6)) { + GRAPHICS_VER(eb->i915) == 6) { err = i915_vma_bind(target->vma, target->vma->obj->cache_level, PIN_GLOBAL, NULL); @@ -2332,7 +2332,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq) u32 *cs; int i; - if (!IS_GEN(rq->engine->i915, 7) || rq->engine->id != RCS0) { + if (GRAPHICS_VER(rq->engine->i915) != 7 || rq->engine->id != RCS0) { drm_dbg(&rq->engine->i915->drm, "sol reset is gen7/rcs only\n"); return -EINVAL; } @@ -3375,7 +3375,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, eb.batch_flags = 0; if (args->flags & I915_EXEC_SECURE) { - if (INTEL_GEN(i915) >= 11) + if (GRAPHICS_VER(i915) >= 11) return -ENODEV; /* Return -EPERM to trigger fallback code on old binaries. */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 65db290efd16..ee0bf8811388 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -64,7 +64,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, /* mmap ioctl is disallowed for all platforms after TGL-LP. This also * covers all platforms with local memory. */ - if (INTEL_GEN(i915) >= 12 && !IS_TIGERLAKE(i915)) + if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915)) return -EOPNOTSUPP; if (args->flags & ~(I915_MMAP_WC)) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c index df8e8c18c6c9..3e28c68fda3e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c @@ -72,7 +72,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce, GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX); - if (INTEL_GEN(i915) >= 8) { + if (GRAPHICS_VER(i915) >= 8) { *cmd++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2); *cmd++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE; *cmd++ = 0; @@ -232,7 +232,7 @@ static bool wa_1209644611_applies(struct drm_i915_private *i915, u32 size) { u32 height = size >> PAGE_SHIFT; - if (!IS_GEN(i915, 11)) + if (GRAPHICS_VER(i915) != 11) return false; return height % 4 == 3 && height <= 8; @@ -297,7 +297,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce, size = min_t(u64, rem, block_size); GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX); - if (INTEL_GEN(i915) >= 9 && + if (GRAPHICS_VER(i915) >= 9 && !wa_1209644611_applies(i915, size)) { *cmd++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2); *cmd++ = BLT_DEPTH_32 | PAGE_SIZE; @@ -309,7 +309,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce, *cmd++ = PAGE_SIZE; *cmd++ = lower_32_bits(src_offset); *cmd++ = upper_32_bits(src_offset); - } else if (INTEL_GEN(i915) >= 8) { + } else if (GRAPHICS_VER(i915) >= 8) { *cmd++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2); *cmd++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE; *cmd++ = 0; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 092d7a21de82..b0c3a7dc60d1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -38,7 +38,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915, return -ENODEV; /* WaSkipStolenMemoryFirstPage:bdw+ */ - if (INTEL_GEN(i915) >= 8 && start < 4096) + if (GRAPHICS_VER(i915) >= 8 && start < 4096) start = 4096; mutex_lock(&i915->mm.stolen_lock); @@ -84,14 +84,14 @@ static int i915_adjust_stolen(struct drm_i915_private *i915, */ /* Make sure we don't clobber the GTT if 
it's within stolen memory */ - if (INTEL_GEN(i915) <= 4 && + if (GRAPHICS_VER(i915) <= 4 && !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) { struct resource stolen[2] = {*dsm, *dsm}; struct resource ggtt_res; resource_size_t ggtt_start; ggtt_start = intel_uncore_read(uncore, PGTBL_CTL); - if (IS_GEN(i915, 4)) + if (GRAPHICS_VER(i915) == 4) ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) | (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28; else @@ -156,7 +156,7 @@ static int i915_adjust_stolen(struct drm_i915_private *i915, * GEN3 firmware likes to smash pci bridges into the stolen * range. Apparently this works. */ - if (!r && !IS_GEN(i915, 3)) { + if (!r && GRAPHICS_VER(i915) != 3) { drm_err(&i915->drm, "conflict detected with stolen region: %pR\n", dsm); @@ -197,7 +197,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *i915, * Whether ILK really reuses the ELK register for this is unclear. * Let's see if we catch anyone with this supposedly enabled on ILK. */ - drm_WARN(&i915->drm, IS_GEN(i915, 5), + drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5, "ILK stolen reserved found? 0x%08x\n", reg_val); @@ -399,7 +399,7 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem) return 0; } - if (intel_vtd_active() && INTEL_GEN(i915) < 8) { + if (intel_vtd_active() && GRAPHICS_VER(i915) < 8) { drm_notice(&i915->drm, "%s, disabling use of stolen memory\n", "DMAR active"); @@ -421,7 +421,7 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem) reserved_base = stolen_top; reserved_size = 0; - switch (INTEL_GEN(i915)) { + switch (GRAPHICS_VER(i915)) { case 2: case 3: break; @@ -456,7 +456,7 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem) &reserved_base, &reserved_size); break; default: - MISSING_CASE(INTEL_GEN(i915)); + MISSING_CASE(GRAPHICS_VER(i915)); fallthrough; case 11: case 12: diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c index 9e8945013090..ef4d0f7dc118 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c @@ -62,14 +62,14 @@ u32 i915_gem_fence_size(struct drm_i915_private *i915, GEM_BUG_ON(!stride); - if (INTEL_GEN(i915) >= 4) { + if (GRAPHICS_VER(i915) >= 4) { stride *= i915_gem_tile_height(tiling); GEM_BUG_ON(!IS_ALIGNED(stride, I965_FENCE_PAGE)); return roundup(size, stride); } /* Previous chips need a power-of-two fence region when tiling */ - if (IS_GEN(i915, 3)) + if (GRAPHICS_VER(i915) == 3) ggtt_size = 1024*1024; else ggtt_size = 512*1024; @@ -102,7 +102,7 @@ u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size, if (tiling == I915_TILING_NONE) return I915_GTT_MIN_ALIGNMENT; - if (INTEL_GEN(i915) >= 4) + if (GRAPHICS_VER(i915) >= 4) return I965_FENCE_PAGE; /* @@ -130,10 +130,10 @@ i915_tiling_ok(struct drm_i915_gem_object *obj, /* check maximum stride & object size */ /* i965+ stores the end address of the gtt mapping in the fence * reg, so dont bother to check the size */ - if (INTEL_GEN(i915) >= 7) { + if (GRAPHICS_VER(i915) >= 7) { if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL) return false; - } else if (INTEL_GEN(i915) >= 4) { + } else if (GRAPHICS_VER(i915) >= 4) { if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) return false; } else { @@ -144,7 +144,7 @@ i915_tiling_ok(struct drm_i915_gem_object *obj, return false; } - if (IS_GEN(i915, 2) || + if (GRAPHICS_VER(i915) == 2 || (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915))) tile_width = 128; else diff --git 
a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c index d36873885cc1..176e6b22f87f 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c @@ -152,8 +152,8 @@ static int prepare_blit(const struct tiled_blits *t, struct blit_buffer *src, struct drm_i915_gem_object *batch) { - const int gen = INTEL_GEN(to_i915(batch->base.dev)); - bool use_64b_reloc = gen >= 8; + const int ver = GRAPHICS_VER(to_i915(batch->base.dev)); + bool use_64b_reloc = ver >= 8; u32 src_pitch, dst_pitch; u32 cmd, *cs; @@ -171,7 +171,7 @@ static int prepare_blit(const struct tiled_blits *t, *cs++ = cmd; cmd = MI_FLUSH_DW; - if (gen >= 8) + if (ver >= 8) cmd++; *cs++ = cmd; *cs++ = 0; @@ -179,7 +179,7 @@ static int prepare_blit(const struct tiled_blits *t, *cs++ = 0; cmd = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (8 - 2); - if (gen >= 8) + if (ver >= 8) cmd += 2; src_pitch = t->width * 4; @@ -666,7 +666,7 @@ static int igt_client_tiled_blits(void *arg) int inst = 0; /* Test requires explicit BLT tiling controls */ - if (INTEL_GEN(i915) < 4) + if (GRAPHICS_VER(i915) < 4) return 0; if (bad_swizzling(i915)) /* Requires sane (sub-page) swizzling */ diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c index e937b6629019..13b088cc787e 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c @@ -221,12 +221,12 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v) goto out_rq; } - if (INTEL_GEN(ctx->engine->i915) >= 8) { + if (GRAPHICS_VER(ctx->engine->i915) >= 8) { *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22; *cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset); *cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset); *cs++ = v; - } else if (INTEL_GEN(ctx->engine->i915) >= 4) { + } else if (GRAPHICS_VER(ctx->engine->i915) >= 4) { *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *cs++ = 0; *cs++ = i915_ggtt_offset(vma) + offset; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index ce70d0a3afb2..dbcfa28a9d91 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -897,7 +897,7 @@ static int rpcs_query_batch(struct drm_i915_gem_object *rpcs, struct i915_vma *v { u32 *cmd; - GEM_BUG_ON(INTEL_GEN(vma->vm->i915) < 8); + GEM_BUG_ON(GRAPHICS_VER(vma->vm->i915) < 8); cmd = i915_gem_object_pin_map(rpcs, I915_MAP_WB); if (IS_ERR(cmd)) @@ -932,7 +932,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj, GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine)); - if (INTEL_GEN(i915) < 8) + if (GRAPHICS_VER(i915) < 8) return -EINVAL; vma = i915_vma_instance(obj, ce->vm, NULL); @@ -1100,7 +1100,7 @@ __read_slice_count(struct intel_context *ce, return ret; } - if (INTEL_GEN(ce->engine->i915) >= 11) { + if (GRAPHICS_VER(ce->engine->i915) >= 11) { s_mask = GEN11_RPCS_S_CNT_MASK; s_shift = GEN11_RPCS_S_CNT_SHIFT; } else { @@ -1229,7 +1229,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915, int inst = 0; int ret = 0; - if (INTEL_GEN(i915) < 9) + if (GRAPHICS_VER(i915) < 9) return 0; if (flags & TEST_RESET) @@ -1518,7 +1518,7 @@ static int write_to_scratch(struct i915_gem_context *ctx, } *cmd++ = MI_STORE_DWORD_IMM_GEN4; - if (INTEL_GEN(i915) >= 8) { + if (GRAPHICS_VER(i915) >= 8) { *cmd++ = 
lower_32_bits(offset); *cmd++ = upper_32_bits(offset); } else { @@ -1608,7 +1608,7 @@ static int read_from_scratch(struct i915_gem_context *ctx, if (IS_ERR(obj)) return PTR_ERR(obj); - if (INTEL_GEN(i915) >= 8) { + if (GRAPHICS_VER(i915) >= 8) { const u32 GPR0 = engine->mmio_base + 0x600; vm = i915_gem_context_get_vm_rcu(ctx); @@ -1776,7 +1776,7 @@ static int igt_vm_isolation(void *arg) u32 expected; int err; - if (INTEL_GEN(i915) < 7) + if (GRAPHICS_VER(i915) < 7) return 0; /* @@ -1830,7 +1830,7 @@ static int igt_vm_isolation(void *arg) continue; /* Not all engines have their own GPR! */ - if (INTEL_GEN(i915) < 8 && engine->class != RENDER_CLASS) + if (GRAPHICS_VER(i915) < 8 && engine->class != RENDER_CLASS) continue; while (!__igt_timeout(end_time, NULL)) { diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 05a3b29f545e..3a30955285d6 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -273,7 +273,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj, static unsigned int setup_tile_size(struct tile *tile, struct drm_i915_private *i915) { - if (INTEL_GEN(i915) <= 2) { + if (GRAPHICS_VER(i915) <= 2) { tile->height = 16; tile->width = 128; tile->size = 11; @@ -288,9 +288,9 @@ setup_tile_size(struct tile *tile, struct drm_i915_private *i915) tile->size = 12; } - if (INTEL_GEN(i915) < 4) + if (GRAPHICS_VER(i915) < 4) return 8192 / tile->width; - else if (INTEL_GEN(i915) < 7) + else if (GRAPHICS_VER(i915) < 7) return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width; else return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width; @@ -386,7 +386,7 @@ static int igt_partial_tiling(void *arg) if (err) goto out_unlock; - if (pitch > 2 && INTEL_GEN(i915) >= 4) { + if (pitch > 2 && GRAPHICS_VER(i915) >= 4) { tile.stride = tile.width * (pitch - 1); err = check_partial_mappings(obj, &tile, end); if (err == -EINTR) @@ -395,7 +395,7 @@ static int igt_partial_tiling(void *arg) goto out_unlock; } - if (pitch < max_pitch && INTEL_GEN(i915) >= 4) { + if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) { tile.stride = tile.width * (pitch + 1); err = check_partial_mappings(obj, &tile, end); if (err == -EINTR) @@ -405,7 +405,7 @@ static int igt_partial_tiling(void *arg) } } - if (INTEL_GEN(i915) >= 4) { + if (GRAPHICS_VER(i915) >= 4) { for_each_prime_number(pitch, max_pitch) { tile.stride = tile.width * pitch; err = check_partial_mappings(obj, &tile, end); @@ -501,7 +501,7 @@ static int igt_smoke_tiling(void *arg) tile.stride = i915_prandom_u32_max_state(max_pitch, &prng); tile.stride = (1 + tile.stride) * tile.width; - if (INTEL_GEN(i915) < 4) + if (GRAPHICS_VER(i915) < 4) tile.stride = rounddown_pow_of_two(tile.stride); } diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c index 0b092c62bb34..b35c1219c852 100644 --- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c +++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c @@ -44,7 +44,7 @@ igt_emit_store_dw(struct i915_vma *vma, u32 val) { struct drm_i915_gem_object *obj; - const int gen = INTEL_GEN(vma->vm->i915); + const int ver = GRAPHICS_VER(vma->vm->i915); unsigned long n, size; u32 *cmd; int err; @@ -65,14 +65,14 @@ igt_emit_store_dw(struct i915_vma *vma, offset += vma->node.start; for (n = 0; n < count; n++) { - if (gen >= 8) { + if (ver >= 8) { *cmd++ = MI_STORE_DWORD_IMM_GEN4; *cmd++ = lower_32_bits(offset); *cmd++ = upper_32_bits(offset); 
*cmd++ = val; - } else if (gen >= 4) { + } else if (ver >= 4) { *cmd++ = MI_STORE_DWORD_IMM_GEN4 | - (gen < 6 ? MI_USE_GGTT : 0); + (ver < 6 ? MI_USE_GGTT : 0); *cmd++ = 0; *cmd++ = offset; *cmd++ = val; @@ -146,7 +146,7 @@ int igt_gpu_fill_dw(struct intel_context *ce, goto skip_request; flags = 0; - if (INTEL_GEN(ce->vm->i915) <= 5) + if (GRAPHICS_VER(ce->vm->i915) <= 5) flags |= I915_DISPATCH_SECURE; err = rq->engine->emit_bb_start(rq, -- cgit From 6edbd6abb783d54f6ac4c3ed5cd9e50cff6c15e9 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 10 May 2021 16:14:09 +0200 Subject: dma-buf: rename and cleanup dma_resv_get_excl v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When the comment needs to state explicitly that this doesn't get a reference to the object then the function is named rather badly. Rename the function and use rcu_dereference_check(), this way it can be used from both rcu as well as lock protected critical sections. v2: improve kerneldoc as suggested by Daniel v3: use dma_resv_excl_fence as function name Signed-off-by: Christian König Acked-by: Daniel Vetter Reviewed-by: Jason Ekstrand Link: https://patchwork.freedesktop.org/patch/msgid/20210602111714.212426-4-christian.koenig@amd.com --- drivers/dma-buf/dma-buf.c | 5 ++--- drivers/dma-buf/dma-resv.c | 10 +++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 2 +- drivers/gpu/drm/etnaviv/etnaviv_gem.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_busy.c | 3 +-- drivers/gpu/drm/msm/msm_gem.c | 4 ++-- drivers/gpu/drm/nouveau/nouveau_bo.c | 2 +- drivers/gpu/drm/nouveau/nouveau_fence.c | 2 +- drivers/gpu/drm/radeon/radeon_display.c | 2 +- drivers/gpu/drm/radeon/radeon_sync.c | 2 +- drivers/gpu/drm/radeon/radeon_uvd.c | 2 +- drivers/gpu/drm/ttm/ttm_bo.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2 +- include/linux/dma-resv.h | 14 ++++++-------- 15 files changed, 26 insertions(+), 30 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index ee04fb442015..d419cf90ee73 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -234,7 +234,7 @@ retry: shared_count = fobj->shared_count; else shared_count = 0; - fence_excl = rcu_dereference(resv->fence_excl); + fence_excl = dma_resv_excl_fence(resv); if (read_seqcount_retry(&resv->seq, seq)) { rcu_read_unlock(); goto retry; @@ -1382,8 +1382,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) buf_obj->name ?: ""); robj = buf_obj->resv; - fence = rcu_dereference_protected(robj->fence_excl, - dma_resv_held(robj)); + fence = dma_resv_excl_fence(robj); if (fence) seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n", fence->ops->get_driver_name(fence), diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 6132ba631991..ed7b4e8f002f 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -284,7 +284,7 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence); */ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) { - struct dma_fence *old_fence = dma_resv_get_excl(obj); + struct dma_fence *old_fence = dma_resv_excl_fence(obj); struct dma_resv_list *old; u32 i = 0; @@ -380,7 +380,7 @@ retry: rcu_read_unlock(); src_list = dma_resv_get_list(dst); - old = dma_resv_get_excl(dst); + old = dma_resv_excl_fence(dst); write_seqcount_begin(&dst->seq); /* write_seqcount_begin provides the necessary memory barrier */ @@ -428,7 +428,7 @@ int 
dma_resv_get_fences_rcu(struct dma_resv *obj, rcu_read_lock(); seq = read_seqcount_begin(&obj->seq); - fence_excl = rcu_dereference(obj->fence_excl); + fence_excl = dma_resv_excl_fence(obj); if (fence_excl && !dma_fence_get_rcu(fence_excl)) goto unlock; @@ -523,7 +523,7 @@ retry: rcu_read_lock(); i = -1; - fence = rcu_dereference(obj->fence_excl); + fence = dma_resv_excl_fence(obj); if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { if (!dma_fence_get_rcu(fence)) goto unlock_retry; @@ -645,7 +645,7 @@ retry: } if (!shared_count) { - struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl); + struct dma_fence *fence_excl = dma_resv_excl_fence(obj); if (fence_excl) { ret = dma_resv_test_signaled_single(fence_excl); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 73c76a3e2b12..7d5aaf584634 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -226,7 +226,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj, if (!amdgpu_vm_ready(vm)) goto out_unlock; - fence = dma_resv_get_excl(bo->tbo.base.resv); + fence = dma_resv_excl_fence(bo->tbo.base.resv); if (fence) { amdgpu_bo_fence(bo, fence, true); fence = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 4e558632a5d2..2bdc9df5c6b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -210,7 +210,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, return -EINVAL; /* always sync to the exclusive fence */ - f = dma_resv_get_excl(resv); + f = dma_resv_excl_fence(resv); r = amdgpu_sync_fence(sync, f); flist = dma_resv_get_list(resv); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index db69f19ab5bc..2237fe5204d0 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -471,7 +471,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) } } - fence = rcu_dereference(robj->fence_excl); + fence = dma_resv_excl_fence(robj); if (fence) etnaviv_gem_describe_fence(fence, "Exclusive", m); rcu_read_unlock(); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c index 25235ef630c1..088d375b3395 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c @@ -113,8 +113,7 @@ retry: seq = raw_read_seqcount(&obj->base.resv->seq); /* Translate the exclusive fence to the READ *and* WRITE engine */ - args->busy = - busy_check_writer(rcu_dereference(obj->base.resv->fence_excl)); + args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv)); /* Translate shared fences to READ set of engines */ list = rcu_dereference(obj->base.resv->fence); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 56df86e5f740..a5a2a922e3e8 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -819,7 +819,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, fobj = dma_resv_get_list(obj->resv); if (!fobj || (fobj->shared_count == 0)) { - fence = dma_resv_get_excl(obj->resv); + fence = dma_resv_excl_fence(obj->resv); /* don't need to wait on our own fences, since ring is fifo */ if (fence && (fence->context != fctx->context)) { ret = dma_fence_wait(fence, true); @@ -1035,7 +1035,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, } } - 
fence = rcu_dereference(robj->fence_excl); + fence = dma_resv_excl_fence(robj); if (fence) describe_fence(fence, "Exclusive", m); rcu_read_unlock(); diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index c3d20bc80022..520b1ea9d16c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -951,7 +951,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct drm_device *dev = drm->dev; - struct dma_fence *fence = dma_resv_get_excl(bo->base.resv); + struct dma_fence *fence = dma_resv_excl_fence(bo->base.resv); nv10_bo_put_tile_region(dev, *old_tile, fence); *old_tile = new_tile; diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index e5dcbf67de7e..19c096de5bdc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -356,7 +356,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e } fobj = dma_resv_get_list(resv); - fence = dma_resv_get_excl(resv); + fence = dma_resv_excl_fence(resv); if (fence && (!exclusive || !fobj || !fobj->shared_count)) { struct nouveau_channel *prev = NULL; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 652af7a134bd..406681317419 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -533,7 +533,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc, DRM_ERROR("failed to pin new rbo buffer before flip\n"); goto cleanup; } - work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv)); + work->fence = dma_fence_get(dma_resv_excl_fence(new_rbo->tbo.base.resv)); radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL); radeon_bo_unreserve(new_rbo); diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c index 5d3302945076..c8a1711325de 100644 --- a/drivers/gpu/drm/radeon/radeon_sync.c +++ b/drivers/gpu/drm/radeon/radeon_sync.c @@ -98,7 +98,7 @@ int radeon_sync_resv(struct radeon_device *rdev, int r = 0; /* always sync to the exclusive fence */ - f = dma_resv_get_excl(resv); + f = dma_resv_excl_fence(resv); fence = f ? 
to_radeon_fence(f) : NULL; if (fence && fence->rdev == rdev) radeon_sync_fence(sync, fence); diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index dfa9fdbe98da..1f5b1a5c0a09 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -477,7 +477,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, return -EINVAL; } - f = dma_resv_get_excl(bo->tbo.base.resv); + f = dma_resv_excl_fence(bo->tbo.base.resv); if (f) { r = radeon_fence_wait((struct radeon_fence *)f, false); if (r) { diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 4ed56520b81d..1752f8e523e7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -262,7 +262,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) rcu_read_lock(); fobj = rcu_dereference(resv->fence); - fence = rcu_dereference(resv->fence_excl); + fence = dma_resv_excl_fence(resv); if (fence && !fence->ops->signaled) dma_fence_enable_sw_signaling(fence); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 62ea920addc3..7b45393ad98e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -1166,7 +1166,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start, if (bo->moving) dma_fence_put(bo->moving); bo->moving = dma_fence_get - (dma_resv_get_excl(bo->base.resv)); + (dma_resv_excl_fence(bo->base.resv)); } return 0; diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index f32a3d176513..e3a7f740bb06 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -226,22 +226,20 @@ static inline void dma_resv_unlock(struct dma_resv *obj) } /** - * dma_resv_get_excl - get the reservation object's - * exclusive fence, with update-side lock held + * dma_resv_excl_fence - return the object's exclusive fence * @obj: the reservation object * - * Returns the exclusive fence (if any). Does NOT take a - * reference. Writers must hold obj->lock, readers may only - * hold a RCU read side lock. + * Returns the exclusive fence (if any). Caller must either hold the objects + * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(), + * or one of the variants of each * * RETURNS * The exclusive fence or NULL */ static inline struct dma_fence * -dma_resv_get_excl(struct dma_resv *obj) +dma_resv_excl_fence(struct dma_resv *obj) { - return rcu_dereference_protected(obj->fence_excl, - dma_resv_held(obj)); + return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj)); } /** -- cgit From fb5ce730f21434d8100942cf1dbe1acda255fbeb Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 11 May 2021 14:11:41 +0200 Subject: dma-buf: rename and cleanup dma_resv_get_list v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When the comment needs to state explicitly that this doesn't get a reference to the object then the function is named rather badly. Rename the function and use it in even more places.
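As a usage sketch, assuming a valid struct dma_resv *resv (the caller is hypothetical, loosely modelled on the ttm_bo_flush_all_fences() hunk further down), both accessors may be used either with dma_resv_lock() held or under rcu_read_lock(), and neither takes a fence reference:

	struct dma_resv_list *fobj;
	struct dma_fence *excl;
	unsigned int shared_count = 0;

	rcu_read_lock();
	fobj = dma_resv_shared_list(resv);	/* shared fence list, no reference taken */
	excl = dma_resv_excl_fence(resv);	/* exclusive fence, no reference taken */
	if (fobj)
		shared_count = fobj->shared_count;
	if (excl && !dma_fence_is_signaled(excl))
		dma_fence_enable_sw_signaling(excl);
	rcu_read_unlock();

A fence that must be used after rcu_read_unlock() still needs a reference of its own, e.g. via dma_fence_get_rcu().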
v2: use dma_resv_shared_list as new name Signed-off-by: Christian König Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20210602111714.212426-5-christian.koenig@amd.com --- drivers/dma-buf/dma-resv.c | 32 ++++++++++++------------ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +- drivers/gpu/drm/etnaviv/etnaviv_gem.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_busy.c | 2 +- drivers/gpu/drm/msm/msm_gem.c | 4 +-- drivers/gpu/drm/nouveau/nouveau_fence.c | 2 +- drivers/gpu/drm/qxl/qxl_debugfs.c | 2 +- drivers/gpu/drm/radeon/radeon_sync.c | 2 +- drivers/gpu/drm/ttm/ttm_bo.c | 2 +- include/linux/dma-resv.h | 27 ++++++++++---------- 13 files changed, 41 insertions(+), 42 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index ed7b4e8f002f..62e7e055ac62 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -149,8 +149,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences) dma_resv_assert_held(obj); - old = dma_resv_get_list(obj); - + old = dma_resv_shared_list(obj); if (old && old->shared_max) { if ((old->shared_count + num_fences) <= old->shared_max) return 0; @@ -219,12 +218,13 @@ EXPORT_SYMBOL(dma_resv_reserve_shared); */ void dma_resv_reset_shared_max(struct dma_resv *obj) { - /* Test shared fence slot reservation */ - if (rcu_access_pointer(obj->fence)) { - struct dma_resv_list *fence = dma_resv_get_list(obj); + struct dma_resv_list *fences = dma_resv_shared_list(obj); - fence->shared_max = fence->shared_count; - } + dma_resv_assert_held(obj); + + /* Test shared fence slot reservation */ + if (fences) + fences->shared_max = fences->shared_count; } EXPORT_SYMBOL(dma_resv_reset_shared_max); #endif @@ -247,7 +247,7 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence) dma_resv_assert_held(obj); - fobj = dma_resv_get_list(obj); + fobj = dma_resv_shared_list(obj); count = fobj->shared_count; write_seqcount_begin(&obj->seq); @@ -290,7 +290,7 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) dma_resv_assert_held(obj); - old = dma_resv_get_list(obj); + old = dma_resv_shared_list(obj); if (old) i = old->shared_count; @@ -329,7 +329,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) dma_resv_assert_held(dst); rcu_read_lock(); - src_list = rcu_dereference(src->fence); + src_list = dma_resv_shared_list(src); retry: if (src_list) { @@ -342,7 +342,7 @@ retry: return -ENOMEM; rcu_read_lock(); - src_list = rcu_dereference(src->fence); + src_list = dma_resv_shared_list(src); if (!src_list || src_list->shared_count > shared_count) { kfree(dst_list); goto retry; @@ -360,7 +360,7 @@ retry: if (!dma_fence_get_rcu(fence)) { dma_resv_list_free(dst_list); - src_list = rcu_dereference(src->fence); + src_list = dma_resv_shared_list(src); goto retry; } @@ -379,7 +379,7 @@ retry: new = dma_fence_get_rcu_safe(&src->fence_excl); rcu_read_unlock(); - src_list = dma_resv_get_list(dst); + src_list = dma_resv_shared_list(dst); old = dma_resv_excl_fence(dst); write_seqcount_begin(&dst->seq); @@ -432,7 +432,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj, if (fence_excl && !dma_fence_get_rcu(fence_excl)) goto unlock; - fobj = rcu_dereference(obj->fence); + fobj = dma_resv_shared_list(obj); if (fobj) sz += sizeof(*shared) * fobj->shared_max; @@ -538,7 
+538,7 @@ retry: } if (wait_all) { - struct dma_resv_list *fobj = rcu_dereference(obj->fence); + struct dma_resv_list *fobj = dma_resv_shared_list(obj); if (fobj) shared_count = fobj->shared_count; @@ -623,7 +623,7 @@ retry: seq = read_seqcount_begin(&obj->seq); if (test_all) { - struct dma_resv_list *fobj = rcu_dereference(obj->fence); + struct dma_resv_list *fobj = dma_resv_shared_list(obj); unsigned int i; if (fobj) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index d5e6519bdea1..65528592d5bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -247,7 +247,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, if (!ef) return -EINVAL; - old = dma_resv_get_list(resv); + old = dma_resv_shared_list(resv); if (!old) return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 6dd0ea6e9e24..04caa31056d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -49,7 +49,7 @@ __dma_resv_make_exclusive(struct dma_resv *obj) unsigned int count; int r; - if (!dma_resv_get_list(obj)) /* no shared fences to convert */ + if (!dma_resv_shared_list(obj)) /* no shared fences to convert */ return 0; r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 2bdc9df5c6b9..1b2ceccaf5b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -213,7 +213,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, f = dma_resv_excl_fence(resv); r = amdgpu_sync_fence(sync, f); - flist = dma_resv_get_list(resv); + flist = dma_resv_shared_list(resv); if (!flist || r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index df1f185faae9..53a8ab8ce2a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1339,7 +1339,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, * If true, then return false as any KFD process needs all its BOs to * be resident to run successfully */ - flist = dma_resv_get_list(bo->base.resv); + flist = dma_resv_shared_list(bo->base.resv); if (flist) { for (i = 0; i < flist->shared_count; ++i) { f = rcu_dereference_protected(flist->shared[i], diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 2237fe5204d0..8792d8dd5106 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -461,7 +461,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) off, etnaviv_obj->vaddr, obj->size); rcu_read_lock(); - fobj = rcu_dereference(robj->fence); + fobj = dma_resv_shared_list(robj); if (fobj) { unsigned int i, shared_count = fobj->shared_count; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c index 088d375b3395..35279dd561f5 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c @@ -116,7 +116,7 @@ retry: args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv)); /* Translate shared fences to READ set of engines */ - list = rcu_dereference(obj->base.resv->fence); + list = dma_resv_shared_list(obj->base.resv); if (list) { unsigned int 
shared_count = list->shared_count, i; diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index a5a2a922e3e8..410a93a7e77f 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -817,7 +817,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, struct dma_fence *fence; int i, ret; - fobj = dma_resv_get_list(obj->resv); + fobj = dma_resv_shared_list(obj->resv); if (!fobj || (fobj->shared_count == 0)) { fence = dma_resv_excl_fence(obj->resv); /* don't need to wait on our own fences, since ring is fifo */ @@ -1025,7 +1025,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, } rcu_read_lock(); - fobj = rcu_dereference(robj->fence); + fobj = dma_resv_shared_list(robj); if (fobj) { unsigned int i, shared_count = fobj->shared_count; diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 19c096de5bdc..6b43918035df 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -355,7 +355,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e return ret; } - fobj = dma_resv_get_list(resv); + fobj = dma_resv_shared_list(resv); fence = dma_resv_excl_fence(resv); if (fence && (!exclusive || !fobj || !fobj->shared_count)) { diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c index 183d15e2cf58..1f9a59601bb1 100644 --- a/drivers/gpu/drm/qxl/qxl_debugfs.c +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c @@ -61,7 +61,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data) int rel; rcu_read_lock(); - fobj = rcu_dereference(bo->tbo.base.resv->fence); + fobj = dma_resv_shared_list(bo->tbo.base.resv); rel = fobj ? fobj->shared_count : 0; rcu_read_unlock(); diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c index c8a1711325de..9257b60144c4 100644 --- a/drivers/gpu/drm/radeon/radeon_sync.c +++ b/drivers/gpu/drm/radeon/radeon_sync.c @@ -105,7 +105,7 @@ int radeon_sync_resv(struct radeon_device *rdev, else if (f) r = dma_fence_wait(f, true); - flist = dma_resv_get_list(resv); + flist = dma_resv_shared_list(resv); if (shared || !flist || r) return r; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 1752f8e523e7..f04a269b7065 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -261,7 +261,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) int i; rcu_read_lock(); - fobj = rcu_dereference(resv->fence); + fobj = dma_resv_shared_list(resv); fence = dma_resv_excl_fence(resv); if (fence && !fence->ops->signaled) dma_fence_enable_sw_signaling(fence); diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index e3a7f740bb06..8dc19d65a217 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -78,20 +78,6 @@ struct dma_resv { #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base) #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base) -/** - * dma_resv_get_list - get the reservation object's - * shared fence list, with update-side lock held - * @obj: the reservation object - * - * Returns the shared fence list. Does NOT take references to - * the fence. The obj->lock must be held. 
- */ -static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj) -{ - return rcu_dereference_protected(obj->fence, - dma_resv_held(obj)); -} - #ifdef CONFIG_DEBUG_MUTEXES void dma_resv_reset_shared_max(struct dma_resv *obj); #else @@ -268,6 +254,19 @@ dma_resv_get_excl_rcu(struct dma_resv *obj) return fence; } +/** + * dma_resv_shared_list - get the reservation object's shared fence list + * @obj: the reservation object + * + * Returns the shared fence list. Caller must either hold the objects + * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(), + * or one of the variants of each + */ +static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj) +{ + return rcu_dereference_check(obj->fence, dma_resv_held(obj)); +} + void dma_resv_init(struct dma_resv *obj); void dma_resv_fini(struct dma_resv *obj); int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences); -- cgit From 6b41323a265a02b7af906c6d6fd93f6cddd7ac12 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 2 Jun 2021 12:44:32 +0200 Subject: dma-buf: rename dma_resv_get_excl_rcu to _unlocked MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit That describes much better what the function is doing here. Signed-off-by: Christian König Reviewed-by: Jason Ekstrand Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20210602111714.212426-6-christian.koenig@amd.com --- drivers/gpu/drm/drm_gem.c | 2 +- drivers/gpu/drm/drm_gem_atomic_helper.c | 2 +- drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 2 +- drivers/gpu/drm/i915/display/intel_display.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_object.h | 2 +- drivers/gpu/drm/i915/gem/i915_gem_wait.c | 4 ++-- drivers/gpu/drm/i915/i915_request.c | 2 +- drivers/gpu/drm/i915/i915_sw_fence.c | 2 +- drivers/gpu/drm/nouveau/dispnv50/wndw.c | 2 +- drivers/gpu/drm/panfrost/panfrost_job.c | 2 +- include/linux/dma-resv.h | 4 ++-- 11 files changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 9989425e9875..263b4fb03303 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -1375,7 +1375,7 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array, if (!write) { struct dma_fence *fence = - dma_resv_get_excl_rcu(obj->resv); + dma_resv_get_excl_unlocked(obj->resv); return drm_gem_fence_array_add(fence_array, fence); } diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c index a005c5a0ba46..a27135084ae5 100644 --- a/drivers/gpu/drm/drm_gem_atomic_helper.c +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c @@ -147,7 +147,7 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st return 0; obj = drm_gem_fb_get_obj(state->fb, 0); - fence = dma_resv_get_excl_rcu(obj->resv); + fence = dma_resv_get_excl_unlocked(obj->resv); drm_atomic_set_fence_for_plane(state, fence); return 0; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index d05c35994579..c942d2a8c252 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -195,7 +195,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit) if (ret) return ret; } else { - bo->excl = dma_resv_get_excl_rcu(robj); + bo->excl = dma_resv_get_excl_unlocked(robj); } } diff --git a/drivers/gpu/drm/i915/display/intel_display.c 
b/drivers/gpu/drm/i915/display/intel_display.c index 384ff0bb6e19..f17c5f54feb6 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -11040,7 +11040,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane, if (ret < 0) goto unpin_fb; - fence = dma_resv_get_excl_rcu(obj->base.resv); + fence = dma_resv_get_excl_unlocked(obj->base.resv); if (fence) { add_rps_boost_after_vblank(new_plane_state->hw.crtc, fence); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 2ebd79537aea..7c0eb425cb3b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -500,7 +500,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj) struct dma_fence *fence; rcu_read_lock(); - fence = dma_resv_get_excl_rcu(obj->base.resv); + fence = dma_resv_get_excl_unlocked(obj->base.resv); rcu_read_unlock(); if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence)) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c index 4b9856d5ba14..c13aeddf5aa7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c @@ -73,7 +73,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv, */ prune_fences = count && timeout >= 0; } else { - excl = dma_resv_get_excl_rcu(resv); + excl = dma_resv_get_excl_unlocked(resv); } if (excl && timeout >= 0) @@ -170,7 +170,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, kfree(shared); } else { - excl = dma_resv_get_excl_rcu(obj->base.resv); + excl = dma_resv_get_excl_unlocked(obj->base.resv); } if (excl) { diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index bec9c3652188..c85494f411f4 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1611,7 +1611,7 @@ i915_request_await_object(struct i915_request *to, dma_fence_put(shared[i]); kfree(shared); } else { - excl = dma_resv_get_excl_rcu(obj->base.resv); + excl = dma_resv_get_excl_unlocked(obj->base.resv); } if (excl) { diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 2744558f3050..7aaf74552d06 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -606,7 +606,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, dma_fence_put(shared[i]); kfree(shared); } else { - excl = dma_resv_get_excl_rcu(resv); + excl = dma_resv_get_excl_unlocked(resv); } if (ret >= 0 && excl && excl->ops != exclude) { diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index 0cb1f9d848d3..8d048bacd6f0 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -561,7 +561,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) asyw->image.handle[0] = ctxdma->object.handle; } - asyw->state.fence = dma_resv_get_excl_rcu(nvbo->bo.base.resv); + asyw->state.fence = dma_resv_get_excl_unlocked(nvbo->bo.base.resv); asyw->image.offset[0] = nvbo->offset; if (wndw->func->prepare) { diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index 6003cfeb1322..2df3e999a38d 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -203,7 +203,7 @@ static void panfrost_acquire_object_fences(struct drm_gem_object **bos, int i; for (i = 
0; i < bo_count; i++) - implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv); + implicit_fences[i] = dma_resv_get_excl_unlocked(bos[i]->resv); } static void panfrost_attach_object_fences(struct drm_gem_object **bos, diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 8dc19d65a217..3e0eefcead44 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -229,7 +229,7 @@ dma_resv_excl_fence(struct dma_resv *obj) } /** - * dma_resv_get_excl_rcu - get the reservation object's + * dma_resv_get_excl_unlocked - get the reservation object's * exclusive fence, without lock held. * @obj: the reservation object * @@ -240,7 +240,7 @@ dma_resv_excl_fence(struct dma_resv *obj) * The exclusive fence or NULL if none */ static inline struct dma_fence * -dma_resv_get_excl_rcu(struct dma_resv *obj) +dma_resv_get_excl_unlocked(struct dma_resv *obj) { struct dma_fence *fence; -- cgit From d3fae3b3daac09961ab871a25093b0ae404282d5 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 2 Jun 2021 13:01:15 +0200 Subject: dma-buf: drop the _rcu postfix on function names v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The functions can be called both in _rcu context as well as while holding the lock. v2: add some kerneldoc as suggested by Daniel v3: fix indentation Signed-off-by: Christian König Reviewed-by: Jason Ekstrand Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20210602111714.212426-7-christian.koenig@amd.com --- drivers/dma-buf/dma-buf.c | 3 +-- drivers/dma-buf/dma-resv.c | 32 ++++++++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 5 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 3 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 5 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 4 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 4 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 5 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 11 ++++---- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5 ++-- drivers/gpu/drm/drm_gem.c | 5 ++-- drivers/gpu/drm/etnaviv/etnaviv_gem.c | 6 ++--- drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 6 ++--- drivers/gpu/drm/i915/dma_resv_utils.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_busy.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 4 +-- drivers/gpu/drm/i915/gem/i915_gem_wait.c | 6 ++--- drivers/gpu/drm/i915/i915_request.c | 4 +-- drivers/gpu/drm/i915/i915_sw_fence.c | 2 +- drivers/gpu/drm/msm/msm_gem.c | 3 +-- drivers/gpu/drm/nouveau/nouveau_gem.c | 4 +-- drivers/gpu/drm/panfrost/panfrost_drv.c | 3 +-- drivers/gpu/drm/radeon/radeon_gem.c | 6 ++--- drivers/gpu/drm/radeon/radeon_mn.c | 4 +-- drivers/gpu/drm/ttm/ttm_bo.c | 18 ++++++------- drivers/gpu/drm/vgem/vgem_fence.c | 3 +-- drivers/gpu/drm/virtio/virtgpu_ioctl.c | 5 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 6 ++--- include/linux/dma-resv.h | 17 ++++-------- 31 files changed, 84 insertions(+), 103 deletions(-) (limited to 'drivers/gpu/drm/i915/gem') diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index d419cf90ee73..511fe0d217a0 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -1147,8 +1147,7 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf, long ret; /* Wait on any implicit rendering fences */ - ret = dma_resv_wait_timeout_rcu(resv, write, true, - MAX_SCHEDULE_TIMEOUT); + ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT); 
if (ret < 0) return ret; diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 62e7e055ac62..f26c71747d43 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -396,7 +396,7 @@ retry: EXPORT_SYMBOL(dma_resv_copy_fences); /** - * dma_resv_get_fences_rcu - Get an object's shared and exclusive + * dma_resv_get_fences - Get an object's shared and exclusive * fences without update side lock held * @obj: the reservation object * @pfence_excl: the returned exclusive fence (or NULL) @@ -408,10 +408,9 @@ EXPORT_SYMBOL(dma_resv_copy_fences); * exclusive fence is not specified the fence is put into the array of the * shared fences as well. Returns either zero or -ENOMEM. */ -int dma_resv_get_fences_rcu(struct dma_resv *obj, - struct dma_fence **pfence_excl, - unsigned int *pshared_count, - struct dma_fence ***pshared) +int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl, + unsigned int *pshared_count, + struct dma_fence ***pshared) { struct dma_fence **shared = NULL; struct dma_fence *fence_excl; @@ -494,23 +493,24 @@ unlock: *pshared = shared; return ret; } -EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu); +EXPORT_SYMBOL_GPL(dma_resv_get_fences); /** - * dma_resv_wait_timeout_rcu - Wait on reservation's objects + * dma_resv_wait_timeout - Wait on reservation's objects * shared and/or exclusive fences. * @obj: the reservation object * @wait_all: if true, wait on all fences, else wait on just exclusive fence * @intr: if true, do interruptible wait * @timeout: timeout value in jiffies or zero to return immediately * + * Callers are not required to hold specific locks, but may hold + * dma_resv_lock() already * RETURNS * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or * greater than zero on success. */ -long dma_resv_wait_timeout_rcu(struct dma_resv *obj, - bool wait_all, bool intr, - unsigned long timeout) +long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr, + unsigned long timeout) { long ret = timeout ? timeout : 1; unsigned int seq, shared_count; @@ -582,7 +582,7 @@ unlock_retry: rcu_read_unlock(); goto retry; } -EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu); +EXPORT_SYMBOL_GPL(dma_resv_wait_timeout); static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence) @@ -602,16 +602,18 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence) } /** - * dma_resv_test_signaled_rcu - Test if a reservation object's - * fences have been signaled. + * dma_resv_test_signaled - Test if a reservation object's fences have been + * signaled. * @obj: the reservation object * @test_all: if true, test all fences, otherwise only test the exclusive * fence * + * Callers are not required to hold specific locks, but may hold + * dma_resv_lock() already * RETURNS * true if all fences signaled, else false */ -bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all) +bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all) { unsigned int seq, shared_count; int ret; @@ -660,7 +662,7 @@ retry: rcu_read_unlock(); return ret; } -EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu); +EXPORT_SYMBOL_GPL(dma_resv_test_signaled); #if IS_ENABLED(CONFIG_LOCKDEP) static int __init dma_resv_lockdep(void) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 49f73b5b89b0..ac7b37dfff5e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -203,9 +203,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, goto unpin; } - r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl, - &work->shared_count, - &work->shared); + r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl, + &work->shared_count, &work->shared); if (unlikely(r != 0)) { DRM_ERROR("failed to get fences for buffer\n"); goto unpin; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 04caa31056d0..c3053b83b80c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -52,7 +52,7 @@ __dma_resv_make_exclusive(struct dma_resv *obj) if (!dma_resv_shared_list(obj)) /* no shared fences to convert */ return 0; - r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences); + r = dma_resv_get_fences(obj, NULL, &count, &fences); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 7d5aaf584634..1c3e3b608332 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -526,8 +526,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, return -ENOENT; } robj = gem_to_amdgpu_bo(gobj); - ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, - timeout); + ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout); /* ret == 0 means not signaled, * ret > 0 means signaled diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index b4971e90b98c..df69b1e9e451 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv, unsigned count; int r; - r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences); + r = dma_resv_get_fences(resv, NULL, &count, &fences); if (r) goto fallback; @@ -156,8 +156,7 @@ fallback: /* Not enough memory for the delayed delete, as last resort * block for all the fences to complete.
*/ - dma_resv_wait_timeout_rcu(resv, true, false, - MAX_SCHEDULE_TIMEOUT); + dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT); amdgpu_pasid_free(pasid); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 2741c28ff1b5..d6c54c7f7679 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -75,8 +75,8 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni, mmu_interval_set_seq(mni, cur_seq); - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false, - MAX_SCHEDULE_TIMEOUT); + r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, + MAX_SCHEDULE_TIMEOUT); mutex_unlock(&adev->notifier_lock); if (r <= 0) DRM_ERROR("(%ld) failed to wait for user bo\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 19c1384a133f..96447e1d4c9c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -756,8 +756,8 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) return 0; } - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false, - MAX_SCHEDULE_TIMEOUT); + r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false, + MAX_SCHEDULE_TIMEOUT); if (r < 0) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 82f0542c7792..a692a4570627 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1126,9 +1126,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ib->length_dw = 16; if (direct) { - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, - true, false, - msecs_to_jiffies(10)); + r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, + msecs_to_jiffies(10)); if (r == 0) r = -ETIMEDOUT; if (r < 0) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index bcfd4a8d0288..d1a229212e7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2022,13 +2022,12 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) unsigned i, shared_count; int r; - r = dma_resv_get_fences_rcu(resv, &excl, - &shared_count, &shared); + r = dma_resv_get_fences(resv, &excl, &shared_count, &shared); if (r) { /* Not enough memory to grab the fence list, as last resort * block for all the fences to complete. 
*/ - dma_resv_wait_timeout_rcu(resv, true, false, + dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT); return; } @@ -2640,7 +2639,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo) return true; /* Don't evict VM page tables while they are busy */ - if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true)) + if (!dma_resv_test_signaled(bo->tbo.base.resv, true)) return false; /* Try to block ongoing updates */ @@ -2820,8 +2819,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, */ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) { - timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv, - true, true, timeout); + timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv, true, + true, timeout); if (timeout <= 0) return timeout; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 3267eb2e35dd..6dde2873d47b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -8400,9 +8400,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * deadlock during GPU reset when this fence will not signal * but we hold reservation lock for the BO. */ - r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true, - false, - msecs_to_jiffies(5000)); + r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false, + msecs_to_jiffies(5000)); if (unlikely(r <= 0)) DRM_ERROR("Waiting for fences timed out!"); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 263b4fb03303..d62fb1a3c916 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -770,8 +770,7 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, return -EINVAL; } - ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all, - true, timeout); + ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout); if (ret == 0) ret = -ETIME; else if (ret > 0) @@ -1380,7 +1379,7 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array, return drm_gem_fence_array_add(fence_array, fence); } - ret = dma_resv_get_fences_rcu(obj->resv, NULL, + ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences); if (ret || !fence_count) return ret; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 8792d8dd5106..b8fa6ed3dd73 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -390,14 +390,12 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, } if (op & ETNA_PREP_NOSYNC) { - if (!dma_resv_test_signaled_rcu(obj->resv, - write)) + if (!dma_resv_test_signaled(obj->resv, write)) return -EBUSY; } else { unsigned long remain = etnaviv_timeout_to_jiffies(timeout); - ret = dma_resv_wait_timeout_rcu(obj->resv, - write, true, remain); + ret = dma_resv_wait_timeout(obj->resv, write, true, remain); if (ret <= 0) return ret == 0 ? 
 			return ret == 0 ? -ETIMEDOUT : ret;
 	}
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index c942d2a8c252..d53856d7a747 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -189,9 +189,9 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 			continue;
 
 		if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
-			ret = dma_resv_get_fences_rcu(robj, &bo->excl,
-						      &bo->nr_shared,
-						      &bo->shared);
+			ret = dma_resv_get_fences(robj, &bo->excl,
+						  &bo->nr_shared,
+						  &bo->shared);
 			if (ret)
 				return ret;
 		} else {
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c
index 9e508e7d4629..7df91b7e4ca8 100644
--- a/drivers/gpu/drm/i915/dma_resv_utils.c
+++ b/drivers/gpu/drm/i915/dma_resv_utils.c
@@ -10,7 +10,7 @@
 void dma_resv_prune(struct dma_resv *resv)
 {
 	if (dma_resv_trylock(resv)) {
-		if (dma_resv_test_signaled_rcu(resv, true))
+		if (dma_resv_test_signaled(resv, true))
 			dma_resv_add_excl_fence(resv, NULL);
 		dma_resv_unlock(resv);
 	}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 35279dd561f5..6234e17259c1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * Alternatively, we can trade that extra information on read/write
 	 * activity with
 	 *	args->busy =
-	 *		!dma_resv_test_signaled_rcu(obj->resv, true);
+	 *		!dma_resv_test_signaled(obj->resv, true);
 	 * to report the overall busyness. This is what the wait-ioctl does.
 	 *
 	 */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 297143511f99..66789111a24b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1481,7 +1481,7 @@ static inline bool use_reloc_gpu(struct i915_vma *vma)
 	if (DBG_FORCE_RELOC)
 		return false;
 
-	return !dma_resv_test_signaled_rcu(vma->resv, true);
+	return !dma_resv_test_signaled(vma->resv, true);
 }
 
 static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index a657b99ec760..b5cbbe659a77 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -85,8 +85,8 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
 		return true;
 
 	/* we will unbind on next submission, still have userptr pins */
-	r = dma_resv_wait_timeout_rcu(obj->base.resv, true, false,
-				      MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(obj->base.resv, true, false,
+				  MAX_SCHEDULE_TIMEOUT);
 	if (r <= 0)
 		drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index c13aeddf5aa7..1e97520c62b2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -45,7 +45,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
 	unsigned int count, i;
 	int ret;
 
-	ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+	ret = dma_resv_get_fences(resv, &excl, &count, &shared);
 	if (ret)
 		return ret;
 
@@ -158,8 +158,8 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 	unsigned int count, i;
 	int ret;
 
-	ret = dma_resv_get_fences_rcu(obj->base.resv,
-				      &excl, &count, &shared);
+	ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
+				  &shared);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c85494f411f4..6cb91f042642 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1594,8 +1594,8 @@ i915_request_await_object(struct i915_request *to,
 		struct dma_fence **shared;
 		unsigned int count, i;
 
-		ret = dma_resv_get_fences_rcu(obj->base.resv,
-					      &excl, &count, &shared);
+		ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
+					  &shared);
 		if (ret)
 			return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 7aaf74552d06..c589a681da77 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -582,7 +582,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 		struct dma_fence **shared;
 		unsigned int count, i;
 
-		ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
 		if (ret)
 			return ret;
 
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 410a93a7e77f..a94a43de95ef 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -915,8 +915,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 	long ret;
 
-	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
-					true, remain);
+	ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
 	if (ret == 0)
 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
 	else if (ret < 0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index d863e5ed954a..5b27845075a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -964,8 +964,8 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 		return -ENOENT;
 	nvbo = nouveau_gem_object(gem);
 
-	lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
-					 no_wait ? 0 : 30 * HZ);
+	lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true,
+				     no_wait ? 0 : 30 * HZ);
 	if (!lret)
 		ret = -EBUSY;
 	else if (lret > 0)
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 1596559f3d14..075ec0ef746c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -312,8 +312,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
 	if (!gem_obj)
 		return -ENOENT;
 
-	ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
-					true, timeout);
+	ret = dma_resv_wait_timeout(gem_obj->resv, true, true, timeout);
 	if (!ret)
 		ret = timeout ? -ETIMEDOUT : -EBUSY;
 
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 3272c33af8fe..458f92a70887 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -161,7 +161,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
 	}
 	if (domain == RADEON_GEM_DOMAIN_CPU) {
 		/* Asking for cpu access wait for object idle */
-		r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+		r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
 		if (!r)
 			r = -EBUSY;
 
@@ -523,7 +523,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 	}
 	robj = gem_to_radeon_bo(gobj);
 
-	r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
+	r = dma_resv_test_signaled(robj->tbo.base.resv, true);
 	if (r == 0)
 		r = -EBUSY;
 	else
@@ -552,7 +552,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	}
 	robj = gem_to_radeon_bo(gobj);
 
-	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
 	if (ret == 0)
 		r = -EBUSY;
 	else if (ret < 0)
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index e37c9a57a7c3..9fa88549c89e 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -66,8 +66,8 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
 		return true;
 	}
 
-	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
-				      MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+				  MAX_SCHEDULE_TIMEOUT);
 	if (r <= 0)
 		DRM_ERROR("(%ld) failed to wait for user bo\n", r);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f04a269b7065..7e7284da5630 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -296,7 +296,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 	struct dma_resv *resv = &bo->base._resv;
 	int ret;
 
-	if (dma_resv_test_signaled_rcu(resv, true))
+	if (dma_resv_test_signaled(resv, true))
 		ret = 0;
 	else
 		ret = -EBUSY;
@@ -308,8 +308,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 		dma_resv_unlock(bo->base.resv);
 		spin_unlock(&bo->bdev->lru_lock);
 
-		lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
-						 30 * HZ);
+		lret = dma_resv_wait_timeout(resv, true, interruptible,
+					     30 * HZ);
 
 		if (lret < 0)
 			return lret;
@@ -411,8 +411,8 @@ static void ttm_bo_release(struct kref *kref)
 			/* Last resort, if we fail to allocate memory for the
 			 * fences block for the BO to become idle
 			 */
-			dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
-						  30 * HZ);
+			dma_resv_wait_timeout(bo->base.resv, true, false,
+					      30 * HZ);
 		}
 
 		if (bo->bdev->funcs->release_notify)
@@ -422,7 +422,7 @@ static void ttm_bo_release(struct kref *kref)
 		ttm_mem_io_free(bdev, bo->resource);
 	}
 
-	if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
+	if (!dma_resv_test_signaled(bo->base.resv, true) ||
 	    !dma_resv_trylock(bo->base.resv)) {
 		/* The BO is not idle, resurrect it for delayed destroy */
 		ttm_bo_flush_all_fences(bo);
@@ -1094,14 +1094,14 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 	long timeout = 15 * HZ;
 
 	if (no_wait) {
-		if (dma_resv_test_signaled_rcu(bo->base.resv, true))
+		if (dma_resv_test_signaled(bo->base.resv, true))
 			return 0;
 		else
 			return -EBUSY;
 	}
 
-	timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
-					    interruptible, timeout);
+	timeout = dma_resv_wait_timeout(bo->base.resv, true, interruptible,
+					timeout);
 	if (timeout < 0)
 		return timeout;
 
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 2902dc6e64fa..bd6f75285fd9 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -151,8 +151,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
 
 	/* Check for a conflicting fence */
 	resv = obj->resv;
-	if (!dma_resv_test_signaled_rcu(resv,
-					arg->flags & VGEM_FENCE_WRITE)) {
+	if (!dma_resv_test_signaled(resv, arg->flags & VGEM_FENCE_WRITE)) {
 		ret = -EBUSY;
 		goto err_fence;
 	}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 669f2ee39515..5c1ad1596889 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -451,10 +451,9 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 
 	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
-		ret = dma_resv_test_signaled_rcu(obj->resv, true);
+		ret = dma_resv_test_signaled(obj->resv, true);
 	} else {
-		ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
-						timeout);
+		ret = dma_resv_wait_timeout(obj->resv, true, true, timeout);
 	}
 	if (ret == 0)
 		ret = -EBUSY;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 176b6201ef2b..362f56d5b12b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -743,9 +743,9 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
 	if (flags & drm_vmw_synccpu_allow_cs) {
 		long lret;
 
-		lret = dma_resv_wait_timeout_rcu
-			(bo->base.resv, true, true,
-			nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
+		lret = dma_resv_wait_timeout(bo->base.resv, true, true,
+					     nonblock ? 0 :
+					     MAX_SCHEDULE_TIMEOUT);
 		if (!lret)
 			return -EBUSY;
 		else if (lret < 0)
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 3e0eefcead44..562b885cf9c3 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -271,19 +271,12 @@ void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
 int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
 void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
-
 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
-			    struct dma_fence **pfence_excl,
-			    unsigned *pshared_count,
-			    struct dma_fence ***pshared);
-
+int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
+			unsigned *pshared_count, struct dma_fence ***pshared);
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
-
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
-			       unsigned long timeout);
-
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
+long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
+			   unsigned long timeout);
+bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);
 
 #endif /* _LINUX_RESERVATION_H */
-- cgit
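The rename above is mechanical: argument order and return conventions are
unchanged across all callers. A minimal sketch of a caller of the renamed
wait API (hypothetical helper, not part of this series; resv is assumed to
be the reservation object of some buffer):

#include <linux/dma-resv.h>
#include <linux/jiffies.h>

/* Interruptibly wait up to 30s for all fences (exclusive and shared) on a
 * reservation object. dma_resv_wait_timeout() returns the remaining
 * jiffies (> 0) on success, 0 on timeout, or a negative error code such
 * as -ERESTARTSYS when interrupted.
 */
static long example_wait_idle(struct dma_resv *resv)
{
	long ret = dma_resv_wait_timeout(resv, true /* wait_all */,
					 true /* intr */, 30 * HZ);

	if (ret == 0)
		return -ETIMEDOUT;	/* fences still pending */
	return ret < 0 ? ret : 0;	/* idle, or propagate error */
}
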
From c90c4c6574f3feaf2203b5671db1907a1e15c653 Mon Sep 17 00:00:00 2001
From: Thomas Hellström
Date: Thu, 24 Jun 2021 13:29:14 +0200
Subject: drm/i915: Reinstate the mmap ioctl for some platforms
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reinstate the mmap ioctl for all current integrated platforms. The
intention was really to have it disabled only for discrete graphics,
where we enforce a single mmap mode. Disabling it on ADL-P was reported
to break the media stack, which was not the intention.

Longer term we still plan to sunset this ioctl even for integrated
platforms, in favour of using mmap_offset instead.

Fixes: 35cbd91eb541 ("drm/i915: Disable mmap ioctl for gen12+")
Signed-off-by: Thomas Hellström
Reviewed-by: Matthew Auld
Acked-by: Daniel Vetter
Signed-off-by: Matthew Auld
Link: https://patchwork.freedesktop.org/patch/msgid/20210624112914.311984-1-thomas.hellstrom@linux.intel.com
(cherry picked from commit d3f3baa3562a5d09f3e87f5fdf84952112807753)
Signed-off-by: Rodrigo Vivi
---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

(limited to 'drivers/gpu/drm/i915/gem')

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index ee0bf8811388..215326764606 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -61,10 +61,11 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj;
 	unsigned long addr;
 
-	/* mmap ioctl is disallowed for all platforms after TGL-LP. This also
-	 * covers all platforms with local memory.
+	/*
+	 * mmap ioctl is disallowed for all discrete platforms,
+	 * and for all platforms with GRAPHICS_VER > 12.
 	 */
-	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
+	if (IS_DGFX(i915) || GRAPHICS_VER(i915) > 12)
 		return -EOPNOTSUPP;
 
 	if (args->flags & ~(I915_MMAP_WC))
-- cgit
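For userspace, the upshot of this fix is that the legacy mmap ioctl keeps
working on integrated parts up to GRAPHICS_VER 12, while discrete parts
still return -EOPNOTSUPP. A sketch of the fallback a client might carry
(hypothetical helper using the i915 uAPI from libdrm's i915_drm.h; the
function name and the WB caching choice are illustrative only):

#include <stdint.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <i915_drm.h>

/* Map a GEM BO for CPU access: try the legacy mmap ioctl first and fall
 * back to mmap_offset when the kernel reports EOPNOTSUPP.
 */
static void *map_bo(int fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap arg = { .handle = handle, .size = size };

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
		return (void *)(uintptr_t)arg.addr_ptr;
	if (errno != EOPNOTSUPP)
		return MAP_FAILED;

	struct drm_i915_gem_mmap_offset mo = {
		.handle = handle,
		.flags = I915_MMAP_OFFSET_WB,	/* cached; WC also possible */
	};
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mo))
		return MAP_FAILED;
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
		    mo.offset);
}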