author    Linus Torvalds <[email protected]>    2020-12-14 11:07:56 -0800
committer Linus Torvalds <[email protected]>    2020-12-14 11:07:56 -0800
commit    1d36dffa5d887715dacca0f717f4519b7be5e498 (patch)
tree      a68f7c00dbb3036a67806ed6c6b8cc61c3cff60d /drivers/gpu/drm/ttm/ttm_bo_util.c
parent    2c85ebc57b3e1817b6ce1a6b703928e113a90442 (diff)
parent    b10733527bfd864605c33ab2e9a886eec317ec39 (diff)
Merge tag 'drm-next-2020-12-11' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "Not a huge amount of big things here, AMD has support for a few new
  HW variants (vangogh, green sardine, dimgrey cavefish), Intel has
  some more DG1 enablement. We have a few big reworks of the TTM layers
  and interfaces, GEM and atomic internal API reworks cross tree. fbdev
  is marked orphaned in here as well to reflect the current reality.

  core:
   - documentation updates
   - deprecate DRM_FORMAT_MOD_NONE
   - atomic crtc enable/disable rework
   - GEM convert drivers to gem object functions
   - remove SCATTER_LIST_MAX_SEGMENT

  sched:
   - avoid infinite waits

  ttm:
   - remove AGP support
   - don't modify caching for swapout
   - ttm pinning rework
   - major TTM reworks
   - new backend allocator
   - multihop support

  vram-helper:
   - top down BO placement fix
   - TTM changes
   - GEM object support

  displayport:
   - DP 2.0 DPCD prep work
   - DP MST extended DPCD caps

  fbdev:
   - mark as orphaned

  amdgpu:
   - Initial Vangogh support
   - Green Sardine support
   - Dimgrey Cavefish support
   - SG display support for renoir
   - SMU7 improvements
   - gfx9+ modifier support
   - CI BACO fixes

  radeon:
   - expose voltage via hwmon on SUMO

  amdkfd:
   - fix unique id handling

  i915:
   - more DG1 enablement
   - bigjoiner support
   - integer scaling filter support
   - async flip support
   - ICL+ DSI command mode
   - Improve display shutdown
   - Display refactoring
   - eLLC machine fbdev loading fix
   - dma scatterlist fixes
   - TGL hang fixes
   - eLLC display buffer caching on SKL+
   - MOCS PTE setting for gen9+

  msm:
   - Shutdown hook
   - GPU cooling device support
   - DSI 7nm and 10nm phy/pll updates
   - sm8150/sm8250 DPU support
   - GEM locking re-work
   - LLCC system cache support

  aspeed:
   - sysfs output config support

  ast:
   - LUT fix
   - new display mode

  gma500:
   - remove 2d framebuffer accel

  panfrost:
   - move gpu reset to a worker

  exynos:
   - new HDMI mode support

  mediatek:
   - MT8167 support
   - yaml bindings
   - MIPI DSI phy code moved

  etnaviv:
   - new perf counter
   - more lockdep annotation

  hibmc:
   - i2c DDC support

  ingenic:
   - pixel clock reset fix
   - reserved memory support
   - allow both DMA channels at once
   - different pixel format support
   - 30/24/8-bit palette modes

  tilcdc:
   - don't keep vblank irq enabled

  vc4:
   - new maintainer added
   - DSI registration fix

  virtio:
   - blob resource support
   - host visible and cross-device support
   - uuid api support"

* tag 'drm-next-2020-12-11' of git://anongit.freedesktop.org/drm/drm: (1754 commits)
  drm/amdgpu: Initialise drm_gem_object_funcs for imported BOs
  drm/amdgpu: fix size calculation with stolen vga memory
  drm/amdgpu: remove amdgpu_ttm_late_init and amdgpu_bo_late_init
  drm/amdgpu: free the pre-OS console framebuffer after the first modeset
  drm/amdgpu: enable runtime pm using BACO on CI dGPUs
  drm/amdgpu/cik: enable BACO reset on Bonaire
  drm/amd/pm: update smu10.h WORKLOAD_PPLIB setting for raven
  drm/amd/pm: remove one unsupported smu function for vangogh
  drm/amd/display: setup system context for APUs
  drm/amd/display: add S/G support for Vangogh
  drm/amdkfd: Fix leak in dmabuf import
  drm/amdgpu: use AMDGPU_NUM_VMID when possible
  drm/amdgpu: fix sdma instance fw version and feature version init
  drm/amd/pm: update driver if version for dimgrey_cavefish
  drm/amd/display: 3.2.115
  drm/amd/display: [FW Promotion] Release 0.0.45
  drm/amd/display: Revert DCN2.1 dram_clock_change_latency update
  drm/amd/display: Enable gpu_vm_support for dcn3.01
  drm/amd/display: Fixed the audio noise during mode switching with HDCP mode on
  drm/amd/display: Add wm table for Renoir
  ...
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo_util.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 175
1 file changed, 96 insertions(+), 79 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index fb2a25f8408f..7ccb2295cac1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -32,6 +32,7 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
+#include <linux/dma-buf-map.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
@@ -45,53 +46,6 @@ struct ttm_transfer_obj {
struct ttm_buffer_object *bo;
};
-void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
-{
- ttm_resource_free(bo, &bo->mem);
-}
-
-int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx,
- struct ttm_resource *new_mem)
-{
- struct ttm_tt *ttm = bo->ttm;
- struct ttm_resource *old_mem = &bo->mem;
- int ret;
-
- if (old_mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
-
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- pr_err("Failed to expire sync object before unbinding TTM\n");
- return ret;
- }
-
- ttm_bo_tt_unbind(bo);
- ttm_bo_free_old_node(bo);
- old_mem->mem_type = TTM_PL_SYSTEM;
- }
-
- ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
- if (unlikely(ret != 0))
- return ret;
-
- if (new_mem->mem_type != TTM_PL_SYSTEM) {
-
- ret = ttm_tt_populate(bo->bdev, ttm, ctx);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_bo_tt_bind(bo, new_mem);
- if (unlikely(ret != 0))
- return ret;
- }
-
- ttm_bo_assign_mem(bo, new_mem);
- return 0;
-}
-EXPORT_SYMBOL(ttm_bo_move_ttm);
-
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
struct ttm_resource *mem)
{
@@ -135,7 +89,7 @@ static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
} else {
size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
- if (mem->placement & TTM_PL_FLAG_WC)
+ if (mem->bus.caching == ttm_write_combined)
addr = ioremap_wc(mem->bus.offset, bus_size);
else
addr = ioremap(mem->bus.offset, bus_size);
@@ -227,11 +181,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
void *new_iomap;
int ret;
unsigned long i;
- unsigned long page;
- unsigned long add = 0;
- int dir;
- ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
+ ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
return ret;
@@ -267,29 +218,17 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
goto out1;
}
- add = 0;
- dir = 1;
-
- if ((old_mem->mem_type == new_mem->mem_type) &&
- (new_mem->start < old_mem->start + old_mem->size)) {
- dir = -1;
- add = new_mem->num_pages - 1;
- }
-
for (i = 0; i < new_mem->num_pages; ++i) {
- page = i * dir + add;
if (old_iomap == NULL) {
- pgprot_t prot = ttm_io_prot(old_mem->placement,
- PAGE_KERNEL);
- ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
+ pgprot_t prot = ttm_io_prot(bo, old_mem, PAGE_KERNEL);
+ ret = ttm_copy_ttm_io_page(ttm, new_iomap, i,
prot);
} else if (new_iomap == NULL) {
- pgprot_t prot = ttm_io_prot(new_mem->placement,
- PAGE_KERNEL);
- ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
+ pgprot_t prot = ttm_io_prot(bo, new_mem, PAGE_KERNEL);
+ ret = ttm_copy_io_ttm_page(ttm, old_iomap, i,
prot);
} else {
- ret = ttm_copy_io_page(new_iomap, old_iomap, page);
+ ret = ttm_copy_io_page(new_iomap, old_iomap, i);
}
if (ret)
goto out1;
@@ -352,7 +291,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
return -ENOMEM;
fbo->base = *bo;
- fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;
ttm_bo_get(bo);
fbo->bo = bo;
@@ -372,6 +310,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
+ fbo->base.pin_count = 1;
if (bo->type != ttm_bo_type_sg)
fbo->base.base.resv = &fbo->base.base._resv;
@@ -384,21 +323,28 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
return 0;
}
-pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
+pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
+ pgprot_t tmp)
{
+ struct ttm_resource_manager *man;
+ enum ttm_caching caching;
+
+ man = ttm_manager_type(bo->bdev, res->mem_type);
+ caching = man->use_tt ? bo->ttm->caching : res->bus.caching;
+
/* Cached mappings need no adjustment */
- if (caching_flags & TTM_PL_FLAG_CACHED)
+ if (caching == ttm_cached)
return tmp;
#if defined(__i386__) || defined(__x86_64__)
- if (caching_flags & TTM_PL_FLAG_WC)
+ if (caching == ttm_write_combined)
tmp = pgprot_writecombine(tmp);
else if (boot_cpu_data.x86 > 3)
tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
defined(__powerpc__) || defined(__mips__)
- if (caching_flags & TTM_PL_FLAG_WC)
+ if (caching == ttm_write_combined)
tmp = pgprot_writecombine(tmp);
else
tmp = pgprot_noncached(tmp);
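
[Editor's note: with this hunk, ttm_io_prot() no longer inspects TTM_PL_FLAG_* placement bits; it reads the caching mode as an enum ttm_caching value, either from the bound TT (bo->ttm->caching) or from the bus descriptor (res->bus.caching). Below is a minimal sketch of how a driver's io_mem_reserve() backend is expected to publish that caching under the reworked model. The example_device structure, its vram_base field and the mem-type handling are illustrative assumptions, not taken from this patch.]

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/* Hypothetical driver device wrapping the TTM device. */
struct example_device {
	struct ttm_bo_device bdev;
	resource_size_t vram_base;	/* hypothetical VRAM aperture start */
};

/* Hypothetical backend callback: describe how a resource is reached over
 * the bus.  mem->bus.caching is now an enum ttm_caching value rather than
 * a TTM_PL_FLAG_* bit, which is what the reworked ttm_io_prot(),
 * ttm_resource_ioremap() and ttm_bo_vmap() consume. */
static int example_io_mem_reserve(struct ttm_bo_device *bdev,
				  struct ttm_resource *mem)
{
	struct example_device *edev =
		container_of(bdev, struct example_device, bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case TTM_PL_TT:
		return 0;	/* system memory: nothing to map */
	case TTM_PL_VRAM:
		mem->bus.offset = (mem->start << PAGE_SHIFT) + edev->vram_base;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_write_combined;	/* was TTM_PL_FLAG_WC */
		return 0;
	default:
		return -EINVAL;
	}
}
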
@@ -422,7 +368,7 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
} else {
map->bo_kmap_type = ttm_bo_map_iomap;
- if (mem->placement & TTM_PL_FLAG_WC)
+ if (mem->bus.caching == ttm_write_combined)
map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
size);
else
@@ -452,7 +398,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
if (ret)
return ret;
- if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
+ if (num_pages == 1 && ttm->caching == ttm_cached) {
/*
* We're mapping a single page, and the desired
* page protection is consistent with the bo.
@@ -466,7 +412,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
* We need to use vmap to get the desired page protection
* or to make the buffer object look contiguous.
*/
- prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
+ prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
map->bo_kmap_type = ttm_bo_map_vmap;
map->virtual = vmap(ttm->pages + start_page, num_pages,
0, prot);
@@ -526,6 +472,77 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
}
EXPORT_SYMBOL(ttm_bo_kunmap);
+int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
+{
+ struct ttm_resource *mem = &bo->mem;
+ int ret;
+
+ ret = ttm_mem_io_reserve(bo->bdev, mem);
+ if (ret)
+ return ret;
+
+ if (mem->bus.is_iomem) {
+ void __iomem *vaddr_iomem;
+ size_t size = bo->num_pages << PAGE_SHIFT;
+
+ if (mem->bus.addr)
+ vaddr_iomem = (void __iomem *)mem->bus.addr;
+ else if (mem->bus.caching == ttm_write_combined)
+ vaddr_iomem = ioremap_wc(mem->bus.offset, size);
+ else
+ vaddr_iomem = ioremap(mem->bus.offset, size);
+
+ if (!vaddr_iomem)
+ return -ENOMEM;
+
+ dma_buf_map_set_vaddr_iomem(map, vaddr_iomem);
+
+ } else {
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+ struct ttm_tt *ttm = bo->ttm;
+ pgprot_t prot;
+ void *vaddr;
+
+ ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
+ if (ret)
+ return ret;
+
+ /*
+ * We need to use vmap to get the desired page protection
+ * or to make the buffer object look contiguous.
+ */
+ prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
+ vaddr = vmap(ttm->pages, bo->num_pages, 0, prot);
+ if (!vaddr)
+ return -ENOMEM;
+
+ dma_buf_map_set_vaddr(map, vaddr);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_bo_vmap);
+
+void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
+{
+ struct ttm_resource *mem = &bo->mem;
+
+ if (dma_buf_map_is_null(map))
+ return;
+
+ if (!map->is_iomem)
+ vunmap(map->vaddr);
+ else if (!mem->bus.addr)
+ iounmap(map->vaddr_iomem);
+ dma_buf_map_clear(map);
+
+ ttm_mem_io_free(bo->bdev, &bo->mem);
+}
+EXPORT_SYMBOL(ttm_bo_vunmap);
+
static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
bool dst_use_tt)
{
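
[Editor's note: the new ttm_bo_vmap()/ttm_bo_vunmap() pair added in the hunk above gives callers a single entry point for whole-object kernel mappings, returning either a vmap'ed or an ioremap'ed address through struct dma_buf_map. A small driver-side usage sketch follows, assuming the caller already holds the reservation and keeps the BO pinned while the mapping exists; the helper name and the memcpy payload are illustrative, not part of this patch.]

#include <linux/dma-buf-map.h>
#include <drm/ttm/ttm_bo_api.h>

/* Hypothetical helper: copy a blob into a reserved, pinned BO through a
 * temporary kernel mapping.  The dma_buf_map descriptor records whether
 * the address is I/O memory, so the right memcpy flavour can be picked. */
static int example_fill_bo(struct ttm_buffer_object *bo,
			   const void *src, size_t len)
{
	struct dma_buf_map map;
	int ret;

	ret = ttm_bo_vmap(bo, &map);	/* vmap() or ioremap() as needed */
	if (ret)
		return ret;

	if (map.is_iomem)
		memcpy_toio(map.vaddr_iomem, src, len);
	else
		memcpy(map.vaddr, src, len);

	ttm_bo_vunmap(bo, &map);	/* also clears the map descriptor */
	return 0;
}
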
@@ -536,7 +553,7 @@ static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
if (!dst_use_tt)
ttm_bo_tt_destroy(bo);
- ttm_bo_free_old_node(bo);
+ ttm_resource_free(bo, &bo->mem);
return 0;
}
@@ -597,7 +614,7 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
}
spin_unlock(&from->move_lock);
- ttm_bo_free_old_node(bo);
+ ttm_resource_free(bo, &bo->mem);
dma_fence_put(bo->moving);
bo->moving = dma_fence_get(fence);