author | Linus Torvalds <torvalds@linux-foundation.org> | 2022-10-08 08:56:37 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2022-10-08 08:56:37 -0700 |
commit | a09476668e3016ea4a7b0a7ebd02f44e0546c12c (patch) | |
tree | 0a2e435d40b1785bc3df0c455fbf858adc28d36c /drivers/misc/habanalabs/common/command_buffer.c | |
parent | e8bc52cb8df80c31c73c726ab58ea9746e9ff734 (diff) | |
parent | 7cd04013fbf3e6dcb67ca6b59aa813269a2ad9ce (diff) | |
Merge tag 'char-misc-6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc and other driver updates from Greg KH:
"Here is the large set of char/misc and other small driver subsystem
changes for 6.1-rc1. Loads of different things in here:
- IIO driver updates, additions, and changes. Probably the largest
part of the diffstat
- habanalabs driver update with support for new hardware and
features, the second largest part of the diff.
- fpga subsystem driver updates and additions
- mhi subsystem updates
- Coresight driver updates
- gnss subsystem updates
- extcon driver updates
- icc subsystem updates
- fsi subsystem updates
- nvmem subsystem and driver updates
- misc driver updates
- speakup driver additions for new features
- lots of tiny driver updates and cleanups
All of these have been in the linux-next tree for a while with no
reported issues"
* tag 'char-misc-6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (411 commits)
w1: Split memcpy() of struct cn_msg flexible array
spmi: pmic-arb: increase SPMI transaction timeout delay
spmi: pmic-arb: block access for invalid PMIC arbiter v5 SPMI writes
spmi: pmic-arb: correct duplicate APID to PPID mapping logic
spmi: pmic-arb: add support to dispatch interrupt based on IRQ status
spmi: pmic-arb: check apid against limits before calling irq handler
spmi: pmic-arb: do not ack and clear peripheral interrupts in cleanup_irq
spmi: pmic-arb: handle spurious interrupt
spmi: pmic-arb: add a print in cleanup_irq
drivers: spmi: Directly use ida_alloc()/free()
MAINTAINERS: add TI ECAP driver info
counter: ti-ecap-capture: capture driver support for ECAP
Documentation: ABI: sysfs-bus-counter: add frequency & num_overflows items
dt-bindings: counter: add ti,am62-ecap-capture.yaml
counter: Introduce the COUNTER_COMP_ARRAY component type
counter: Consolidate Counter extension sysfs attribute creation
counter: Introduce the Count capture component
counter: 104-quad-8: Add Signal polarity component
counter: Introduce the Signal polarity component
counter: interrupt-cnt: Implement watch_validate callback
...
Diffstat (limited to 'drivers/misc/habanalabs/common/command_buffer.c')
-rw-r--r-- | drivers/misc/habanalabs/common/command_buffer.c | 127 |
1 file changed, 35 insertions, 92 deletions
```diff
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index b027f66f8bd4..2b332991ac6a 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -12,20 +12,18 @@
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 
+#define CB_VA_POOL_SIZE		(4UL * SZ_1G)
+
 static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
 {
 	struct hl_device *hdev = ctx->hdev;
 	struct asic_fixed_properties *prop = &hdev->asic_prop;
-	struct hl_vm_va_block *va_block, *tmp;
-	dma_addr_t bus_addr;
-	u64 virt_addr;
 	u32 page_size = prop->pmmu.page_size;
-	s32 offset;
 	int rc;
 
 	if (!hdev->supports_cb_mapping) {
 		dev_err_ratelimited(hdev->dev,
-				"Cannot map CB because no VA range is allocated for CB mapping\n");
+				"Mapping a CB to the device's MMU is not supported\n");
 		return -EINVAL;
 	}
 
@@ -35,106 +33,45 @@ static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
 		return -EINVAL;
 	}
 
-	INIT_LIST_HEAD(&cb->va_block_list);
-
-	for (bus_addr = cb->bus_address;
-			bus_addr < cb->bus_address + cb->size;
-			bus_addr += page_size) {
-
-		virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size);
-		if (!virt_addr) {
-			dev_err(hdev->dev,
-				"Failed to allocate device virtual address for CB\n");
-			rc = -ENOMEM;
-			goto err_va_pool_free;
-		}
+	if (cb->is_mmu_mapped)
+		return 0;
 
-		va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
-		if (!va_block) {
-			rc = -ENOMEM;
-			gen_pool_free(ctx->cb_va_pool, virt_addr, page_size);
-			goto err_va_pool_free;
-		}
+	cb->roundup_size = roundup(cb->size, page_size);
 
-		va_block->start = virt_addr;
-		va_block->end = virt_addr + page_size - 1;
-		va_block->size = page_size;
-		list_add_tail(&va_block->node, &cb->va_block_list);
+	cb->virtual_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, cb->roundup_size);
+	if (!cb->virtual_addr) {
+		dev_err(hdev->dev, "Failed to allocate device virtual address for CB\n");
+		return -ENOMEM;
 	}
 
-	mutex_lock(&ctx->mmu_lock);
-
-	bus_addr = cb->bus_address;
-	offset = 0;
-	list_for_each_entry(va_block, &cb->va_block_list, node) {
-		rc = hl_mmu_map_page(ctx, va_block->start, bus_addr,
-				va_block->size, list_is_last(&va_block->node,
-							&cb->va_block_list));
-		if (rc) {
-			dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
-				va_block->start);
-			goto err_va_umap;
-		}
-
-		bus_addr += va_block->size;
-		offset += va_block->size;
+	mutex_lock(&hdev->mmu_lock);
+	rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->roundup_size);
+	if (rc) {
+		dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr);
+		goto err_va_umap;
 	}
 
 	rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV);
-
-	mutex_unlock(&ctx->mmu_lock);
+	mutex_unlock(&hdev->mmu_lock);
 
 	cb->is_mmu_mapped = true;
-
 	return rc;
 
 err_va_umap:
-	list_for_each_entry(va_block, &cb->va_block_list, node) {
-		if (offset <= 0)
-			break;
-		hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
-				offset <= va_block->size);
-		offset -= va_block->size;
-	}
-
-	rc = hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
-
-	mutex_unlock(&ctx->mmu_lock);
-
-err_va_pool_free:
-	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
-		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
-		list_del(&va_block->node);
-		kfree(va_block);
-	}
-
+	mutex_unlock(&hdev->mmu_lock);
+	gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
 	return rc;
 }
 
 static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
 {
 	struct hl_device *hdev = ctx->hdev;
-	struct hl_vm_va_block *va_block, *tmp;
-
-	mutex_lock(&ctx->mmu_lock);
-
-	list_for_each_entry(va_block, &cb->va_block_list, node)
-		if (hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
-				list_is_last(&va_block->node,
-						&cb->va_block_list)))
-			dev_warn_ratelimited(hdev->dev,
-					"Failed to unmap CB's va 0x%llx\n",
-					va_block->start);
 
+	mutex_lock(&hdev->mmu_lock);
+	hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size);
 	hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
+	mutex_unlock(&hdev->mmu_lock);
 
-	mutex_unlock(&ctx->mmu_lock);
-
-	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
-		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
-		list_del(&va_block->node);
-		kfree(va_block);
-	}
+	gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
 }
 
 static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
@@ -376,7 +313,6 @@ int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle)
 static int hl_cb_info(struct hl_mem_mgr *mmg,
 			u64 handle, u32 flags, u32 *usage_cnt, u64 *device_va)
 {
-	struct hl_vm_va_block *va_block;
 	struct hl_cb *cb;
 	int rc = 0;
 
@@ -388,9 +324,8 @@ static int hl_cb_info(struct hl_mem_mgr *mmg,
 	}
 
 	if (flags & HL_CB_FLAGS_GET_DEVICE_VA) {
-		va_block = list_first_entry(&cb->va_block_list, struct hl_vm_va_block, node);
-		if (va_block) {
-			*device_va = va_block->start;
+		if (cb->is_mmu_mapped) {
+			*device_va = cb->virtual_addr;
 		} else {
 			dev_err(mmg->dev, "CB is not mapped to the device's MMU\n");
 			rc = -EINVAL;
@@ -566,16 +501,23 @@ int hl_cb_va_pool_init(struct hl_ctx *ctx)
 		return -ENOMEM;
 	}
 
-	rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr,
-			prop->cb_va_end_addr - prop->cb_va_start_addr, -1);
+	ctx->cb_va_pool_base = hl_reserve_va_block(hdev, ctx, HL_VA_RANGE_TYPE_HOST,
+					CB_VA_POOL_SIZE, HL_MMU_VA_ALIGNMENT_NOT_NEEDED);
+	if (!ctx->cb_va_pool_base) {
+		rc = -ENOMEM;
+		goto err_pool_destroy;
+	}
+
+	rc = gen_pool_add(ctx->cb_va_pool, ctx->cb_va_pool_base, CB_VA_POOL_SIZE, -1);
 	if (rc) {
 		dev_err(hdev->dev,
 			"Failed to add memory to VA gen pool for CB mapping\n");
-		goto err_pool_destroy;
+		goto err_unreserve_va_block;
 	}
 
 	return 0;
 
+err_unreserve_va_block:
+	hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
 err_pool_destroy:
 	gen_pool_destroy(ctx->cb_va_pool);
 
@@ -590,4 +532,5 @@ void hl_cb_va_pool_fini(struct hl_ctx *ctx)
 		return;
 
 	gen_pool_destroy(ctx->cb_va_pool);
+	hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
 }
```
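The heart of this patch is the switch from per-page VA bookkeeping (a kmalloc'd `hl_vm_va_block` per page, mapped one page at a time) to a single page-rounded allocation carved out of a genalloc pool and mapped in one `hl_mmu_map_contiguous()` call, which also collapses the multi-stage error unwinding into a single unlock-and-free path. For readers unfamiliar with the kernel's genalloc facility, the sketch below shows the same pattern in isolation; it is a minimal, hypothetical example, not code from the driver — the `example_*` names, the hardcoded base address, and the pool size are illustrative, and the real driver instead obtains its base from `hl_reserve_va_block()` and maps the carved block through the habanalabs MMU helpers.

```c
/*
 * Minimal sketch of the genalloc-backed VA pool pattern used by the patch.
 * All example_* identifiers and constants are hypothetical.
 */
#include <linux/genalloc.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/sizes.h>

#define EXAMPLE_VA_BASE		0x1000000000UL	/* hypothetical reserved VA range */
#define EXAMPLE_VA_POOL_SIZE	(4UL * SZ_1G)

static struct gen_pool *example_pool;

static int example_va_pool_init(u32 page_size)
{
	/* Minimum allocation granularity is one MMU page. */
	example_pool = gen_pool_create(ilog2(page_size), -1);
	if (!example_pool)
		return -ENOMEM;

	/* Seed the pool with the reserved VA range, exactly once. */
	if (gen_pool_add(example_pool, EXAMPLE_VA_BASE, EXAMPLE_VA_POOL_SIZE, -1)) {
		gen_pool_destroy(example_pool);
		return -ENOMEM;
	}
	return 0;
}

/* Carve one contiguous, page-rounded VA block for a buffer of 'size' bytes. */
static unsigned long example_va_alloc(size_t size, u32 page_size)
{
	/* Returns 0 on exhaustion, mirroring the !cb->virtual_addr check above. */
	return gen_pool_alloc(example_pool, roundup(size, page_size));
}

static void example_va_free(unsigned long va, size_t size, u32 page_size)
{
	/* The freed size must match the rounded size used at allocation. */
	gen_pool_free(example_pool, va, roundup(size, page_size));
}
```

The design payoff visible in the diff: because each CB now owns exactly one pool block (`cb->virtual_addr` / `cb->roundup_size`), unmapping and error handling reduce to one `gen_pool_free()` instead of walking and freeing a list of per-page blocks.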