Diffstat (limited to 'drivers/misc/habanalabs/common/context.c')
-rw-r--r--  drivers/misc/habanalabs/common/context.c  73
1 file changed, 48 insertions(+), 25 deletions(-)
diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c
index ed2cfd0c6e99..2f4620b7990c 100644
--- a/drivers/misc/habanalabs/common/context.c
+++ b/drivers/misc/habanalabs/common/context.c
@@ -102,13 +102,13 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
hl_device_set_debug_mode(hdev, ctx, false);
hdev->asic_funcs->ctx_fini(ctx);
+
+ hl_dec_ctx_fini(ctx);
+
hl_cb_va_pool_fini(ctx);
hl_vm_ctx_fini(ctx);
hl_asid_free(hdev, ctx->asid);
hl_encaps_sig_mgr_fini(hdev, &ctx->sig_mgr);
-
- /* Scrub both SRAM and DRAM */
- hdev->asic_funcs->scrub_device_mem(hdev, 0, 0);
} else {
dev_dbg(hdev->dev, "closing kernel context\n");
hdev->asic_funcs->ctx_fini(ctx);
@@ -125,15 +125,22 @@ void hl_ctx_do_release(struct kref *ref)
hl_ctx_fini(ctx);
- if (ctx->hpriv)
- hl_hpriv_put(ctx->hpriv);
+ if (ctx->hpriv) {
+ struct hl_fpriv *hpriv = ctx->hpriv;
+
+ mutex_lock(&hpriv->ctx_lock);
+ hpriv->ctx = NULL;
+ mutex_unlock(&hpriv->ctx_lock);
+
+ hl_hpriv_put(hpriv);
+ }
kfree(ctx);
}
int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
{
- struct hl_ctx_mgr *mgr = &hpriv->ctx_mgr;
+ struct hl_ctx_mgr *ctx_mgr = &hpriv->ctx_mgr;
struct hl_ctx *ctx;
int rc;
@@ -143,9 +150,9 @@ int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
goto out_err;
}
- mutex_lock(&mgr->ctx_lock);
- rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
- mutex_unlock(&mgr->ctx_lock);
+ mutex_lock(&ctx_mgr->lock);
+ rc = idr_alloc(&ctx_mgr->handles, ctx, 1, 0, GFP_KERNEL);
+ mutex_unlock(&ctx_mgr->lock);
if (rc < 0) {
dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
@@ -170,9 +177,9 @@ int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
return 0;
remove_from_idr:
- mutex_lock(&mgr->ctx_lock);
- idr_remove(&mgr->ctx_handles, ctx->handle);
- mutex_unlock(&mgr->ctx_lock);
+ mutex_lock(&ctx_mgr->lock);
+ idr_remove(&ctx_mgr->handles, ctx->handle);
+ mutex_unlock(&ctx_mgr->lock);
free_ctx:
kfree(ctx);
out_err:
@@ -181,7 +188,7 @@ out_err:
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
- int rc = 0;
+ int rc = 0, i;
ctx->hdev = hdev;
@@ -197,6 +204,13 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
if (!ctx->cs_pending)
return -ENOMEM;
+ INIT_LIST_HEAD(&ctx->outcome_store.used_list);
+ INIT_LIST_HEAD(&ctx->outcome_store.free_list);
+ hash_init(ctx->outcome_store.outcome_map);
+ for (i = 0; i < ARRAY_SIZE(ctx->outcome_store.nodes_pool); ++i)
+ list_add(&ctx->outcome_store.nodes_pool[i].list_link,
+ &ctx->outcome_store.free_list);
+
hl_hw_block_mem_init(ctx);
if (is_kernel_ctx) {
@@ -262,6 +276,11 @@ err_hw_block_mem_fini:
return rc;
}
+static int hl_ctx_get_unless_zero(struct hl_ctx *ctx)
+{
+ return kref_get_unless_zero(&ctx->refcount);
+}
+
void hl_ctx_get(struct hl_ctx *ctx)
{
kref_get(&ctx->refcount);
@@ -280,11 +299,15 @@ struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev)
mutex_lock(&hdev->fpriv_list_lock);
list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
+ mutex_lock(&hpriv->ctx_lock);
+ ctx = hpriv->ctx;
+ if (ctx && !hl_ctx_get_unless_zero(ctx))
+ ctx = NULL;
+ mutex_unlock(&hpriv->ctx_lock);
+
/* There can only be a single user which has opened the compute device, so exit
- * immediately once we find him
+ * immediately once we find its context or if we see that it has been released
*/
- ctx = hpriv->ctx;
- hl_ctx_get(ctx);
break;
}
@@ -376,37 +399,37 @@ int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
/*
* hl_ctx_mgr_init - initialize the context manager
*
- * @mgr: pointer to context manager structure
+ * @ctx_mgr: pointer to context manager structure
*
* This manager is an object inside the hpriv object of the user process.
* The function is called when a user process opens the FD.
*/
-void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr)
+void hl_ctx_mgr_init(struct hl_ctx_mgr *ctx_mgr)
{
- mutex_init(&mgr->ctx_lock);
- idr_init(&mgr->ctx_handles);
+ mutex_init(&ctx_mgr->lock);
+ idr_init(&ctx_mgr->handles);
}
/*
* hl_ctx_mgr_fini - finalize the context manager
*
* @hdev: pointer to device structure
- * @mgr: pointer to context manager structure
+ * @ctx_mgr: pointer to context manager structure
*
* This function goes over all the contexts in the manager and frees them.
* It is called when a process closes the FD.
*/
-void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr)
+void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *ctx_mgr)
{
struct hl_ctx *ctx;
struct idr *idp;
u32 id;
- idp = &mgr->ctx_handles;
+ idp = &ctx_mgr->handles;
idr_for_each_entry(idp, ctx, id)
kref_put(&ctx->refcount, hl_ctx_do_release);
- idr_destroy(&mgr->ctx_handles);
- mutex_destroy(&mgr->ctx_lock);
+ idr_destroy(&ctx_mgr->handles);
+ mutex_destroy(&ctx_mgr->lock);
}
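
The hl_get_compute_ctx() and hl_ctx_do_release() hunks above close a race between looking up a context through hpriv->ctx and the final kref_put() on that context: the lookup now takes the pointer under hpriv->ctx_lock and elevates the refcount only via kref_get_unless_zero(), while the release path clears hpriv->ctx under the same lock. The following is a minimal standalone sketch of that pattern, not part of the patch; the struct owner, struct obj, lookup_obj() and related names are hypothetical.

/*
 * Minimal sketch (not part of the patch above) of the lookup-vs-release
 * pattern that hl_get_compute_ctx() and hl_ctx_do_release() now follow.
 * All names here (struct owner, struct obj, lookup_obj(), ...) are
 * hypothetical.
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct owner;

struct obj {
	struct kref refcount;
	struct owner *owner;		/* back-pointer, like ctx->hpriv */
};

struct owner {
	struct mutex lock;		/* protects @obj, like hpriv->ctx_lock */
	struct obj *obj;
};

static void obj_release(struct kref *ref)
{
	struct obj *obj = container_of(ref, struct obj, refcount);
	struct owner *owner = obj->owner;

	/* Hide the object from lookups, as hl_ctx_do_release() does for hpriv->ctx */
	mutex_lock(&owner->lock);
	owner->obj = NULL;
	mutex_unlock(&owner->lock);

	kfree(obj);
}

/*
 * Lookup side, mirroring hl_get_compute_ctx(): take a reference only if the
 * refcount has not already dropped to zero.
 */
static struct obj *lookup_obj(struct owner *owner)
{
	struct obj *obj;

	mutex_lock(&owner->lock);
	obj = owner->obj;
	if (obj && !kref_get_unless_zero(&obj->refcount))
		obj = NULL;	/* release already in flight, treat as gone */
	mutex_unlock(&owner->lock);

	return obj;
}

static void put_obj(struct obj *obj)
{
	kref_put(&obj->refcount, obj_release);
}

static struct obj *create_obj(struct owner *owner)
{
	struct obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	kref_init(&obj->refcount);	/* refcount starts at 1 */
	obj->owner = owner;

	mutex_lock(&owner->lock);	/* owner->lock assumed mutex_init()ed */
	owner->obj = obj;
	mutex_unlock(&owner->lock);

	return obj;
}

With this arrangement a concurrent lookup either sees owner->obj already cleared or sees kref_get_unless_zero() fail, so it can never hand out an object whose release has already begun.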