Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem_vma.c')
-rw-r--r--	drivers/gpu/drm/msm/msm_gem_vma.c	61
1 files changed, 46 insertions, 15 deletions
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index f914ddbaea89..3c1dc9241831 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -5,6 +5,7 @@
  */
 
 #include "msm_drv.h"
+#include "msm_fence.h"
 #include "msm_gem.h"
 #include "msm_mmu.h"
 
@@ -37,14 +38,31 @@ msm_gem_address_space_get(struct msm_gem_address_space *aspace)
 	return aspace;
 }
 
+bool msm_gem_vma_inuse(struct msm_gem_vma *vma)
+{
+	if (vma->inuse > 0)
+		return true;
+
+	while (vma->fence_mask) {
+		unsigned idx = ffs(vma->fence_mask) - 1;
+
+		if (!msm_fence_completed(vma->fctx[idx], vma->fence[idx]))
+			return true;
+
+		vma->fence_mask &= ~BIT(idx);
+	}
+
+	return false;
+}
+
 /* Actually unmap memory for the vma */
 void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma)
 {
-	unsigned size = vma->node.size << PAGE_SHIFT;
+	unsigned size = vma->node.size;
 
 	/* Print a message if we try to purge a vma in use */
-	if (WARN_ON(vma->inuse > 0))
+	if (GEM_WARN_ON(msm_gem_vma_inuse(vma)))
 		return;
 
 	/* Don't do anything if the memory isn't mapped */
@@ -58,22 +76,32 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
 }
 
 /* Remove reference counts for the mapping */
-void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma)
+void msm_gem_unpin_vma(struct msm_gem_vma *vma)
 {
-	if (!WARN_ON(!vma->iova))
+	if (GEM_WARN_ON(!vma->inuse))
+		return;
+	if (!GEM_WARN_ON(!vma->iova))
 		vma->inuse--;
 }
 
+/* Replace pin reference with fence: */
+void msm_gem_unpin_vma_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx)
+{
+	vma->fctx[fctx->index] = fctx;
+	vma->fence[fctx->index] = fctx->last_fence;
+	vma->fence_mask |= BIT(fctx->index);
+	msm_gem_unpin_vma(vma);
+}
+
+/* Map and pin vma: */
 int
 msm_gem_map_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, int prot,
-		struct sg_table *sgt, int npages)
+		struct sg_table *sgt, int size)
 {
-	unsigned size = npages << PAGE_SHIFT;
 	int ret = 0;
 
-	if (WARN_ON(!vma->iova))
+	if (GEM_WARN_ON(!vma->iova))
 		return -EINVAL;
 
 	/* Increase the usage counter */
@@ -100,7 +128,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
 void msm_gem_close_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma)
 {
-	if (WARN_ON(vma->inuse > 0 || vma->mapped))
+	if (GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped))
 		return;
 
 	spin_lock(&aspace->lock);
@@ -115,23 +143,24 @@ void msm_gem_close_vma(struct msm_gem_address_space *aspace,
 
 /* Initialize a new vma and allocate an iova for it */
 int msm_gem_init_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, int npages,
+		struct msm_gem_vma *vma, int size,
 		u64 range_start, u64 range_end)
 {
 	int ret;
 
-	if (WARN_ON(vma->iova))
+	if (GEM_WARN_ON(vma->iova))
 		return -EBUSY;
 
 	spin_lock(&aspace->lock);
-	ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node, npages, 0,
-		0, range_start, range_end, 0);
+	ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node,
+					  size, PAGE_SIZE, 0,
+					  range_start, range_end, 0);
 	spin_unlock(&aspace->lock);
 
 	if (ret)
 		return ret;
 
-	vma->iova = vma->node.start << PAGE_SHIFT;
+	vma->iova = vma->node.start;
 	vma->mapped = false;
 
 	kref_get(&aspace->kref);
@@ -155,8 +184,10 @@ msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
 	spin_lock_init(&aspace->lock);
 	aspace->name = name;
 	aspace->mmu = mmu;
+	aspace->va_start = va_start;
+	aspace->va_size = size;
 
-	drm_mm_init(&aspace->mm, va_start >> PAGE_SHIFT, size >> PAGE_SHIFT);
+	drm_mm_init(&aspace->mm, va_start, size);
 
 	kref_init(&aspace->kref);
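Note: the snippet below is not part of the patch. It is a minimal caller-side sketch of how the reworked helpers are intended to fit together; the wrapper example_submit() and its arguments are placeholders, and the usual msm driver headers are assumed.

/*
 * Hypothetical caller sketch (not from this patch): illustrates the
 * pin / fenced-unpin lifecycle implied by the new helpers.
 */
static int example_submit(struct msm_gem_address_space *aspace,
			  struct msm_gem_vma *vma, struct sg_table *sgt,
			  int prot, int size, struct msm_fence_context *fctx)
{
	int ret;

	/* map + pin: the size argument is now in bytes rather than pages */
	ret = msm_gem_map_vma(aspace, vma, prot, sgt, size);
	if (ret)
		return ret;

	/* ... queue GPU work that uses vma->iova ... */

	/*
	 * Rather than dropping the pin immediately, record fctx and its
	 * last_fence on the vma.  msm_gem_vma_inuse() keeps reporting the
	 * vma as busy until that fence has completed, so purging/closing
	 * the mapping is deferred until the GPU is done with it.
	 */
	msm_gem_unpin_vma_fenced(vma, fctx);

	return 0;
}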