| author | Ingo Molnar <[email protected]> | 2016-10-16 11:31:39 +0200 |
|---|---|---|
| committer | Ingo Molnar <[email protected]> | 2016-10-16 11:31:39 +0200 |
| commit | 1d33369db25eb7f37b7a8bd22d736888b4501a9c (patch) | |
| tree | 116d764339be1bca928870151decbedc53a9e1d1 /drivers/dma-buf/dma-buf.c | |
| parent | 23446cb66c073b827779e5eb3dec301623299b32 (diff) | |
| parent | 1001354ca34179f3db924eb66672442a173147dc (diff) | |
Merge tag 'v4.9-rc1' into x86/urgent, to pick up updates
Signed-off-by: Ingo Molnar <[email protected]>
Diffstat (limited to 'drivers/dma-buf/dma-buf.c')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/dma-buf/dma-buf.c | 23 |
1 file changed, 23 insertions, 0 deletions
```diff
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index ddaee60ae52a..cf04d249a6a4 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -586,6 +586,22 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 
+static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+				      enum dma_data_direction direction)
+{
+	bool write = (direction == DMA_BIDIRECTIONAL ||
+		      direction == DMA_TO_DEVICE);
+	struct reservation_object *resv = dmabuf->resv;
+	long ret;
+
+	/* Wait on any implicit rendering fences */
+	ret = reservation_object_wait_timeout_rcu(resv, write, true,
+						  MAX_SCHEDULE_TIMEOUT);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
 /**
  * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
@@ -608,6 +624,13 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	if (dmabuf->ops->begin_cpu_access)
 		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
 
+	/* Ensure that all fences are waited upon - but we first allow
+	 * the native handler the chance to do so more efficiently if it
+	 * chooses. A double invocation here will be reasonably cheap no-op.
+	 */
+	if (ret == 0)
+		ret = __dma_buf_begin_cpu_access(dmabuf, direction);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
```
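For context, a minimal sketch (not part of this commit) of how an importing driver might bracket CPU access with the dma-buf API touched above; the helper name `example_cpu_read()` is hypothetical, while `dma_buf_begin_cpu_access()` and `dma_buf_end_cpu_access()` are the existing interfaces. With the change above, the begin call waits on the buffer's implicit reservation-object fences even if the exporter's `->begin_cpu_access` hook does not:

```c
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>

/*
 * Hypothetical importer helper: read back a buffer on the CPU after a
 * device has written it.  With the patch above, dma_buf_begin_cpu_access()
 * also waits on any implicit fences attached to dmabuf->resv, so the CPU
 * read cannot race an outstanding device write even if the exporter's
 * ->begin_cpu_access hook skips that wait.
 */
static int example_cpu_read(struct dma_buf *dmabuf)
{
	int ret;

	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* ... CPU accesses of the buffer's kernel mapping go here ... */

	return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
}
```

As the in-diff comment notes, the exporter's own `->begin_cpu_access` hook is still invoked first so it can synchronize more efficiently; the generic reservation-object wait then runs as a fallback, and waiting twice on already-signaled fences is a cheap no-op.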