Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_resource.c')
 drivers/gpu/drm/ttm/ttm_resource.c | 330 ++++++++++++++++++++++++++++++++-----
 1 file changed, 306 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 035d71332d18..65889b3caf50 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -22,17 +22,144 @@
* Authors: Christian König
*/
-#include <linux/dma-buf-map.h>
+#include <linux/iosys-map.h>
#include <linux/io-mapping.h>
#include <linux/scatterlist.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_bo_driver.h>
+/**
+ * ttm_lru_bulk_move_init - initialize a bulk move structure
+ * @bulk: the structure to init
+ *
+ * For now just memset the structure to zero.
+ */
+void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
+{
+ memset(bulk, 0, sizeof(*bulk));
+}
+EXPORT_SYMBOL(ttm_lru_bulk_move_init);
+
+/**
+ * ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
+ *
+ * @bulk: bulk move structure
+ *
+ * Bulk move BOs to the LRU tail, only valid to use when the driver makes sure
+ * that the resource order never changes. Should be called with the
+ * &ttm_device.lru_lock held.
+ */
+void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
+{
+ unsigned i, j;
+
+ for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
+ for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
+ struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
+ struct ttm_resource_manager *man;
+
+ if (!pos->first)
+ continue;
+
+ lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
+ dma_resv_assert_held(pos->first->bo->base.resv);
+ dma_resv_assert_held(pos->last->bo->base.resv);
+
+ man = ttm_manager_type(pos->first->bo->bdev, i);
+ list_bulk_move_tail(&man->lru[j], &pos->first->lru,
+ &pos->last->lru);
+ }
+ }
+}
+EXPORT_SYMBOL(ttm_lru_bulk_move_tail);
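
As a rough usage sketch (not taken from this patch itself): a driver that wants to keep all BOs of one VM grouped on the LRUs can embed a struct ttm_lru_bulk_move, let resources join it through bo->bulk_move, and then bump the whole range at once. The my_vm structure and helpers below are hypothetical illustration names.

/* Hypothetical driver-side sketch, not part of this diff. */
struct my_vm {
	struct ttm_lru_bulk_move lru_bulk_move;
	/* BOs belonging to this VM have bo->bulk_move = &vm->lru_bulk_move */
};

static void my_vm_init(struct my_vm *vm)
{
	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
}

static void my_vm_move_to_lru_tail(struct ttm_device *bdev, struct my_vm *vm)
{
	/*
	 * Only valid while the order of the bulk-moved resources is stable
	 * and the affected reservation objects are held, as checked by the
	 * dma_resv_assert_held() calls in ttm_lru_bulk_move_tail().
	 */
	spin_lock(&bdev->lru_lock);
	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
	spin_unlock(&bdev->lru_lock);
}
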
+
+/* Return the bulk move pos object for this resource */
+static struct ttm_lru_bulk_move_pos *
+ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
+{
+ return &bulk->pos[res->mem_type][res->bo->priority];
+}
+
+/* Move the resource to the tail of the bulk move range */
+static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
+ struct ttm_resource *res)
+{
+ if (pos->last != res) {
+ list_move(&res->lru, &pos->last->lru);
+ pos->last = res;
+ }
+}
+
+/* Add the resource to a bulk_move cursor */
+void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
+ struct ttm_resource *res)
+{
+ struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
+
+ if (!pos->first) {
+ pos->first = res;
+ pos->last = res;
+ } else {
+ ttm_lru_bulk_move_pos_tail(pos, res);
+ }
+}
+
+/* Remove the resource from a bulk_move range */
+void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
+ struct ttm_resource *res)
+{
+ struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
+
+ if (unlikely(pos->first == res && pos->last == res)) {
+ pos->first = NULL;
+ pos->last = NULL;
+ } else if (pos->first == res) {
+ pos->first = list_next_entry(res, lru);
+ } else if (pos->last == res) {
+ pos->last = list_prev_entry(res, lru);
+ } else {
+ list_move(&res->lru, &pos->last->lru);
+ }
+}
+
+/* Move a resource to the LRU or bulk tail */
+void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
+{
+ struct ttm_buffer_object *bo = res->bo;
+ struct ttm_device *bdev = bo->bdev;
+
+ lockdep_assert_held(&bo->bdev->lru_lock);
+
+ if (bo->pin_count) {
+ list_move_tail(&res->lru, &bdev->pinned);
+
+ } else if (bo->bulk_move) {
+ struct ttm_lru_bulk_move_pos *pos =
+ ttm_lru_bulk_move_pos(bo->bulk_move, res);
+
+ ttm_lru_bulk_move_pos_tail(pos, res);
+ } else {
+ struct ttm_resource_manager *man;
+
+ man = ttm_manager_type(bdev, res->mem_type);
+ list_move_tail(&res->lru, &man->lru[bo->priority]);
+ }
+}
+
+/**
+ * ttm_resource_init - resource object constructor
+ * @bo: buffer object this resource is allocated for
+ * @place: placement of the resource
+ * @res: the resource object to initialize
+ *
+ * Initialize a new resource object. Counterpart of ttm_resource_fini().
+ */
void ttm_resource_init(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource *res)
{
+ struct ttm_resource_manager *man;
+
res->start = 0;
res->num_pages = PFN_UP(bo->base.size);
res->mem_type = place->mem_type;
@@ -41,9 +168,42 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
res->bus.offset = 0;
res->bus.is_iomem = false;
res->bus.caching = ttm_cached;
+ res->bo = bo;
+ INIT_LIST_HEAD(&res->lru);
+
+ man = ttm_manager_type(bo->bdev, place->mem_type);
+ spin_lock(&bo->bdev->lru_lock);
+ man->usage += res->num_pages << PAGE_SHIFT;
+ if (bo->bulk_move)
+ ttm_lru_bulk_move_add(bo->bulk_move, res);
+ else
+ ttm_resource_move_to_lru_tail(res);
+ spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);
+/**
+ * ttm_resource_fini - resource destructor
+ * @man: the resource manager this resource belongs to
+ * @res: the resource to clean up
+ *
+ * Should be used by resource manager backends to clean up the TTM resource
+ * objects before freeing the underlying structure. Makes sure the resource is
+ * removed from the LRU before destruction.
+ * Counterpart of ttm_resource_init().
+ */
+void ttm_resource_fini(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
+{
+ struct ttm_device *bdev = man->bdev;
+
+ spin_lock(&bdev->lru_lock);
+ list_del_init(&res->lru);
+ man->usage -= res->num_pages << PAGE_SHIFT;
+ spin_unlock(&bdev->lru_lock);
+}
+EXPORT_SYMBOL(ttm_resource_fini);
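
For illustration, a minimal resource manager backend would pair the two calls roughly as below. The my_man_* callbacks are hypothetical and modeled on the simplest possible backend; the real hooks are man->func->alloc and man->func->free.

/* Minimal backend sketch (hypothetical), showing the init/fini pairing. */
static int my_man_alloc(struct ttm_resource_manager *man,
			struct ttm_buffer_object *bo,
			const struct ttm_place *place,
			struct ttm_resource **res)
{
	*res = kzalloc(sizeof(**res), GFP_KERNEL);
	if (!*res)
		return -ENOMEM;

	/* Adds the resource to the LRU and accounts its size in man->usage. */
	ttm_resource_init(bo, place, *res);
	return 0;
}

static void my_man_free(struct ttm_resource_manager *man,
			struct ttm_resource *res)
{
	/* Removes the resource from the LRU and drops the usage again. */
	ttm_resource_fini(man, res);
	kfree(res);
}
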
+
int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource **res_ptr)
@@ -61,6 +221,12 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
if (!*res)
return;
+ if (bo->bulk_move) {
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_lru_bulk_move_del(bo->bulk_move, *res);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
+
man = ttm_manager_type(bo->bdev, (*res)->mem_type);
man->func->free(man, *res);
*res = NULL;
@@ -116,21 +282,33 @@ bool ttm_resource_compat(struct ttm_resource *res,
}
EXPORT_SYMBOL(ttm_resource_compat);
+void ttm_resource_set_bo(struct ttm_resource *res,
+ struct ttm_buffer_object *bo)
+{
+ spin_lock(&bo->bdev->lru_lock);
+ res->bo = bo;
+ spin_unlock(&bo->bdev->lru_lock);
+}
+
/**
* ttm_resource_manager_init
*
* @man: memory manager object to init
- * @p_size: size managed area in pages.
+ * @bdev: ttm device this manager belongs to
+ * @size: size of managed resources in arbitrary units
*
* Initialise core parts of a manager object.
*/
void ttm_resource_manager_init(struct ttm_resource_manager *man,
- unsigned long p_size)
+ struct ttm_device *bdev,
+ uint64_t size)
{
unsigned i;
spin_lock_init(&man->move_lock);
- man->size = p_size;
+ man->bdev = bdev;
+ man->size = size;
+ man->usage = 0;
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
INIT_LIST_HEAD(&man->lru[i]);
@@ -192,6 +370,24 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
EXPORT_SYMBOL(ttm_resource_manager_evict_all);
/**
+ * ttm_resource_manager_usage
+ *
+ * @man: A memory manager object.
+ *
+ * Return how much of the resources managed by @man is currently used, in the
+ * same (arbitrary) units as the manager size.
+ */
+uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
+{
+ uint64_t usage;
+
+ spin_lock(&man->bdev->lru_lock);
+ usage = man->usage;
+ spin_unlock(&man->bdev->lru_lock);
+ return usage;
+}
+EXPORT_SYMBOL(ttm_resource_manager_usage);
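
Since the helper takes the lru_lock internally, callers can use it directly. A driver could, for instance, compute how full a manager is; the helper below is illustrative only and not part of this patch.

/* Illustrative only: report how full a manager is, in percent. */
static u64 my_man_fill_percent(struct ttm_device *bdev, unsigned int mem_type)
{
	struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type);

	if (!man || !man->size)
		return 0;

	return ttm_resource_manager_usage(man) * 100 / man->size;
}
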
+
+/**
* ttm_resource_manager_debug
*
* @man: manager type to dump.
@@ -203,13 +399,65 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
drm_printf(p, " use_type: %d\n", man->use_type);
drm_printf(p, " use_tt: %d\n", man->use_tt);
drm_printf(p, " size: %llu\n", man->size);
+ drm_printf(p, " usage: %llu\n", ttm_resource_manager_usage(man));
if (man->func->debug)
man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);
+/**
+ * ttm_resource_manager_first
+ *
+ * @man: resource manager to iterate over
+ * @cursor: cursor to record the position
+ *
+ * Returns the first resource from the resource manager.
+ */
+struct ttm_resource *
+ttm_resource_manager_first(struct ttm_resource_manager *man,
+ struct ttm_resource_cursor *cursor)
+{
+ struct ttm_resource *res;
+
+ lockdep_assert_held(&man->bdev->lru_lock);
+
+ for (cursor->priority = 0; cursor->priority < TTM_MAX_BO_PRIORITY;
+ ++cursor->priority)
+ list_for_each_entry(res, &man->lru[cursor->priority], lru)
+ return res;
+
+ return NULL;
+}
+
+/**
+ * ttm_resource_manager_next
+ *
+ * @man: resource manager to iterate over
+ * @cursor: cursor to record the position
+ * @res: the current resource pointer
+ *
+ * Returns the next resource from the resource manager.
+ */
+struct ttm_resource *
+ttm_resource_manager_next(struct ttm_resource_manager *man,
+ struct ttm_resource_cursor *cursor,
+ struct ttm_resource *res)
+{
+ lockdep_assert_held(&man->bdev->lru_lock);
+
+ list_for_each_entry_continue(res, &man->lru[cursor->priority], lru)
+ return res;
+
+ for (++cursor->priority; cursor->priority < TTM_MAX_BO_PRIORITY;
+ ++cursor->priority)
+ list_for_each_entry(res, &man->lru[cursor->priority], lru)
+ return res;
+
+ return NULL;
+}
+
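Together with ttm_resource_manager_first(), this forms an iterator over every resource of a manager, in priority and then LRU order; the caller has to hold the lru_lock across the whole walk, as the lockdep assertions require. A sketch of the intended loop follows; the my_count_pages() helper is made up.

/* Walk all resources of @man; the lru_lock is held across the whole loop. */
static u64 my_count_pages(struct ttm_resource_manager *man)
{
	struct ttm_resource_cursor cursor;
	struct ttm_resource *res;
	u64 pages = 0;

	spin_lock(&man->bdev->lru_lock);
	for (res = ttm_resource_manager_first(man, &cursor); res;
	     res = ttm_resource_manager_next(man, &cursor, res))
		pages += res->num_pages;
	spin_unlock(&man->bdev->lru_lock);

	return pages;
}
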
static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
- struct dma_buf_map *dmap,
+ struct iosys_map *dmap,
pgoff_t i)
{
struct ttm_kmap_iter_iomap *iter_io =
@@ -236,11 +484,11 @@ retry:
addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
(((resource_size_t)i - iter_io->cache.i)
<< PAGE_SHIFT));
- dma_buf_map_set_vaddr_iomem(dmap, addr);
+ iosys_map_set_vaddr_iomem(dmap, addr);
}
static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
- struct dma_buf_map *map)
+ struct iosys_map *map)
{
io_mapping_unmap_local(map->vaddr_iomem);
}
@@ -291,14 +539,14 @@ EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);
*/
static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
- struct dma_buf_map *dmap,
+ struct iosys_map *dmap,
pgoff_t i)
{
struct ttm_kmap_iter_linear_io *iter_io =
container_of(iter, typeof(*iter_io), base);
*dmap = iter_io->dmap;
- dma_buf_map_incr(dmap, i * PAGE_SIZE);
+ iosys_map_incr(dmap, i * PAGE_SIZE);
}
static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
@@ -334,7 +582,7 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
}
if (mem->bus.addr) {
- dma_buf_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
+ iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
iter_io->needs_unmap = false;
} else {
size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
@@ -342,23 +590,23 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
iter_io->needs_unmap = true;
memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
if (mem->bus.caching == ttm_write_combined)
- dma_buf_map_set_vaddr_iomem(&iter_io->dmap,
- ioremap_wc(mem->bus.offset,
- bus_size));
+ iosys_map_set_vaddr_iomem(&iter_io->dmap,
+ ioremap_wc(mem->bus.offset,
+ bus_size));
else if (mem->bus.caching == ttm_cached)
- dma_buf_map_set_vaddr(&iter_io->dmap,
- memremap(mem->bus.offset, bus_size,
- MEMREMAP_WB |
- MEMREMAP_WT |
- MEMREMAP_WC));
+ iosys_map_set_vaddr(&iter_io->dmap,
+ memremap(mem->bus.offset, bus_size,
+ MEMREMAP_WB |
+ MEMREMAP_WT |
+ MEMREMAP_WC));
/* If uncached requested or if mapping cached or wc failed */
- if (dma_buf_map_is_null(&iter_io->dmap))
- dma_buf_map_set_vaddr_iomem(&iter_io->dmap,
- ioremap(mem->bus.offset,
- bus_size));
+ if (iosys_map_is_null(&iter_io->dmap))
+ iosys_map_set_vaddr_iomem(&iter_io->dmap,
+ ioremap(mem->bus.offset,
+ bus_size));
- if (dma_buf_map_is_null(&iter_io->dmap)) {
+ if (iosys_map_is_null(&iter_io->dmap)) {
ret = -ENOMEM;
goto out_io_free;
}
@@ -387,7 +635,7 @@ ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
struct ttm_device *bdev,
struct ttm_resource *mem)
{
- if (iter_io->needs_unmap && dma_buf_map_is_set(&iter_io->dmap)) {
+ if (iter_io->needs_unmap && iosys_map_is_set(&iter_io->dmap)) {
if (iter_io->dmap.is_iomem)
iounmap(iter_io->dmap.vaddr_iomem);
else
@@ -396,3 +644,37 @@ ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
ttm_mem_io_free(bdev, mem);
}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int ttm_resource_manager_show(struct seq_file *m, void *unused)
+{
+ struct ttm_resource_manager *man =
+ (struct ttm_resource_manager *)m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ ttm_resource_manager_debug(man, &p);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ttm_resource_manager);
+
+#endif
+
+/**
+ * ttm_resource_manager_create_debugfs - Create debugfs entry for specified
+ * resource manager.
+ * @man: The TTM resource manager for which the debugfs stats file will be created
+ * @parent: debugfs directory in which the file will reside
+ * @name: The filename to create.
+ *
+ * This function sets up a debugfs file that can be used to look
+ * at debug statistics of the specified ttm_resource_manager.
+ */
+void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
+ struct dentry *parent,
+ const char *name)
+{
+#if defined(CONFIG_DEBUG_FS)
+ debugfs_create_file(name, 0444, parent, man, &ttm_resource_manager_fops);
+#endif
+}
+EXPORT_SYMBOL(ttm_resource_manager_create_debugfs);
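
A typical call site would be a driver's debugfs init code, handing in the manager, a parent directory and a file name. The function name and the "vram_mm" file name below are hypothetical; only the TTM calls themselves come from this patch.

/* Hypothetical driver call site: expose the VRAM manager under debugfs. */
static void my_driver_debugfs_init(struct drm_minor *minor,
				   struct ttm_device *bdev)
{
	ttm_resource_manager_create_debugfs(ttm_manager_type(bdev, TTM_PL_VRAM),
					    minor->debugfs_root, "vram_mm");
}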