author     Daniel Vetter <daniel.vetter@ffwll.ch>    2017-02-26 21:34:42 +0100
committer  Daniel Vetter <daniel.vetter@ffwll.ch>    2017-02-26 21:34:42 +0100
commit     8e22e1b3499a446df48c2b26667ca36c55bf864c (patch)
tree       5329f98b3eb3c95a9dcbab0fa4f9b6e62f0e788d /drivers/gpu/drm/i915/i915_gem_internal.c
parent     00d3c14f14d51babd8aeafd5fa734ccf04f5ca3d (diff)
parent     64a577196d66b44e37384bc5c4d78c61f59d5b2a (diff)
Merge airlied/drm-next into drm-misc-next
Backmerge the main pull request to sync up with all the newly landed
drivers. Otherwise we'll have chaos even before 4.12 has started in
earnest.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_internal.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_internal.c  47
1 file changed, 32 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index 17ce53d0d092..933019e1b206 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -46,16 +46,39 @@ static struct sg_table *
 i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	unsigned int npages = obj->base.size / PAGE_SIZE;
 	struct sg_table *st;
 	struct scatterlist *sg;
+	unsigned int npages;
 	int max_order;
 	gfp_t gfp;
 
+	max_order = MAX_ORDER;
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl()) {
+		unsigned int max_segment;
+
+		max_segment = swiotlb_max_segment();
+		if (max_segment) {
+			max_segment = max_t(unsigned int, max_segment,
+					    PAGE_SIZE) >> PAGE_SHIFT;
+			max_order = min(max_order, ilog2(max_segment));
+		}
+	}
+#endif
+
+	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
+	if (IS_I965GM(i915) || IS_I965G(i915)) {
+		/* 965gm cannot relocate objects above 4GiB. */
+		gfp &= ~__GFP_HIGHMEM;
+		gfp |= __GFP_DMA32;
+	}
+
+create_st:
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (!st)
 		return ERR_PTR(-ENOMEM);
 
+	npages = obj->base.size / PAGE_SIZE;
 	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
 		kfree(st);
 		return ERR_PTR(-ENOMEM);
@@ -64,19 +87,6 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 	sg = st->sgl;
 	st->nents = 0;
 
-	max_order = MAX_ORDER;
-#ifdef CONFIG_SWIOTLB
-	if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
-		max_order = min(max_order, ilog2(IO_TLB_SEGPAGES));
-#endif
-
-	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
-	if (IS_I965GM(i915) || IS_I965G(i915)) {
-		/* 965gm cannot relocate objects above 4GiB. */
-		gfp &= ~__GFP_HIGHMEM;
-		gfp |= __GFP_DMA32;
-	}
-
 	do {
 		int order = min(fls(npages) - 1, max_order);
 		struct page *page;
@@ -104,8 +114,15 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 		sg = __sg_next(sg);
 	} while (1);
 
-	if (i915_gem_gtt_prepare_pages(obj, st))
+	if (i915_gem_gtt_prepare_pages(obj, st)) {
+		/* Failed to dma-map try again with single page sg segments */
+		if (get_order(st->sgl->length)) {
+			internal_free_pages(st);
+			max_order = 0;
+			goto create_st;
+		}
 		goto err;
+	}
 
 	/* Mark the pages as dontneed whilst they are still pinned. As soon
 	 * as they are unpinned they are allowed to be reaped by the shrinker,
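To see the control flow without the diff markers, here is a minimal userspace C sketch of the two mechanisms the patch combines: clamping the allocation order to the largest segment swiotlb can map, and a greedy highest-order-first fill loop whose whole pass is retried with single-page segments when DMA mapping fails. Every name here (PAGE_SZ, fill_pages(), try_alloc_chunk(), clamp_max_order(), and the MAX_ORDER value) is a hypothetical stand-in for the kernel primitives shown in the diff (alloc_pages(), swiotlb_max_segment(), ilog2(), i915_gem_gtt_prepare_pages()); this is a sketch of the strategy, not the driver code.

/*
 * Sketch only: stand-ins for kernel primitives, compiled as plain C.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SZ   4096u
#define MAX_ORDER 10	/* assumption: a typical kernel default */

/* fls(): position of the highest set bit, 1-based; 0 for x == 0. */
static int fls_int(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* Mirror of the new swiotlb clamp: never allocate a chunk larger
 * than the biggest segment the bounce buffer can map. */
static int clamp_max_order(int max_order, unsigned int max_segment_bytes)
{
	unsigned int pages = max_segment_bytes < PAGE_SZ ?
			     1 : max_segment_bytes / PAGE_SZ;
	int order = fls_int(pages) - 1;	/* ilog2(pages) */

	return order < max_order ? order : max_order;
}

/* Stand-in allocator: pretend chunks above order 4 are unavailable,
 * so the order-stepping fallback actually gets exercised. */
static bool try_alloc_chunk(int order)
{
	return order <= 4;
}

/*
 * Greedy loop from the patch: take the largest power-of-two chunk
 * that fits both the remaining page count and max_order, stepping
 * the order down whenever an allocation fails.
 */
static int fill_pages(unsigned int npages, int max_order)
{
	while (npages) {
		int order = fls_int(npages) - 1;

		if (order > max_order)
			order = max_order;
		while (!try_alloc_chunk(order)) {
			if (order-- == 0)
				return -1;	/* -ENOMEM in the kernel */
		}
		printf("allocated order-%d chunk (%u pages)\n",
		       order, 1u << order);
		npages -= 1u << order;
	}
	return 0;
}

int main(void)
{
	int max_order = clamp_max_order(MAX_ORDER, 256 * 1024);

	/*
	 * First pass with the full order budget.  In the kernel, if
	 * i915_gem_gtt_prepare_pages() then fails to dma-map the large
	 * segments, the patch frees them and jumps back to create_st
	 * with max_order = 0 -- the retry modelled below.
	 */
	if (fill_pages(37, max_order) != 0)
		return fill_pages(37, 0);
	return 0;
}

The design point of the retry is that an sg table built purely from order-0 (single page) segments is far more likely to be DMA-mappable through a bounce buffer, at the cost of a longer scatterlist; failing outright is reserved for the case where even single pages cannot be mapped.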