Diffstat (limited to 'drivers/gpu/drm/i915/i915_gpu_error.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 438
1 file changed, 218 insertions(+), 220 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index eecb87063c88..41ec7a183c73 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -42,16 +42,6 @@ static const char *engine_str(int engine)
 	}
 }
 
-static const char *pin_flag(int pinned)
-{
-	if (pinned > 0)
-		return " P";
-	else if (pinned < 0)
-		return " p";
-	else
-		return "";
-}
-
 static const char *tiling_flag(int tiling)
 {
 	switch (tiling) {
@@ -189,7 +179,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
 {
 	int i;
 
-	err_printf(m, "  %s [%d]:\n", name, count);
+	err_printf(m, "%s [%d]:\n", name, count);
 
 	while (count--) {
 		err_printf(m, "    %08x_%08x %8u %02x %02x [ ",
@@ -202,7 +192,6 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
 			err_printf(m, "%02x ", err->rseqno[i]);
 
 		err_printf(m, "] %02x", err->wseqno);
-		err_puts(m, pin_flag(err->pinned));
 		err_puts(m, tiling_flag(err->tiling));
 		err_puts(m, dirty_flag(err->dirty));
 		err_puts(m, purgeable_flag(err->purgeable));
@@ -247,14 +236,23 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
 	err_printf(m, "  HEAD:  0x%08x\n", ee->head);
 	err_printf(m, "  TAIL:  0x%08x\n", ee->tail);
 	err_printf(m, "  CTL:   0x%08x\n", ee->ctl);
+	err_printf(m, "  MODE:  0x%08x\n", ee->mode);
 	err_printf(m, "  HWS:   0x%08x\n", ee->hws);
 	err_printf(m, "  ACTHD: 0x%08x %08x\n",
 		   (u32)(ee->acthd>>32), (u32)ee->acthd);
 	err_printf(m, "  IPEIR: 0x%08x\n", ee->ipeir);
 	err_printf(m, "  IPEHR: 0x%08x\n", ee->ipehr);
 	err_printf(m, "  INSTDONE: 0x%08x\n", ee->instdone);
+	if (ee->batchbuffer) {
+		u64 start = ee->batchbuffer->gtt_offset;
+		u64 end = start + ee->batchbuffer->gtt_size;
+
+		err_printf(m, "  batch: [0x%08x_%08x, 0x%08x_%08x]\n",
+			   upper_32_bits(start), lower_32_bits(start),
+			   upper_32_bits(end), lower_32_bits(end));
+	}
 	if (INTEL_GEN(m->i915) >= 4) {
-		err_printf(m, "  BBADDR: 0x%08x %08x\n",
+		err_printf(m, "  BBADDR: 0x%08x_%08x\n",
 			   (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
 		err_printf(m, "  BB_STATE: 0x%08x\n", ee->bbstate);
 		err_printf(m, "  INSTPS: 0x%08x\n", ee->instps);
@@ -323,6 +321,16 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
 	}
 }
 
+static void err_print_capabilities(struct drm_i915_error_state_buf *m,
+				   const struct intel_device_info *info)
+{
+#define PRINT_FLAG(x)  err_printf(m, #x ": %s\n", yesno(info->x))
+#define SEP_SEMICOLON ;
+	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
+#undef PRINT_FLAG
+#undef SEP_SEMICOLON
+}
+
 int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 			    const struct i915_error_state_file_priv *error_priv)
 {
@@ -342,6 +350,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
 		   error->time.tv_usec);
 	err_printf(m, "Kernel: " UTS_RELEASE "\n");
+	err_print_capabilities(m, &error->device_info);
 	max_hangcheck_score = 0;
 	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
 		if (error->engine[i].hangcheck_score > max_hangcheck_score)
@@ -414,18 +423,33 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 		error_print_engine(m, &error->engine[i]);
 	}
 
-	for (i = 0; i < error->vm_count; i++) {
-		err_printf(m, "vm[%d]\n", i);
+	for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) {
+		char buf[128];
+		int len, first = 1;
+
+		if (!error->active_vm[i])
+			break;
+
+		len = scnprintf(buf, sizeof(buf), "Active (");
+		for (j = 0; j < ARRAY_SIZE(error->engine); j++) {
+			if (error->engine[j].vm != error->active_vm[i])
+				continue;
 
-		print_error_buffers(m, "Active",
+			len += scnprintf(buf + len, sizeof(buf), "%s%s",
+					 first ? "" : ", ",
+					 dev_priv->engine[j].name);
+			first = 0;
+		}
+		scnprintf(buf + len, sizeof(buf), ")");
+		print_error_buffers(m, buf,
 				    error->active_bo[i],
 				    error->active_bo_count[i]);
-
-		print_error_buffers(m, "Pinned",
-				    error->pinned_bo[i],
-				    error->pinned_bo_count[i]);
 	}
 
+	print_error_buffers(m, "Pinned (global)",
+			    error->pinned_bo,
+			    error->pinned_bo_count);
+
 	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
 		struct drm_i915_error_engine *ee = &error->engine[i];
 
@@ -455,9 +479,11 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 			   dev_priv->engine[i].name,
 			   ee->num_requests);
 		for (j = 0; j < ee->num_requests; j++) {
-			err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
+			err_printf(m, "  pid %d, seqno 0x%08x, emitted %ld, head 0x%08x, tail 0x%08x\n",
+				   ee->requests[j].pid,
 				   ee->requests[j].seqno,
 				   ee->requests[j].jiffies,
+				   ee->requests[j].head,
 				   ee->requests[j].tail);
 		}
 	}
@@ -533,7 +559,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 		}
 	}
 
-	if ((obj = error->semaphore_obj)) {
+	if ((obj = error->semaphore)) {
 		err_printf(m, "Semaphore page = 0x%08x\n",
 			   lower_32_bits(obj->gtt_offset));
 		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
@@ -624,15 +650,12 @@ static void i915_error_state_free(struct kref *error_ref)
 		kfree(ee->waiters);
 	}
 
-	i915_error_object_free(error->semaphore_obj);
+	i915_error_object_free(error->semaphore);
 
-	for (i = 0; i < error->vm_count; i++)
+	for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
 		kfree(error->active_bo[i]);
-
-	kfree(error->active_bo);
-	kfree(error->active_bo_count);
 	kfree(error->pinned_bo);
-	kfree(error->pinned_bo_count);
+
 	kfree(error->overlay);
 	kfree(error->display);
 	kfree(error);
@@ -640,46 +663,45 @@ static void i915_error_state_free(struct kref *error_ref)
 
 static struct drm_i915_error_object *
 i915_error_object_create(struct drm_i915_private *dev_priv,
-			 struct drm_i915_gem_object *src,
-			 struct i915_address_space *vm)
+			 struct i915_vma *vma)
 {
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct drm_i915_gem_object *src;
 	struct drm_i915_error_object *dst;
-	struct i915_vma *vma = NULL;
 	int num_pages;
 	bool use_ggtt;
 	int i = 0;
 	u64 reloc_offset;
 
-	if (src == NULL || src->pages == NULL)
+	if (!vma)
+		return NULL;
+
+	src = vma->obj;
+	if (!src->pages)
 		return NULL;
 
 	num_pages = src->base.size >> PAGE_SHIFT;
 
 	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
-	if (dst == NULL)
+	if (!dst)
 		return NULL;
 
-	if (i915_gem_obj_bound(src, vm))
-		dst->gtt_offset = i915_gem_obj_offset(src, vm);
-	else
-		dst->gtt_offset = -1;
+	dst->gtt_offset = vma->node.start;
+	dst->gtt_size = vma->node.size;
 
 	reloc_offset = dst->gtt_offset;
-	if (i915_is_ggtt(vm))
-		vma = i915_gem_obj_to_ggtt(src);
 	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
-		    vma && (vma->flags & I915_VMA_GLOBAL_BIND) &&
+		    (vma->flags & I915_VMA_GLOBAL_BIND) &&
 		    reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
 
 	/* Cannot access stolen address directly, try to use the aperture */
 	if (src->stolen) {
 		use_ggtt = true;
 
-		if (!(vma && vma->flags & I915_VMA_GLOBAL_BIND))
+		if (!(vma->flags & I915_VMA_GLOBAL_BIND))
 			goto unwind;
 
-		reloc_offset = i915_gem_obj_ggtt_offset(src);
+		reloc_offset = vma->node.start;
 		if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
 			goto unwind;
 	}
@@ -707,7 +729,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
 			 * captures what the GPU read.
 			 */
-			s = io_mapping_map_atomic_wc(ggtt->mappable,
+			s = io_mapping_map_atomic_wc(&ggtt->mappable,
 						     reloc_offset);
 			memcpy_fromio(d, s, PAGE_SIZE);
 			io_mapping_unmap_atomic(s);
@@ -739,8 +761,6 @@ unwind:
 	kfree(dst);
 	return NULL;
 }
-#define i915_error_ggtt_object_create(dev_priv, src) \
-	i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)
 
 /* The error capture is special as tries to run underneath the normal
  * locking rules - so we use the raw version of the i915_gem_active lookup.
@@ -777,10 +797,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 	err->gtt_offset = vma->node.start;
 	err->read_domains = obj->base.read_domains;
 	err->write_domain = obj->base.write_domain;
-	err->fence_reg = obj->fence_reg;
-	err->pinned = 0;
-	if (i915_gem_obj_is_pinned(obj))
-		err->pinned = 1;
+	err->fence_reg = vma->fence ? vma->fence->id : -1;
 	err->tiling = i915_gem_object_get_tiling(obj);
 	err->dirty = obj->dirty;
 	err->purgeable = obj->madv != I915_MADV_WILLNEED;
@@ -788,13 +805,17 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 	err->cache_level = obj->cache_level;
 }
 
-static u32 capture_active_bo(struct drm_i915_error_buffer *err,
-			     int count, struct list_head *head)
+static u32 capture_error_bo(struct drm_i915_error_buffer *err,
+			    int count, struct list_head *head,
+			    bool pinned_only)
 {
 	struct i915_vma *vma;
 	int i = 0;
 
 	list_for_each_entry(vma, head, vm_link) {
+		if (pinned_only && !i915_vma_is_pinned(vma))
+			continue;
+
 		capture_bo(err++, vma);
 		if (++i == count)
 			break;
@@ -803,28 +824,6 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err,
 	return i;
 }
 
-static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
-			     int count, struct list_head *head,
-			     struct i915_address_space *vm)
-{
-	struct drm_i915_gem_object *obj;
-	struct drm_i915_error_buffer * const first = err;
-	struct drm_i915_error_buffer * const last = err + count;
-
-	list_for_each_entry(obj, head, global_list) {
-		struct i915_vma *vma;
-
-		if (err == last)
-			break;
-
-		list_for_each_entry(vma, &obj->vma_list, obj_link)
-			if (vma->vm == vm && i915_vma_is_pinned(vma))
-				capture_bo(err++, vma);
-	}
-
-	return err - first;
-}
-
 /* Generate a semi-unique error code. The code is not meant to have meaning, The
  * code's only purpose is to try to prevent false duplicated bug reports by
  * grossly estimating a GPU error state.
@@ -884,7 +883,7 @@ static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
 	struct intel_engine_cs *to;
 	enum intel_engine_id id;
 
-	if (!error->semaphore_obj)
+	if (!error->semaphore)
 		return;
 
 	for_each_engine_id(to, dev_priv, id) {
@@ -897,7 +896,7 @@ static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
 
 		signal_offset =
 			(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
-		tmp = error->semaphore_obj->pages[0];
+		tmp = error->semaphore->pages[0];
 		idx = intel_engine_sync_index(engine, to);
 
 		ee->semaphore_mboxes[idx] = tmp[signal_offset];
@@ -1007,6 +1006,8 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
 	ee->head = I915_READ_HEAD(engine);
 	ee->tail = I915_READ_TAIL(engine);
 	ee->ctl = I915_READ_CTL(engine);
+	if (INTEL_GEN(dev_priv) > 2)
+		ee->mode = I915_READ_MODE(engine);
 
 	if (I915_NEED_GFX_HWS(dev_priv)) {
 		i915_reg_t mmio;
@@ -1062,45 +1063,76 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
 	}
 }
 
-
-static void i915_gem_record_active_context(struct intel_engine_cs *engine,
-					   struct drm_i915_error_state *error,
-					   struct drm_i915_error_engine *ee)
+static void engine_record_requests(struct intel_engine_cs *engine,
+				   struct drm_i915_gem_request *first,
+				   struct drm_i915_error_engine *ee)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_request *request;
+	int count;
 
-	/* Currently render ring is the only HW context user */
-	if (engine->id != RCS || !error->ccid)
+	count = 0;
+	request = first;
+	list_for_each_entry_from(request, &engine->request_list, link)
+		count++;
+	if (!count)
 		return;
 
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (!i915_gem_obj_ggtt_bound(obj))
-			continue;
+	ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC);
+	if (!ee->requests)
+		return;
+
+	ee->num_requests = count;
 
-		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
-			ee->ctx = i915_error_ggtt_object_create(dev_priv, obj);
+	count = 0;
+	request = first;
+	list_for_each_entry_from(request, &engine->request_list, link) {
+		struct drm_i915_error_request *erq;
+
+		if (count >= ee->num_requests) {
+			/*
+			 * If the ring request list was changed in
+			 * between the point where the error request
+			 * list was created and dimensioned and this
+			 * point then just exit early to avoid crashes.
+			 *
+			 * We don't need to communicate that the
+			 * request list changed state during error
+			 * state capture and that the error state is
+			 * slightly incorrect as a consequence since we
+			 * are typically only interested in the request
+			 * list state at the point of error state
+			 * capture, not in any changes happening during
+			 * the capture.
+			 */
 			break;
 		}
+
+		erq = &ee->requests[count++];
+		erq->seqno = request->fence.seqno;
+		erq->jiffies = request->emitted_jiffies;
+		erq->head = request->head;
+		erq->tail = request->tail;
+
+		rcu_read_lock();
+		erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+		rcu_read_unlock();
 	}
+	ee->num_requests = count;
 }
 
 static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 				  struct drm_i915_error_state *error)
 {
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	struct drm_i915_gem_request *request;
-	int i, count;
+	int i;
 
-	if (dev_priv->semaphore_obj) {
-		error->semaphore_obj =
-			i915_error_ggtt_object_create(dev_priv,
-						      dev_priv->semaphore_obj);
-	}
+	error->semaphore =
+		i915_error_object_create(dev_priv, dev_priv->semaphore);
 
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
 		struct intel_engine_cs *engine = &dev_priv->engine[i];
 		struct drm_i915_error_engine *ee = &error->engine[i];
+		struct drm_i915_gem_request *request;
 
 		ee->pid = -1;
 		ee->engine_id = -1;
@@ -1115,10 +1147,10 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 
 		request = i915_gem_find_active_request(engine);
 		if (request) {
-			struct i915_address_space *vm;
 			struct intel_ring *ring;
+			struct pid *pid;
 
-			vm = request->ctx->ppgtt ?
+			ee->vm = request->ctx->ppgtt ?
 				&request->ctx->ppgtt->base : &ggtt->base;
 
 			/* We need to copy these to an anonymous buffer
@@ -1127,19 +1159,23 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 			 */
 			ee->batchbuffer =
 				i915_error_object_create(dev_priv,
-							 request->batch_obj,
-							 vm);
+							 request->batch);
 
 			if (HAS_BROKEN_CS_TLB(dev_priv))
 				ee->wa_batchbuffer =
-					i915_error_ggtt_object_create(dev_priv,
-							 engine->scratch.obj);
+					i915_error_object_create(dev_priv,
+								 engine->scratch);
+
+			ee->ctx =
+				i915_error_object_create(dev_priv,
+							 request->ctx->engine[i].state);
 
-			if (request->pid) {
+			pid = request->ctx->pid;
+			if (pid) {
 				struct task_struct *task;
 
 				rcu_read_lock();
-				task = pid_task(request->pid, PIDTYPE_PID);
+				task = pid_task(pid, PIDTYPE_PID);
 				if (task) {
 					strcpy(ee->comm, task->comm);
 					ee->pid = task->pid;
@@ -1154,145 +1190,102 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 			ee->cpu_ring_head = ring->head;
 			ee->cpu_ring_tail = ring->tail;
 			ee->ringbuffer =
-				i915_error_ggtt_object_create(dev_priv,
-							      ring->obj);
-		}
-
-		ee->hws_page =
-			i915_error_ggtt_object_create(dev_priv,
-						      engine->status_page.obj);
+				i915_error_object_create(dev_priv, ring->vma);
 
-		ee->wa_ctx = i915_error_ggtt_object_create(dev_priv,
-							   engine->wa_ctx.obj);
-
-		i915_gem_record_active_context(engine, error, ee);
-
-		count = 0;
-		list_for_each_entry(request, &engine->request_list, link)
-			count++;
-
-		ee->num_requests = count;
-		ee->requests =
-			kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC);
-		if (!ee->requests) {
-			ee->num_requests = 0;
-			continue;
+			engine_record_requests(engine, request, ee);
 		}
 
-		count = 0;
-		list_for_each_entry(request, &engine->request_list, link) {
-			struct drm_i915_error_request *erq;
-
-			if (count >= ee->num_requests) {
-				/*
-				 * If the ring request list was changed in
-				 * between the point where the error request
-				 * list was created and dimensioned and this
-				 * point then just exit early to avoid crashes.
-				 *
-				 * We don't need to communicate that the
-				 * request list changed state during error
-				 * state capture and that the error state is
-				 * slightly incorrect as a consequence since we
-				 * are typically only interested in the request
-				 * list state at the point of error state
-				 * capture, not in any changes happening during
-				 * the capture.
-				 */
-				break;
-			}
+		ee->hws_page =
+			i915_error_object_create(dev_priv,
+						 engine->status_page.vma);
 
-			erq = &ee->requests[count++];
-			erq->seqno = request->fence.seqno;
-			erq->jiffies = request->emitted_jiffies;
-			erq->tail = request->postfix;
-		}
+		ee->wa_ctx =
+			i915_error_object_create(dev_priv, engine->wa_ctx.vma);
 	}
 }
 
-/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
- * VM.
- */
 static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
 				struct drm_i915_error_state *error,
 				struct i915_address_space *vm,
-				const int ndx)
+				int idx)
 {
-	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
-	struct drm_i915_gem_object *obj;
+	struct drm_i915_error_buffer *active_bo;
 	struct i915_vma *vma;
-	int i;
+	int count;
 
-	i = 0;
+	count = 0;
 	list_for_each_entry(vma, &vm->active_list, vm_link)
-		i++;
-	error->active_bo_count[ndx] = i;
-
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		list_for_each_entry(vma, &obj->vma_list, obj_link)
-			if (vma->vm == vm && i915_vma_is_pinned(vma))
-				i++;
-	}
-	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
-
-	if (i) {
-		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
-		if (active_bo)
-			pinned_bo = active_bo + error->active_bo_count[ndx];
-	}
+		count++;
 
+	active_bo = NULL;
+	if (count)
+		active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
 	if (active_bo)
-		error->active_bo_count[ndx] =
-			capture_active_bo(active_bo,
-					  error->active_bo_count[ndx],
-					  &vm->active_list);
-
-	if (pinned_bo)
-		error->pinned_bo_count[ndx] =
-			capture_pinned_bo(pinned_bo,
-					  error->pinned_bo_count[ndx],
-					  &dev_priv->mm.bound_list, vm);
-	error->active_bo[ndx] = active_bo;
-	error->pinned_bo[ndx] = pinned_bo;
+		count = capture_error_bo(active_bo, count, &vm->active_list, false);
+	else
+		count = 0;
+
+	error->active_vm[idx] = vm;
+	error->active_bo[idx] = active_bo;
+	error->active_bo_count[idx] = count;
 }
 
-static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
-				     struct drm_i915_error_state *error)
+static void i915_capture_active_buffers(struct drm_i915_private *dev_priv,
+					struct drm_i915_error_state *error)
 {
-	struct i915_address_space *vm;
-	int cnt = 0, i = 0;
-
-	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
-		cnt++;
-
-	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
-	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
-	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
-					 GFP_ATOMIC);
-	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
-					 GFP_ATOMIC);
-
-	if (error->active_bo == NULL ||
-	    error->pinned_bo == NULL ||
-	    error->active_bo_count == NULL ||
-	    error->pinned_bo_count == NULL) {
-		kfree(error->active_bo);
-		kfree(error->active_bo_count);
-		kfree(error->pinned_bo);
-		kfree(error->pinned_bo_count);
-
-		error->active_bo = NULL;
-		error->active_bo_count = NULL;
-		error->pinned_bo = NULL;
-		error->pinned_bo_count = NULL;
-	} else {
-		list_for_each_entry(vm, &dev_priv->vm_list, global_link)
-			i915_gem_capture_vm(dev_priv, error, vm, i++);
+	int cnt = 0, i, j;
+
+	BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo));
+	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm));
+	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count));
+
+	/* Scan each engine looking for unique active contexts/vm */
+	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+		struct drm_i915_error_engine *ee = &error->engine[i];
+		bool found;
+
+		if (!ee->vm)
+			continue;
 
-		error->vm_count = cnt;
+		found = false;
+		for (j = 0; j < i && !found; j++)
+			found = error->engine[j].vm == ee->vm;
+		if (!found)
+			i915_gem_capture_vm(dev_priv, error, ee->vm, cnt++);
 	}
 }
 
+static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
+					struct drm_i915_error_state *error)
+{
+	struct i915_address_space *vm = &dev_priv->ggtt.base;
+	struct drm_i915_error_buffer *bo;
+	struct i915_vma *vma;
+	int count_inactive, count_active;
+
+	count_inactive = 0;
+	list_for_each_entry(vma, &vm->active_list, vm_link)
+		count_inactive++;
+
+	count_active = 0;
+	list_for_each_entry(vma, &vm->inactive_list, vm_link)
+		count_active++;
+
+	bo = NULL;
+	if (count_inactive + count_active)
+		bo = kcalloc(count_inactive + count_active,
+			     sizeof(*bo), GFP_ATOMIC);
+	if (!bo)
+		return;
+
+	count_inactive = capture_error_bo(bo, count_inactive,
+					  &vm->active_list, true);
+	count_active = capture_error_bo(bo + count_inactive, count_active,
+					&vm->inactive_list, true);
+	error->pinned_bo_count = count_inactive + count_active;
+	error->pinned_bo = bo;
+}
+
 /* Capture all registers which don't fit into another category. */
 static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
 				   struct drm_i915_error_state *error)
@@ -1403,6 +1396,10 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
 #endif
 	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
 	error->suspend_count = dev_priv->suspend_count;
+
+	memcpy(&error->device_info,
+	       INTEL_INFO(dev_priv),
+	       sizeof(error->device_info));
 }
 
 /**
@@ -1436,9 +1433,10 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
 
 	i915_capture_gen_state(dev_priv, error);
 	i915_capture_reg_state(dev_priv, error);
-	i915_gem_capture_buffers(dev_priv, error);
 	i915_gem_record_fences(dev_priv, error);
 	i915_gem_record_rings(dev_priv, error);
+	i915_capture_active_buffers(dev_priv, error);
+	i915_capture_pinned_buffers(dev_priv, error);
 
 	do_gettimeofday(&error->time);
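
A few of the patterns this patch leans on, sketched in isolation. All of the
following are editorial userspace stand-ins for illustration, not driver code.

The new batch-range line in error_print_engine() (and the reworked BBADDR
line) print a 64-bit GGTT address as two zero-padded 32-bit halves in the
0x%08x_%08x style, via the kernel's upper_32_bits()/lower_32_bits() helpers.
A minimal sketch of the same convention, with local stand-ins for those
helpers and made-up offsets:

#include <inttypes.h>
#include <stdio.h>

static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }

int main(void)
{
	/* Illustrative offsets; the driver reads vma->node.start/size. */
	uint64_t start = 0x1ffff0000ULL;
	uint64_t end = start + 0x10000;

	printf("batch: [0x%08" PRIx32 "_%08" PRIx32 ", 0x%08" PRIx32 "_%08" PRIx32 "]\n",
	       upper_32_bits(start), lower_32_bits(start),
	       upper_32_bits(end), lower_32_bits(end));
	return 0;
}

This prints "batch: [0x00000001_ffff0000, 0x00000002_00000000]"; the fixed
underscore-split layout keeps 64-bit addresses aligned in the error dump.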
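The new err_print_capabilities() relies on the X-macro idiom: the driver
defines its feature-flag list once (DEV_INFO_FOR_EACH_FLAG) and expands it
here with a per-flag printer. A compilable sketch of that idiom under
assumed names (FOR_EACH_FLAG and the flags below are invented, not the
driver's list):

#include <stdio.h>

struct device_info {
	int is_mobile;
	int has_llc;
	int has_snoop;
};

#define FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(has_llc) sep \
	func(has_snoop)

static const char *yesno(int v) { return v ? "yes" : "no"; }

static void print_capabilities(const struct device_info *info)
{
#define PRINT_FLAG(x) printf(#x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON
}

int main(void)
{
	struct device_info info = { 1, 1, 0 };

	print_capabilities(&info);
	return 0;
}

Adding a flag to the list later updates the error dump automatically, which
is why the capture side only needs the one memcpy of device_info.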
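i915_error_state_to_str() now builds the per-vm heading ("Active (rcs, bcs,
...)") into a stack buffer with scnprintf(), which, unlike snprintf(),
returns the number of characters actually stored, so the buf + len
accumulation cannot step past what was written. (The continuation calls in
the hunk pass sizeof(buf) rather than the remaining space, which is only
safe while the label stays well under the 128-byte buffer.) A sketch of the
pattern with a vsnprintf()-based stand-in for the kernel's scnprintf(), the
remaining-space variant, and invented engine names:

#include <stdarg.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's scnprintf(): returns the number of
 * characters actually written to buf (excluding the NUL), never more than
 * size - 1, so callers can safely accumulate with buf + len. */
static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int n;

	va_start(ap, fmt);
	n = vsnprintf(buf, size, fmt, ap);
	va_end(ap);

	if (n < 0)
		return 0;
	return (size_t)n < size ? n : (int)(size - 1);
}

int main(void)
{
	/* Hypothetical names; the driver walks error->engine[] instead. */
	static const char *names[] = { "rcs", "bcs", "vcs" };
	char buf[128];
	size_t i;
	int len, first = 1;

	len = scnprintf(buf, sizeof(buf), "Active (");
	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		len += scnprintf(buf + len, sizeof(buf) - len, "%s%s",
				 first ? "" : ", ", names[i]);
		first = 0;
	}
	scnprintf(buf + len, sizeof(buf) - len, ")");

	puts(buf); /* Active (rcs, bcs, vcs) */
	return 0;
}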
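engine_record_requests() sizes its snapshot with one walk of the request
list, allocates with GFP_ATOMIC (error capture runs in atomic context and
must not sleep), then walks again to fill, keeping the bounds check and the
final ee->num_requests = count in case the list changed between the two
passes. The shape of that count/allocate/refill pattern in plain C, with an
ordinary singly-linked list standing in for list_for_each_entry_from():

#include <stdio.h>
#include <stdlib.h>

struct request {		/* stand-in for drm_i915_gem_request */
	unsigned int seqno;
	struct request *next;
};

struct error_request {		/* stand-in for drm_i915_error_request */
	unsigned int seqno;
};

static struct error_request *record_requests(const struct request *first,
					     int *num_out)
{
	const struct request *r;
	struct error_request *erq;
	int count, i;

	/* Pass 1: size the snapshot. */
	count = 0;
	for (r = first; r; r = r->next)
		count++;
	if (!count)
		return NULL;

	erq = calloc(count, sizeof(*erq));
	if (!erq)
		return NULL;

	/* Pass 2: fill, never trusting the list to still match pass 1. */
	i = 0;
	for (r = first; r; r = r->next) {
		if (i >= count)	/* list grew in between: stop early */
			break;
		erq[i++].seqno = r->seqno;
	}

	*num_out = i;	/* may be < count if the list shrank */
	return erq;
}

int main(void)
{
	struct request r2 = { 2, NULL }, r1 = { 1, &r2 };
	int n = 0;
	struct error_request *snap = record_requests(&r1, &n);

	printf("captured %d requests\n", n); /* 2 */
	free(snap);
	return 0;
}

Recording fewer entries than allocated is harmless; recording more would
overflow the array, hence the early break rather than any locking.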
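Finally, i915_capture_active_buffers() replaces the old global vm_list walk
with a scan over the fixed engine array, capturing each address space only
once: every engine's vm pointer is compared against the engines before it,
and i915_capture_pinned_buffers() then reuses capture_error_bo() with
pinned_only set over the GGTT's active and inactive lists. With at most
I915_NUM_ENGINES entries, the quadratic scan is cheaper than any extra
bookkeeping. The same dedup in isolation (NUM_ENGINES is an arbitrary
stand-in for ARRAY_SIZE(error->engine)):

#include <stdio.h>

#define NUM_ENGINES 5

/* Copy each distinct non-NULL vm pointer into out[]; returns how many. */
static int collect_unique_vms(void *vm[NUM_ENGINES], void *out[NUM_ENGINES])
{
	int i, j, cnt = 0;

	for (i = 0; i < NUM_ENGINES; i++) {
		int found = 0;

		if (!vm[i])
			continue;

		for (j = 0; j < i && !found; j++)
			found = vm[j] == vm[i];
		if (!found)
			out[cnt++] = vm[i];
	}
	return cnt;
}

int main(void)
{
	int a, b;	/* two fake address spaces */
	void *vm[NUM_ENGINES] = { &a, &b, &a, NULL, &b };
	void *out[NUM_ENGINES];

	printf("%d unique vms\n", collect_unique_vms(vm, out)); /* 2 */
	return 0;
}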