Diffstat (limited to 'drivers/gpu/drm/i915/gvt')
26 files changed, 1060 insertions, 699 deletions
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile index 271fb46d4dd0..ea8324abc784 100644 --- a/drivers/gpu/drm/i915/gvt/Makefile +++ b/drivers/gpu/drm/i915/gvt/Makefile @@ -5,5 +5,5 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \ execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \ fb_decoder.o dmabuf.o page_track.o -ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) +ccflags-y += -I $(srctree)/$(src) -I $(srctree)/$(src)/$(GVT_DIR)/ i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE)) diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 1fa2f65c3cd1..771420453f82 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -35,6 +35,7 @@ */ #include "i915_drv.h" +#include "i915_gem_fence_reg.h" #include "gvt.h" static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) @@ -60,14 +61,14 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) flags = PIN_MAPPABLE; } - mutex_lock(&dev_priv->drm.struct_mutex); + mutex_lock(&dev_priv->ggtt.vm.mutex); mmio_hw_access_pre(dev_priv); ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node, size, I915_GTT_PAGE_SIZE, I915_COLOR_UNEVICTABLE, start, end, flags); mmio_hw_access_post(dev_priv); - mutex_unlock(&dev_priv->drm.struct_mutex); + mutex_unlock(&dev_priv->ggtt.vm.mutex); if (ret) gvt_err("fail to alloc %s gm space from host\n", high_gm ? "high" : "low"); @@ -97,9 +98,9 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu) return 0; out_free_aperture: - mutex_lock(&dev_priv->drm.struct_mutex); + mutex_lock(&dev_priv->ggtt.vm.mutex); drm_mm_remove_node(&vgpu->gm.low_gm_node); - mutex_unlock(&dev_priv->drm.struct_mutex); + mutex_unlock(&dev_priv->ggtt.vm.mutex); return ret; } @@ -107,10 +108,10 @@ static void free_vgpu_gm(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - mutex_lock(&dev_priv->drm.struct_mutex); + mutex_lock(&dev_priv->ggtt.vm.mutex); drm_mm_remove_node(&vgpu->gm.low_gm_node); drm_mm_remove_node(&vgpu->gm.high_gm_node); - mutex_unlock(&dev_priv->drm.struct_mutex); + mutex_unlock(&dev_priv->ggtt.vm.mutex); } /** @@ -128,10 +129,10 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu, { struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *dev_priv = gvt->dev_priv; - struct drm_i915_fence_reg *reg; + struct i915_fence_reg *reg; i915_reg_t fence_reg_lo, fence_reg_hi; - assert_rpm_wakelock_held(dev_priv); + assert_rpm_wakelock_held(&dev_priv->runtime_pm); if (WARN_ON(fence >= vgpu_fence_sz(vgpu))) return; @@ -163,40 +164,41 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *dev_priv = gvt->dev_priv; - struct drm_i915_fence_reg *reg; + struct i915_fence_reg *reg; u32 i; if (WARN_ON(!vgpu_fence_sz(vgpu))) return; - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(&dev_priv->runtime_pm); - mutex_lock(&dev_priv->drm.struct_mutex); + mutex_lock(&dev_priv->ggtt.vm.mutex); _clear_vgpu_fence(vgpu); for (i = 0; i < vgpu_fence_sz(vgpu); i++) { reg = vgpu->fence.regs[i]; i915_unreserve_fence(reg); vgpu->fence.regs[i] = NULL; } - mutex_unlock(&dev_priv->drm.struct_mutex); + mutex_unlock(&dev_priv->ggtt.vm.mutex); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); } static int alloc_vgpu_fence(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *dev_priv = gvt->dev_priv; - struct 
drm_i915_fence_reg *reg; + struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; + struct i915_fence_reg *reg; int i; - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(rpm); /* Request fences from host */ - mutex_lock(&dev_priv->drm.struct_mutex); + mutex_lock(&dev_priv->ggtt.vm.mutex); for (i = 0; i < vgpu_fence_sz(vgpu); i++) { - reg = i915_reserve_fence(dev_priv); + reg = i915_reserve_fence(&dev_priv->ggtt); if (IS_ERR(reg)) goto out_free_fence; @@ -205,8 +207,8 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu) _clear_vgpu_fence(vgpu); - mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put_unchecked(dev_priv); + mutex_unlock(&dev_priv->ggtt.vm.mutex); + intel_runtime_pm_put_unchecked(rpm); return 0; out_free_fence: gvt_vgpu_err("Failed to alloc fences\n"); @@ -218,8 +220,8 @@ out_free_fence: i915_unreserve_fence(reg); vgpu->fence.regs[i] = NULL; } - mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put_unchecked(dev_priv); + mutex_unlock(&dev_priv->ggtt.vm.mutex); + intel_runtime_pm_put_unchecked(rpm); return -ENOSPC; } @@ -315,9 +317,9 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(&dev_priv->runtime_pm); _clear_vgpu_fence(vgpu); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); } /** diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 3592d04c33b2..6a3ac8cde95d 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -35,7 +35,9 @@ */ #include <linux/slab.h> + #include "i915_drv.h" +#include "gt/intel_ring.h" #include "gvt.h" #include "i915_pvinfo.h" #include "trace.h" @@ -374,29 +376,45 @@ typedef int (*parser_cmd_handler)(struct parser_exec_state *s); #define ADDR_FIX_4(x1, x2, x3, x4) (ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4)) #define ADDR_FIX_5(x1, x2, x3, x4, x5) (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5)) +#define DWORD_FIELD(dword, end, start) \ + FIELD_GET(GENMASK(end, start), cmd_val(s, dword)) + +#define OP_LENGTH_BIAS 2 +#define CMD_LEN(value) (value + OP_LENGTH_BIAS) + +static int gvt_check_valid_cmd_length(int len, int valid_len) +{ + if (valid_len != len) { + gvt_err("len is not valid: len=%u valid_len=%u\n", + len, valid_len); + return -EFAULT; + } + return 0; +} + struct cmd_info { const char *name; u32 opcode; -#define F_LEN_MASK (1U<<0) +#define F_LEN_MASK 3U #define F_LEN_CONST 1U #define F_LEN_VAR 0U +/* value is const although LEN maybe variable */ +#define F_LEN_VAR_FIXED (1<<1) /* * command has its own ip advance logic * e.g. 
MI_BATCH_START, MI_BATCH_END */ -#define F_IP_ADVANCE_CUSTOM (1<<1) - -#define F_POST_HANDLE (1<<2) +#define F_IP_ADVANCE_CUSTOM (1<<2) u32 flag; -#define R_RCS (1 << RCS) -#define R_VCS1 (1 << VCS) -#define R_VCS2 (1 << VCS2) +#define R_RCS BIT(RCS0) +#define R_VCS1 BIT(VCS0) +#define R_VCS2 BIT(VCS1) #define R_VCS (R_VCS1 | R_VCS2) -#define R_BCS (1 << BCS) -#define R_VECS (1 << VECS) +#define R_BCS BIT(BCS0) +#define R_VECS BIT(VECS0) #define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS) /* rings that support this cmd: BLT/RCS/VCS/VECS */ u16 rings; @@ -418,9 +436,12 @@ struct cmd_info { * flag == F_LEN_VAR : length bias bits * Note: length is in DWord */ - u8 len; + u32 len; parser_cmd_handler handler; + + /* valid length in DWord */ + u32 valid_len; }; struct cmd_entry { @@ -558,7 +579,7 @@ static const struct decode_info decode_info_vebox = { }; static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { - [RCS] = { + [RCS0] = { &decode_info_mi, NULL, NULL, @@ -569,7 +590,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { NULL, }, - [VCS] = { + [VCS0] = { &decode_info_mi, NULL, NULL, @@ -580,7 +601,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { NULL, }, - [BCS] = { + [BCS0] = { &decode_info_mi, NULL, &decode_info_2d, @@ -591,7 +612,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { NULL, }, - [VECS] = { + [VECS0] = { &decode_info_mi, NULL, NULL, @@ -602,7 +623,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { NULL, }, - [VCS2] = { + [VCS1] = { &decode_info_mi, NULL, NULL, @@ -631,8 +652,7 @@ static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e; hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) { - if ((opcode == e->info->opcode) && - (e->info->rings & (1 << ring_id))) + if (opcode == e->info->opcode && e->info->rings & BIT(ring_id)) return e->info; } return NULL; @@ -897,12 +917,16 @@ static int cmd_reg_handler(struct parser_exec_state *s, } /* TODO - * Right now only scan LRI command on KBL and in inhibit context. - * It's good enough to support initializing mmio by lri command in - * vgpu inhibit context on KBL. + * In order to let workload with inhibit context to generate + * correct image data into memory, vregs values will be loaded to + * hw via LRIs in the workload with inhibit context. But as + * indirect context is loaded prior to LRIs in workload, we don't + * want reg values specified in indirect context overwritten by + * LRIs in workloads. So, when scanning an indirect context, we + * update reg values in it into vregs, so LRIs in workload with + * inhibit context will restore with correct values */ - if ((IS_KABYLAKE(s->vgpu->gvt->dev_priv) - || IS_COFFEELAKE(s->vgpu->gvt->dev_priv)) && + if (IS_GEN(gvt->dev_priv, 9) && intel_gvt_mmio_is_in_ctx(gvt, offset) && !strncmp(cmd, "lri", 3)) { intel_gvt_hypervisor_read_gpa(s->vgpu, @@ -941,17 +965,26 @@ static int cmd_handler_lri(struct parser_exec_state *s) int i, ret = 0; int cmd_len = cmd_length(s); struct intel_gvt *gvt = s->vgpu->gvt; + u32 valid_len = CMD_LEN(1); + + /* + * Official intel docs are somewhat sloppy , check the definition of + * MI_LOAD_REGISTER_IMM. 
+ */ + #define MAX_VALID_LEN 127 + if ((cmd_len < valid_len) || (cmd_len > MAX_VALID_LEN)) { + gvt_err("len is not valid: len=%u valid_len=%u\n", + cmd_len, valid_len); + return -EFAULT; + } for (i = 1; i < cmd_len; i += 2) { - if (IS_BROADWELL(gvt->dev_priv) && - (s->ring_id != RCS)) { - if (s->ring_id == BCS && - cmd_reg(s, i) == - i915_mmio_reg_offset(DERRMR)) + if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) { + if (s->ring_id == BCS0 && + cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR)) ret |= 0; else - ret |= (cmd_reg_inhibit(s, i)) ? - -EBADRQC : 0; + ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0; } if (ret) break; @@ -1047,27 +1080,27 @@ struct cmd_interrupt_event { }; static struct cmd_interrupt_event cmd_interrupt_events[] = { - [RCS] = { + [RCS0] = { .pipe_control_notify = RCS_PIPE_CONTROL, .mi_flush_dw = INTEL_GVT_EVENT_RESERVED, .mi_user_interrupt = RCS_MI_USER_INTERRUPT, }, - [BCS] = { + [BCS0] = { .pipe_control_notify = INTEL_GVT_EVENT_RESERVED, .mi_flush_dw = BCS_MI_FLUSH_DW, .mi_user_interrupt = BCS_MI_USER_INTERRUPT, }, - [VCS] = { + [VCS0] = { .pipe_control_notify = INTEL_GVT_EVENT_RESERVED, .mi_flush_dw = VCS_MI_FLUSH_DW, .mi_user_interrupt = VCS_MI_USER_INTERRUPT, }, - [VCS2] = { + [VCS1] = { .pipe_control_notify = INTEL_GVT_EVENT_RESERVED, .mi_flush_dw = VCS2_MI_FLUSH_DW, .mi_user_interrupt = VCS2_MI_USER_INTERRUPT, }, - [VECS] = { + [VECS0] = { .pipe_control_notify = INTEL_GVT_EVENT_RESERVED, .mi_flush_dw = VECS_MI_FLUSH_DW, .mi_user_interrupt = VECS_MI_USER_INTERRUPT, @@ -1081,6 +1114,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s) bool index_mode = false; unsigned int post_sync; int ret = 0; + u32 hws_pga, val; post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14; @@ -1104,6 +1138,15 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s) index_mode = true; ret |= cmd_address_audit(s, gma, sizeof(u64), index_mode); + if (ret) + return ret; + if (index_mode) { + hws_pga = s->vgpu->hws_pga[s->ring_id]; + gma = hws_pga + gma; + patch_value(s, cmd_ptr(s, 2), gma); + val = cmd_val(s, 1) & (~(1 << 21)); + patch_value(s, cmd_ptr(s, 1), val); + } } } } @@ -1321,8 +1364,14 @@ static int gen8_update_plane_mmio_from_mi_display_flip( info->tile_val << 10); } - vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++; - intel_vgpu_trigger_virtual_event(vgpu, info->event); + if (info->plane == PLANE_PRIMARY) + vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(info->pipe))++; + + if (info->async_flip) + intel_vgpu_trigger_virtual_event(vgpu, info->event); + else + set_bit(info->event, vgpu->irq.flip_done_event[info->pipe]); + return 0; } @@ -1359,6 +1408,15 @@ static int cmd_handler_mi_display_flip(struct parser_exec_state *s) int ret; int i; int len = cmd_length(s); + u32 valid_len = CMD_LEN(1); + + /* Flip Type == Stereo 3D Flip */ + if (DWORD_FIELD(2, 1, 0) == 2) + valid_len++; + ret = gvt_check_valid_cmd_length(cmd_length(s), + valid_len); + if (ret) + return ret; ret = decode_mi_display_flip(s, &info); if (ret) { @@ -1478,12 +1536,21 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s) int op_size = (cmd_length(s) - 3) * sizeof(u32); int core_id = (cmd_val(s, 2) & (1 << 0)) ? 
1 : 0; unsigned long gma, gma_low, gma_high; + u32 valid_len = CMD_LEN(2); int ret = 0; /* check ppggt */ if (!(cmd_val(s, 0) & (1 << 22))) return 0; + /* check if QWORD */ + if (DWORD_FIELD(0, 21, 21)) + valid_len++; + ret = gvt_check_valid_cmd_length(cmd_length(s), + valid_len); + if (ret) + return ret; + gma = cmd_val(s, 2) & GENMASK(31, 2); if (gmadr_bytes == 8) { @@ -1526,11 +1593,20 @@ static int cmd_handler_mi_op_2f(struct parser_exec_state *s) int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) * sizeof(u32); unsigned long gma, gma_high; + u32 valid_len = CMD_LEN(1); int ret = 0; if (!(cmd_val(s, 0) & (1 << 22))) return ret; + /* check if QWORD */ + if (DWORD_FIELD(0, 20, 19) == 1) + valid_len += 8; + ret = gvt_check_valid_cmd_length(cmd_length(s), + valid_len); + if (ret) + return ret; + gma = cmd_val(s, 1) & GENMASK(31, 2); if (gmadr_bytes == 8) { gma_high = cmd_val(s, 2) & GENMASK(15, 0); @@ -1567,6 +1643,17 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s) unsigned long gma; bool index_mode = false; int ret = 0; + u32 hws_pga, val; + u32 valid_len = CMD_LEN(2); + + ret = gvt_check_valid_cmd_length(cmd_length(s), + valid_len); + if (ret) { + /* Check again for Qword */ + ret = gvt_check_valid_cmd_length(cmd_length(s), + ++valid_len); + return ret; + } /* Check post-sync and ppgtt bit */ if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) { @@ -1577,6 +1664,15 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s) if (cmd_val(s, 0) & (1 << 21)) index_mode = true; ret = cmd_address_audit(s, gma, sizeof(u64), index_mode); + if (ret) + return ret; + if (index_mode) { + hws_pga = s->vgpu->hws_pga[s->ring_id]; + gma = hws_pga + gma; + patch_value(s, cmd_ptr(s, 1), gma); + val = cmd_val(s, 0) & (~(1 << 21)); + patch_value(s, cmd_ptr(s, 0), val); + } } /* Check notify bit */ if ((cmd_val(s, 0) & (1 << 8))) @@ -1635,7 +1731,9 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s) return 1; } -static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size) +static int find_bb_size(struct parser_exec_state *s, + unsigned long *bb_size, + unsigned long *bb_end_cmd_offset) { unsigned long gma = 0; const struct cmd_info *info; @@ -1647,6 +1745,7 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size) s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm; *bb_size = 0; + *bb_end_cmd_offset = 0; /* get the start gm address of the batch buffer */ gma = get_gma_bb_from_cmd(s, 1); @@ -1682,6 +1781,10 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size) /* chained batch buffer */ bb_end = true; } + + if (bb_end) + *bb_end_cmd_offset = *bb_size; + cmd_len = get_cmd_length(info, cmd) << 2; *bb_size += cmd_len; gma += cmd_len; @@ -1690,23 +1793,47 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size) return 0; } +static int audit_bb_end(struct parser_exec_state *s, void *va) +{ + struct intel_vgpu *vgpu = s->vgpu; + u32 cmd = *(u32 *)va; + const struct cmd_info *info; + + info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); + if (info == NULL) { + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n", + cmd, get_opcode(cmd, s->ring_id), + (s->buf_addr_type == PPGTT_BUFFER) ? 
+ "ppgtt" : "ggtt", s->ring_id, s->workload); + return -EBADRQC; + } + + if ((info->opcode == OP_MI_BATCH_BUFFER_END) || + ((info->opcode == OP_MI_BATCH_BUFFER_START) && + (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0))) + return 0; + + return -EBADRQC; +} + static int perform_bb_shadow(struct parser_exec_state *s) { struct intel_vgpu *vgpu = s->vgpu; struct intel_vgpu_shadow_bb *bb; unsigned long gma = 0; unsigned long bb_size; + unsigned long bb_end_cmd_offset; int ret = 0; struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ? s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm; - unsigned long gma_start_offset = 0; + unsigned long start_offset = 0; /* get the start gm address of the batch buffer */ gma = get_gma_bb_from_cmd(s, 1); if (gma == INTEL_GVT_INVALID_ADDR) return -EFAULT; - ret = find_bb_size(s, &bb_size); + ret = find_bb_size(s, &bb_size, &bb_end_cmd_offset); if (ret) return ret; @@ -1716,7 +1843,7 @@ static int perform_bb_shadow(struct parser_exec_state *s) bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true; - /* the gma_start_offset stores the batch buffer's start gma's + /* the start_offset stores the batch buffer's start gma's * offset relative to page boundary. so for non-privileged batch * buffer, the shadowed gem object holds exactly the same page * layout as original gem object. This is for the convience of @@ -1728,16 +1855,17 @@ static int perform_bb_shadow(struct parser_exec_state *s) * that of shadowed page. */ if (bb->ppgtt) - gma_start_offset = gma & ~I915_GTT_PAGE_MASK; + start_offset = gma & ~I915_GTT_PAGE_MASK; - bb->obj = i915_gem_object_create(s->vgpu->gvt->dev_priv, - roundup(bb_size + gma_start_offset, PAGE_SIZE)); + bb->obj = i915_gem_object_create_shmem(s->vgpu->gvt->dev_priv, + round_up(bb_size + start_offset, + PAGE_SIZE)); if (IS_ERR(bb->obj)) { ret = PTR_ERR(bb->obj); goto err_free_bb; } - ret = i915_gem_obj_prepare_shmem_write(bb->obj, &bb->clflush); + ret = i915_gem_object_prepare_write(bb->obj, &bb->clflush); if (ret) goto err_free_obj; @@ -1754,13 +1882,17 @@ static int perform_bb_shadow(struct parser_exec_state *s) ret = copy_gma_to_hva(s->vgpu, mm, gma, gma + bb_size, - bb->va + gma_start_offset); + bb->va + start_offset); if (ret < 0) { gvt_vgpu_err("fail to copy guest ring buffer\n"); ret = -EFAULT; goto err_unmap; } + ret = audit_bb_end(s, bb->va + start_offset + bb_end_cmd_offset); + if (ret) + goto err_unmap; + INIT_LIST_HEAD(&bb->list); list_add(&bb->list, &s->workload->shadow_bb); @@ -1780,13 +1912,13 @@ static int perform_bb_shadow(struct parser_exec_state *s) * buffer's gma in pair. After all, we don't want to pin the shadow * buffer here (too early). 
*/ - s->ip_va = bb->va + gma_start_offset; + s->ip_va = bb->va + start_offset; s->ip_gma = gma; return 0; err_unmap: i915_gem_object_unpin_map(bb->obj); err_finish_shmem_access: - i915_gem_obj_finish_shmem_access(bb->obj); + i915_gem_object_finish_access(bb->obj); err_free_obj: i915_gem_object_put(bb->obj); err_free_bb: @@ -1885,21 +2017,24 @@ static const struct cmd_info cmd_info[] = { {"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1, NULL}, - {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR | F_POST_HANDLE, + {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR, R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip}, - {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR, R_ALL, D_ALL, - 0, 8, NULL}, + {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR | F_LEN_VAR_FIXED, + R_ALL, D_ALL, 0, 8, NULL, CMD_LEN(1)}, {"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL}, - {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, + {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, + D_ALL, 0, 8, NULL, CMD_LEN(0)}, - {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL, - D_BDW_PLUS, 0, 8, NULL}, + {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, + F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, 0, 8, + NULL, CMD_LEN(0)}, - {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, - D_BDW_PLUS, ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait}, + {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, + F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, ADDR_FIX_1(2), + 8, cmd_handler_mi_semaphore_wait, CMD_LEN(2)}, {"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm}, @@ -1913,8 +2048,9 @@ static const struct cmd_info cmd_info[] = { {"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10, cmd_handler_mi_update_gtt}, - {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR, R_ALL, - D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm}, + {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, + F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8, + cmd_handler_srm, CMD_LEN(2)}, {"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6, cmd_handler_mi_flush_dw}, @@ -1922,26 +2058,30 @@ static const struct cmd_info cmd_info[] = { {"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1), 10, cmd_handler_mi_clflush}, - {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR, R_ALL, - D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count}, + {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, + F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(1), 6, + cmd_handler_mi_report_perf_count, CMD_LEN(2)}, - {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR, R_ALL, - D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm}, + {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, + F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8, + cmd_handler_lrm, CMD_LEN(2)}, - {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR, R_ALL, - D_ALL, 0, 8, cmd_handler_lrr}, + {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, + F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, 0, 8, + cmd_handler_lrr, CMD_LEN(1)}, - {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR, R_RCS, - D_ALL, 0, 8, NULL}, + {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, + F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, D_ALL, 0, + 8, NULL, CMD_LEN(2)}, - {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL, - ADDR_FIX_1(2), 8, NULL}, + {"MI_LOAD_URB_MEM", 
OP_MI_LOAD_URB_MEM, F_LEN_VAR | F_LEN_VAR_FIXED, + R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL, CMD_LEN(2)}, {"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL}, - {"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2), - 8, cmd_handler_mi_op_2e}, + {"MI_OP_2E", OP_MI_2E, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, + ADDR_FIX_2(1, 2), 8, cmd_handler_mi_op_2e, CMD_LEN(3)}, {"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1), 8, cmd_handler_mi_op_2f}, @@ -1951,8 +2091,8 @@ static const struct cmd_info cmd_info[] = { cmd_handler_mi_batch_buffer_start}, {"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END, - F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8, - cmd_handler_mi_conditional_batch_buffer_end}, + F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8, + cmd_handler_mi_conditional_batch_buffer_end, CMD_LEN(2)}, {"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST, R_RCS | R_BCS, D_ALL, 0, 2, NULL}, @@ -2504,7 +2644,7 @@ static const struct cmd_info cmd_info[] = { 0, 12, NULL}, {"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS, - 0, 20, NULL}, + 0, 12, NULL}, }; static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e) @@ -2542,6 +2682,13 @@ static int cmd_parser_exec(struct parser_exec_state *s) cmd_length(s), s->buf_type, s->buf_addr_type, s->workload, info->name); + if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) { + ret = gvt_check_valid_cmd_length(cmd_length(s), + info->valid_len); + if (ret) + return ret; + } + if (info->handler) { ret = info->handler(s); if (ret < 0) { @@ -2647,11 +2794,6 @@ static int scan_workload(struct intel_vgpu_workload *workload) gma_head == gma_tail) return 0; - if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) { - ret = -EINVAL; - goto out; - } - ret = ip_gma_set(&s, gma_head); if (ret) goto out; @@ -2697,11 +2839,6 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) s.workload = workload; s.is_ctx_wa = true; - if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) { - ret = -EINVAL; - goto out; - } - ret = ip_gma_set(&s, gma_head); if (ret) goto out; @@ -2803,9 +2940,9 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) int ret = 0; void *map; - obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv, - roundup(ctx_size + CACHELINE_BYTES, - PAGE_SIZE)); + obj = i915_gem_object_create_shmem(workload->vgpu->gvt->dev_priv, + roundup(ctx_size + CACHELINE_BYTES, + PAGE_SIZE)); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -2817,7 +2954,9 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) goto put_obj; } + i915_gem_object_lock(obj); ret = i915_gem_object_set_to_cpu_domain(obj, false); + i915_gem_object_unlock(obj); if (ret) { gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n"); goto unmap_src; diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c index 2ec89bcb59f1..285f6011a537 100644 --- a/drivers/gpu/drm/i915/gvt/debugfs.c +++ b/drivers/gpu/drm/i915/gvt/debugfs.c @@ -58,12 +58,12 @@ static int mmio_offset_compare(void *priv, static inline int mmio_diff_handler(struct intel_gvt *gvt, u32 offset, void *data) { - struct drm_i915_private *dev_priv = gvt->dev_priv; + struct drm_i915_private *i915 = gvt->dev_priv; struct mmio_diff_param *param = data; struct diff_mmio *node; u32 preg, vreg; - preg = I915_READ_NOTRACE(_MMIO(offset)); + preg = intel_uncore_read_notrace(&i915->uncore, _MMIO(offset)); vreg = 
vgpu_vreg(param->vgpu, offset); if (preg != vreg) { @@ -189,36 +189,19 @@ DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops, /** * intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU * @vgpu: a vGPU - * - * Returns: - * Zero on success, negative error code if failed. */ -int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu) +void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu) { - struct dentry *ent; - char name[10] = ""; + char name[16] = ""; - sprintf(name, "vgpu%d", vgpu->id); + snprintf(name, 16, "vgpu%d", vgpu->id); vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root); - if (!vgpu->debugfs) - return -ENOMEM; - - ent = debugfs_create_bool("active", 0444, vgpu->debugfs, - &vgpu->active); - if (!ent) - return -ENOMEM; - - ent = debugfs_create_file("mmio_diff", 0444, vgpu->debugfs, - vgpu, &vgpu_mmio_diff_fops); - if (!ent) - return -ENOMEM; - ent = debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs, - vgpu, &vgpu_scan_nonprivbb_fops); - if (!ent) - return -ENOMEM; - - return 0; + debugfs_create_bool("active", 0444, vgpu->debugfs, &vgpu->active); + debugfs_create_file("mmio_diff", 0444, vgpu->debugfs, vgpu, + &vgpu_mmio_diff_fops); + debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs, vgpu, + &vgpu_scan_nonprivbb_fops); } /** @@ -234,27 +217,15 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu) /** * intel_gvt_debugfs_init - register gvt debugfs root entry * @gvt: GVT device - * - * Returns: - * zero on success, negative if failed. */ -int intel_gvt_debugfs_init(struct intel_gvt *gvt) +void intel_gvt_debugfs_init(struct intel_gvt *gvt) { struct drm_minor *minor = gvt->dev_priv->drm.primary; - struct dentry *ent; gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root); - if (!gvt->debugfs_root) { - gvt_err("Cannot create debugfs dir\n"); - return -ENOMEM; - } - ent = debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root, - &gvt->mmio.num_tracked_mmio); - if (!ent) - return -ENOMEM; - - return 0; + debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root, + &gvt->mmio.num_tracked_mmio); } /** diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index e3f9caa7839f..e1c313da6c00 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -407,7 +407,6 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe) if (!pipe_is_enabled(vgpu, pipe)) continue; - vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++; intel_vgpu_trigger_virtual_event(vgpu, event); } diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 5d887f7cc0d5..e451298d11c3 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -45,6 +45,7 @@ static int vgpu_gem_get_pages( int i, ret; gen8_pte_t __iomem *gtt_entries; struct intel_vgpu_fb_info *fb_info; + u32 page_num; fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info; if (WARN_ON(!fb_info)) @@ -54,14 +55,15 @@ static int vgpu_gem_get_pages( if (unlikely(!st)) return -ENOMEM; - ret = sg_alloc_table(st, fb_info->size, GFP_KERNEL); + page_num = obj->base.size >> PAGE_SHIFT; + ret = sg_alloc_table(st, page_num, GFP_KERNEL); if (ret) { kfree(st); return ret; } gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + (fb_info->start >> PAGE_SHIFT); - for_each_sg(st->sgl, sg, fb_info->size, i) { + for_each_sg(st->sgl, sg, page_num, i) { sg->offset = 0; sg->length = PAGE_SIZE; sg_dma_address(sg) = @@ -150,16 +152,17 @@ static const struct drm_i915_gem_object_ops 
intel_vgpu_gem_ops = { static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, struct intel_vgpu_fb_info *info) { + static struct lock_class_key lock_class; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; - obj = i915_gem_object_alloc(dev_priv); + obj = i915_gem_object_alloc(); if (obj == NULL) return NULL; drm_gem_private_object_init(dev, &obj->base, - info->size << PAGE_SHIFT); - i915_gem_object_init(obj, &intel_vgpu_gem_ops); + roundup(info->size, PAGE_SIZE)); + i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class); obj->read_domains = I915_GEM_DOMAIN_GTT; obj->write_domain = 0; @@ -206,10 +209,11 @@ static int vgpu_get_plane_info(struct drm_device *dev, struct intel_vgpu_fb_info *info, int plane_id) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_vgpu_primary_plane_format p; struct intel_vgpu_cursor_plane_format c; - int ret; + int ret, tile_height = 1; + + memset(info, 0, sizeof(*info)); if (plane_id == DRM_PLANE_TYPE_PRIMARY) { ret = intel_vgpu_decode_primary_plane(vgpu, &p); @@ -228,12 +232,15 @@ static int vgpu_get_plane_info(struct drm_device *dev, break; case PLANE_CTL_TILED_X: info->drm_format_mod = I915_FORMAT_MOD_X_TILED; + tile_height = 8; break; case PLANE_CTL_TILED_Y: info->drm_format_mod = I915_FORMAT_MOD_Y_TILED; + tile_height = 32; break; case PLANE_CTL_TILED_YF: info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED; + tile_height = 32; break; default: gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled); @@ -264,8 +271,7 @@ static int vgpu_get_plane_info(struct drm_device *dev, return -EINVAL; } - info->size = (info->stride * info->height + PAGE_SIZE - 1) - >> PAGE_SHIFT; + info->size = info->stride * roundup(info->height, tile_height); if (info->size == 0) { gvt_vgpu_err("fb size is zero\n"); return -EINVAL; @@ -275,11 +281,6 @@ static int vgpu_get_plane_info(struct drm_device *dev, gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start); return -EFAULT; } - if (((info->start >> PAGE_SHIFT) + info->size) > - ggtt_total_entries(&dev_priv->ggtt)) { - gvt_vgpu_err("Invalid GTT offset or size\n"); - return -EFAULT; - } if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) { gvt_vgpu_err("invalid gma addr\n"); @@ -491,15 +492,13 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id) obj->gvt_info = dmabuf_obj->info; - dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR); + dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR); if (IS_ERR(dmabuf)) { gvt_vgpu_err("export dma-buf failed\n"); ret = PTR_ERR(dmabuf); goto out_free_gem; } - i915_gem_object_put(obj); - ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR); if (ret < 0) { gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret); @@ -524,6 +523,8 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id) file_count(dmabuf->file), kref_read(&obj->base.refcount)); + i915_gem_object_put(obj); + return dmabuf_fd; out_free_dmabuf: diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 70494e394d2c..d6e7a1189bad 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -47,17 +47,16 @@ ((a)->lrca == (b)->lrca)) static int context_switch_events[] = { - [RCS] = RCS_AS_CONTEXT_SWITCH, - [BCS] = BCS_AS_CONTEXT_SWITCH, - [VCS] = VCS_AS_CONTEXT_SWITCH, - [VCS2] = VCS2_AS_CONTEXT_SWITCH, - [VECS] = VECS_AS_CONTEXT_SWITCH, + [RCS0] = RCS_AS_CONTEXT_SWITCH, + [BCS0] = BCS_AS_CONTEXT_SWITCH, + [VCS0] = 
VCS_AS_CONTEXT_SWITCH, + [VCS1] = VCS2_AS_CONTEXT_SWITCH, + [VECS0] = VECS_AS_CONTEXT_SWITCH, }; -static int ring_id_to_context_switch_event(int ring_id) +static int ring_id_to_context_switch_event(unsigned int ring_id) { - if (WARN_ON(ring_id < RCS || - ring_id >= ARRAY_SIZE(context_switch_events))) + if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events))) return -EINVAL; return context_switch_events[ring_id]; @@ -411,7 +410,7 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload) gvt_dbg_el("complete workload %p status %d\n", workload, workload->status); - if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) + if (workload->status || (vgpu->resetting_eng & BIT(ring_id))) goto out; if (!list_empty(workload_q_head(vgpu, ring_id))) { @@ -527,14 +526,15 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id) vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; } -static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask) +static void clean_execlist(struct intel_vgpu *vgpu, + intel_engine_mask_t engine_mask) { - unsigned int tmp; struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct intel_engine_cs *engine; struct intel_vgpu_submission *s = &vgpu->submission; + intel_engine_mask_t tmp; - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { + for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) { kfree(s->ring_scan_buffer[engine->id]); s->ring_scan_buffer[engine->id] = NULL; s->ring_scan_buffer_size[engine->id] = 0; @@ -542,18 +542,18 @@ static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask) } static void reset_execlist(struct intel_vgpu *vgpu, - unsigned long engine_mask) + intel_engine_mask_t engine_mask) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct intel_engine_cs *engine; - unsigned int tmp; + intel_engine_mask_t tmp; - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) + for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) init_vgpu_execlist(vgpu, engine->id); } static int init_execlist(struct intel_vgpu *vgpu, - unsigned long engine_mask) + intel_engine_mask_t engine_mask) { reset_execlist(vgpu, engine_mask); return 0; diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h index 714d709829a2..5ccc2c695848 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.h +++ b/drivers/gpu/drm/i915/gvt/execlist.h @@ -180,6 +180,6 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu); int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id); void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu, - unsigned long engine_mask); + intel_engine_mask_t engine_mask); #endif /*_GVT_EXECLIST_H_*/ diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 65e847392aea..8bb292b01271 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -245,7 +245,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, plane->hw_format = fmt; plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) + if (!vgpu_gmadr_is_valid(vgpu, plane->base)) return -EINVAL; plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); @@ -368,7 +368,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, alpha_plane, alpha_force); plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, 
plane->base, 0)) + if (!vgpu_gmadr_is_valid(vgpu, plane->base)) return -EINVAL; plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); @@ -472,7 +472,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, plane->drm_format = drm_format; plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) + if (!vgpu_gmadr_is_valid(vgpu, plane->base)) return -EINVAL; plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index 4ac18b447247..049775e8e350 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c @@ -68,9 +68,10 @@ static struct bin_attribute firmware_attr = { static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data) { - struct drm_i915_private *dev_priv = gvt->dev_priv; + struct drm_i915_private *i915 = gvt->dev_priv; - *(u32 *)(data + offset) = I915_READ_NOTRACE(_MMIO(offset)); + *(u32 *)(data + offset) = intel_uncore_read_notrace(&i915->uncore, + _MMIO(offset)); return 0; } diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index cf133ef03873..4b04af569c05 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -53,13 +53,19 @@ static int preallocated_oos_pages = 8192; */ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size) { - if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size - && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) { - gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n", - addr, size); - return false; - } - return true; + if (size == 0) + return vgpu_gmadr_is_valid(vgpu, addr); + + if (vgpu_gmadr_is_aperture(vgpu, addr) && + vgpu_gmadr_is_aperture(vgpu, addr + size - 1)) + return true; + else if (vgpu_gmadr_is_hidden(vgpu, addr) && + vgpu_gmadr_is_hidden(vgpu, addr + size - 1)) + return true; + + gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n", + addr, size); + return false; } /* translate a guest gmadr to host gmadr */ @@ -750,14 +756,20 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt) static void ppgtt_free_all_spt(struct intel_vgpu *vgpu) { - struct intel_vgpu_ppgtt_spt *spt; + struct intel_vgpu_ppgtt_spt *spt, *spn; struct radix_tree_iter iter; - void **slot; + LIST_HEAD(all_spt); + void __rcu **slot; + rcu_read_lock(); radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) { spt = radix_tree_deref_slot(slot); - ppgtt_free_spt(spt); + list_move(&spt->post_shadow_list, &all_spt); } + rcu_read_unlock(); + + list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list) + ppgtt_free_spt(spt); } static int ppgtt_handle_guest_write_page_table_bytes( @@ -805,7 +817,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt); /* Allocate shadow page table without guest page. */ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt( - struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type) + struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type) { struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; struct intel_vgpu_ppgtt_spt *spt = NULL; @@ -855,7 +867,7 @@ err_free_spt: /* Allocate shadow page table associated with specific gfn. 
*/ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn( - struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type, + struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type, unsigned long gfn, bool guest_pde_ips) { struct intel_vgpu_ppgtt_spt *spt; @@ -930,13 +942,22 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu, { struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *s; - intel_gvt_gtt_type_t cur_pt_type; + enum intel_gvt_gtt_type cur_pt_type; GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type))); if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { - cur_pt_type = get_next_pt_type(e->type) + 1; + cur_pt_type = get_next_pt_type(e->type); + + if (!gtt_type_is_pt(cur_pt_type) || + !gtt_type_is_pt(cur_pt_type + 1)) { + WARN(1, "Invalid page table type, cur_pt_type is: %d\n", cur_pt_type); + return -EINVAL; + } + + cur_pt_type += 1; + if (ops->get_pfn(e) == vgpu->gtt.scratch_pt[cur_pt_type].page_mfn) return 0; @@ -1070,6 +1091,11 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry( } else { int type = get_next_pt_type(we->type); + if (!gtt_type_is_pt(type)) { + ret = -EINVAL; + goto err; + } + spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips); if (IS_ERR(spt)) { ret = PTR_ERR(spt); @@ -1091,6 +1117,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry( err_free_spt: ppgtt_free_spt(spt); + spt = NULL; err: gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", spt, we->val64, we->type); @@ -1849,7 +1876,7 @@ static void vgpu_free_mm(struct intel_vgpu_mm *mm) * Zero on success, negative error code in pointer if failed. */ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu, - intel_gvt_gtt_type_t root_entry_type, u64 pdps[]) + enum intel_gvt_gtt_type root_entry_type, u64 pdps[]) { struct intel_gvt *gvt = vgpu->gvt; struct intel_vgpu_mm *mm; @@ -2114,11 +2141,20 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; unsigned long index = off >> info->gtt_entry_size_shift; + unsigned long gma; struct intel_gvt_gtt_entry e; if (bytes != 4 && bytes != 8) return -EINVAL; + gma = index << I915_GTT_PAGE_SHIFT; + if (!intel_gvt_ggtt_validate_range(vgpu, + gma, 1 << I915_GTT_PAGE_SHIFT)) { + gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma); + memset(p_data, 0, bytes); + return 0; + } + ggtt_get_guest_entry(ggtt_mm, &e, index); memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)), bytes); @@ -2172,7 +2208,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; unsigned long g_gtt_index = off >> info->gtt_entry_size_shift; unsigned long gma, gfn; - struct intel_gvt_gtt_entry e, m; + struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE}; + struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE}; dma_addr_t dma_addr; int ret; struct intel_gvt_partial_pte *partial_pte, *pos, *n; @@ -2239,7 +2276,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, if (!partial_update && (ops->test_present(&e))) { gfn = ops->get_pfn(&e); - m = e; + m.val64 = e.val64; + m.type = e.type; /* one PTE update may be issued in multiple writes and the * first write may not construct a valid gfn @@ -2303,7 +2341,7 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu 
*vgpu, } static int alloc_scratch_pages(struct intel_vgpu *vgpu, - intel_gvt_gtt_type_t type) + enum intel_gvt_gtt_type type) { struct intel_vgpu_gtt *gtt = &vgpu->gtt; struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; @@ -2498,6 +2536,7 @@ static void clean_spt_oos(struct intel_gvt *gvt) list_for_each_safe(pos, n, >t->oos_page_free_list_head) { oos_page = container_of(pos, struct intel_vgpu_oos_page, list); list_del(&oos_page->list); + free_page((unsigned long)oos_page->mem); kfree(oos_page); } } @@ -2518,6 +2557,12 @@ static int setup_spt_oos(struct intel_gvt *gvt) ret = -ENOMEM; goto fail; } + oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0); + if (!oos_page->mem) { + ret = -ENOMEM; + kfree(oos_page); + goto fail; + } INIT_LIST_HEAD(&oos_page->list); INIT_LIST_HEAD(&oos_page->vm_list); @@ -2581,7 +2626,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu, * Zero on success, negative error code if failed. */ struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu, - intel_gvt_gtt_type_t root_entry_type, u64 pdps[]) + enum intel_gvt_gtt_type root_entry_type, u64 pdps[]) { struct intel_vgpu_mm *mm; diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index edb610dc5d86..88789316807d 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -95,8 +95,8 @@ struct intel_gvt_gtt { unsigned long scratch_mfn; }; -typedef enum { - GTT_TYPE_INVALID = -1, +enum intel_gvt_gtt_type { + GTT_TYPE_INVALID = 0, GTT_TYPE_GGTT_PTE, @@ -124,7 +124,7 @@ typedef enum { GTT_TYPE_PPGTT_PML4_PT, GTT_TYPE_MAX, -} intel_gvt_gtt_type_t; +}; enum intel_gvt_mm_type { INTEL_GVT_MM_GGTT, @@ -148,7 +148,7 @@ struct intel_vgpu_mm { union { struct { - intel_gvt_gtt_type_t root_entry_type; + enum intel_gvt_gtt_type root_entry_type; /* * The 4 PDPs in ring context. For 48bit addressing, * only PDP0 is valid and point to PML4. 
For 32it @@ -169,7 +169,7 @@ struct intel_vgpu_mm { }; struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu, - intel_gvt_gtt_type_t root_entry_type, u64 pdps[]); + enum intel_gvt_gtt_type root_entry_type, u64 pdps[]); static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm) { @@ -205,24 +205,25 @@ struct intel_vgpu_gtt { struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX]; }; -extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); -extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); +int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); +void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old); void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu); -extern int intel_gvt_init_gtt(struct intel_gvt *gvt); +int intel_gvt_init_gtt(struct intel_gvt *gvt); void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu); -extern void intel_gvt_clean_gtt(struct intel_gvt *gvt); +void intel_gvt_clean_gtt(struct intel_gvt *gvt); -extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, - int page_table_level, void *root_entry); +struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, + int page_table_level, + void *root_entry); struct intel_vgpu_oos_page { struct intel_vgpu_ppgtt_spt *spt; struct list_head list; struct list_head vm_list; int id; - unsigned char mem[I915_GTT_PAGE_SIZE]; + void *mem; }; #define GTT_ENTRY_NUM_IN_ONE_PAGE 512 @@ -233,7 +234,7 @@ struct intel_vgpu_ppgtt_spt { struct intel_vgpu *vgpu; struct { - intel_gvt_gtt_type_t type; + enum intel_gvt_gtt_type type; bool pde_ips; /* for 64KB PTEs */ void *vaddr; struct page *page; @@ -241,7 +242,7 @@ struct intel_vgpu_ppgtt_spt { } shadow_page; struct { - intel_gvt_gtt_type_t type; + enum intel_gvt_gtt_type type; bool pde_ips; /* for 64KB PTEs */ unsigned long gfn; unsigned long write_cnt; @@ -267,7 +268,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]); struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu, - intel_gvt_gtt_type_t root_entry_type, u64 pdps[]); + enum intel_gvt_gtt_type root_entry_type, u64 pdps[]); int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]); diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 43f4242062dd..8f37eefa0a02 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -375,9 +375,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) } gvt->idle_vgpu = vgpu; - ret = intel_gvt_debugfs_init(gvt); - if (ret) - gvt_err("debugfs registration failed, go on.\n"); + intel_gvt_debugfs_init(gvt); gvt_dbg_core("gvt device initialization is done\n"); dev_priv->gvt = gvt; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 8bce09de4b82..b47c6acaf9c0 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -87,14 +87,13 @@ struct intel_vgpu_gm { /* Fences owned by a vGPU */ struct intel_vgpu_fence { - struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES]; + struct i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES]; u32 base; u32 size; }; struct intel_vgpu_mmio { void *vreg; - void *sreg; }; #define INTEL_GVT_MAX_BAR_NUM 4 @@ -111,11 +110,9 @@ struct intel_vgpu_cfg_space { #define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space) -#define INTEL_GVT_MAX_PIPE 4 - struct intel_vgpu_irq { bool irq_warn_once[INTEL_GVT_EVENT_MAX]; - DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE], + 
DECLARE_BITMAP(flip_done_event[I915_MAX_PIPES], INTEL_GVT_EVENT_MAX); }; @@ -144,17 +141,17 @@ enum { struct intel_vgpu_submission_ops { const char *name; - int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask); - void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask); - void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask); + int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask); + void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask); + void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask); }; struct intel_vgpu_submission { struct intel_vgpu_execlist execlist[I915_NUM_ENGINES]; struct list_head workload_q_head[I915_NUM_ENGINES]; + struct intel_context *shadow[I915_NUM_ENGINES]; struct kmem_cache *workloads; atomic_t running_workload_num; - struct i915_gem_context *shadow_ctx; union { u64 i915_context_pml4; u64 i915_context_pdps[GEN8_3LVL_PDPES]; @@ -337,6 +334,10 @@ struct intel_gvt { struct { struct engine_mmio *mmio; int ctx_mmio_count[I915_NUM_ENGINES]; + u32 *tlb_mmio_offset_list; + u32 tlb_mmio_offset_list_cnt; + u32 *mocs_mmio_offset_list; + u32 mocs_mmio_offset_list_cnt; } engine_mmio_list; struct dentry *debugfs_root; @@ -393,7 +394,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt); #define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \ + gvt_hidden_sz(gvt) - 1) -#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs) +#define gvt_fence_sz(gvt) ((gvt)->dev_priv->ggtt.num_fences) /* Aperture/GM space definitions for vGPU */ #define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start) @@ -449,10 +450,6 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu, (*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg))) #define vgpu_vreg64(vgpu, offset) \ (*(u64 *)(vgpu->mmio.vreg + (offset))) -#define vgpu_sreg_t(vgpu, reg) \ - (*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg))) -#define vgpu_sreg(vgpu, offset) \ - (*(u32 *)(vgpu->mmio.sreg + (offset))) #define for_each_active_vgpu(gvt, vgpu, id) \ idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \ @@ -488,7 +485,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); void intel_gvt_release_vgpu(struct intel_vgpu *vgpu); void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, - unsigned int engine_mask); + intel_engine_mask_t engine_mask); void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu); void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu); @@ -591,12 +588,12 @@ enum { static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv) { - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(&dev_priv->runtime_pm); } static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv) { - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); } /** @@ -689,9 +686,9 @@ static inline void intel_gvt_mmio_set_in_ctx( gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX; } -int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu); +void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu); void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); -int intel_gvt_debugfs_init(struct intel_gvt *gvt); +void intel_gvt_debugfs_init(struct intel_gvt *gvt); void intel_gvt_debugfs_clean(struct intel_gvt *gvt); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index bc64b810e0d5..bd12af349123 100644 --- 
a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -311,7 +311,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - unsigned int engine_mask = 0; + intel_engine_mask_t engine_mask = 0; u32 data; write_vreg(vgpu, offset, p_data, bytes); @@ -323,25 +323,25 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, } else { if (data & GEN6_GRDOM_RENDER) { gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); - engine_mask |= (1 << RCS); + engine_mask |= BIT(RCS0); } if (data & GEN6_GRDOM_MEDIA) { gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); - engine_mask |= (1 << VCS); + engine_mask |= BIT(VCS0); } if (data & GEN6_GRDOM_BLT) { gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); - engine_mask |= (1 << BCS); + engine_mask |= BIT(BCS0); } if (data & GEN6_GRDOM_VECS) { gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); - engine_mask |= (1 << VECS); + engine_mask |= BIT(VECS0); } if (data & GEN8_GRDOM_MEDIA2) { gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); - if (HAS_BSD2(vgpu->gvt->dev_priv)) - engine_mask |= (1 << VCS2); + engine_mask |= BIT(VCS1); } + engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask; } /* vgpu_lock already hold by emulate mmio r/w */ @@ -464,6 +464,8 @@ static i915_reg_t force_nonpriv_white_list[] = { _MMIO(0x2690), _MMIO(0x2694), _MMIO(0x2698), + _MMIO(0x2754), + _MMIO(0x28a0), _MMIO(0x4de0), _MMIO(0x4de4), _MMIO(0x4dfc), @@ -750,18 +752,19 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - unsigned int index = DSPSURF_TO_PIPE(offset); - i915_reg_t surflive_reg = DSPSURFLIVE(index); - int flip_event[] = { - [PIPE_A] = PRIMARY_A_FLIP_DONE, - [PIPE_B] = PRIMARY_B_FLIP_DONE, - [PIPE_C] = PRIMARY_C_FLIP_DONE, - }; + u32 pipe = DSPSURF_TO_PIPE(offset); + int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY); write_vreg(vgpu, offset, p_data, bytes); - vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset); + vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset); + + vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++; + + if (vgpu_vreg_t(vgpu, DSPCNTR(pipe)) & PLANE_CTL_ASYNC_FLIP) + intel_vgpu_trigger_virtual_event(vgpu, event); + else + set_bit(event, vgpu->irq.flip_done_event[pipe]); - set_bit(flip_event[index], vgpu->irq.flip_done_event[index]); return 0; } @@ -771,18 +774,42 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - unsigned int index = SPRSURF_TO_PIPE(offset); - i915_reg_t surflive_reg = SPRSURFLIVE(index); - int flip_event[] = { - [PIPE_A] = SPRITE_A_FLIP_DONE, - [PIPE_B] = SPRITE_B_FLIP_DONE, - [PIPE_C] = SPRITE_C_FLIP_DONE, - }; + u32 pipe = SPRSURF_TO_PIPE(offset); + int event = SKL_FLIP_EVENT(pipe, PLANE_SPRITE0); + + write_vreg(vgpu, offset, p_data, bytes); + vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset); + + if (vgpu_vreg_t(vgpu, SPRCTL(pipe)) & PLANE_CTL_ASYNC_FLIP) + intel_vgpu_trigger_virtual_event(vgpu, event); + else + set_bit(event, vgpu->irq.flip_done_event[pipe]); + + return 0; +} + +static int reg50080_mmio_write(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, + unsigned int bytes) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + enum 
pipe pipe = REG_50080_TO_PIPE(offset); + enum plane_id plane = REG_50080_TO_PLANE(offset); + int event = SKL_FLIP_EVENT(pipe, plane); write_vreg(vgpu, offset, p_data, bytes); - vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset); + if (plane == PLANE_PRIMARY) { + vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset); + vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++; + } else { + vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset); + } + + if ((vgpu_vreg(vgpu, offset) & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC) + intel_vgpu_trigger_virtual_event(vgpu, event); + else + set_bit(event, vgpu->irq.flip_done_event[pipe]); - set_bit(flip_event[index], vgpu->irq.flip_done_event[index]); return 0; } @@ -792,13 +819,16 @@ static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu, struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; enum intel_gvt_event_type event; - if (reg == _DPA_AUX_CH_CTL) + if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A))) event = AUX_CHANNEL_A; - else if (reg == _PCH_DPB_AUX_CH_CTL || reg == _DPB_AUX_CH_CTL) + else if (reg == _PCH_DPB_AUX_CH_CTL || + reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_B))) event = AUX_CHANNEL_B; - else if (reg == _PCH_DPC_AUX_CH_CTL || reg == _DPC_AUX_CH_CTL) + else if (reg == _PCH_DPC_AUX_CH_CTL || + reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_C))) event = AUX_CHANNEL_C; - else if (reg == _PCH_DPD_AUX_CH_CTL || reg == _DPD_AUX_CH_CTL) + else if (reg == _PCH_DPD_AUX_CH_CTL || + reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D))) event = AUX_CHANNEL_D; else { WARN_ON(true); @@ -1181,7 +1211,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) { - intel_gvt_gtt_type_t root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY; + enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY; struct intel_vgpu_mm *mm; u64 *pdps; @@ -1227,18 +1257,15 @@ static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready) static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - u32 data; - int ret; - - write_vreg(vgpu, offset, p_data, bytes); - data = vgpu_vreg(vgpu, offset); + u32 data = *(u32 *)p_data; + bool invalid_write = false; switch (offset) { case _vgtif_reg(display_ready): send_display_ready_uevent(vgpu, data ? 
1 : 0); break; case _vgtif_reg(g2v_notify): - ret = handle_g2v_notification(vgpu, data); + handle_g2v_notification(vgpu, data); break; /* add xhot and yhot to handled list to avoid error log */ case _vgtif_reg(cursor_x_hot): @@ -1255,13 +1282,19 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, case _vgtif_reg(execlist_context_descriptor_hi): break; case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]): + invalid_write = true; enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE); break; default: + invalid_write = true; gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n", offset, bytes, data); break; } + + if (!invalid_write) + write_vreg(vgpu, offset, p_data, bytes); + return 0; } @@ -1339,7 +1372,6 @@ static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset, static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; u32 trtte = *(u32 *)p_data; if ((trtte & 1) && (trtte & (1 << 1)) == 0) { @@ -1348,11 +1380,6 @@ static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset, return -EINVAL; } write_vreg(vgpu, offset, p_data, bytes); - /* TRTTE is not per-context */ - - mmio_hw_access_pre(dev_priv); - I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset)); - mmio_hw_access_post(dev_priv); return 0; } @@ -1360,15 +1387,6 @@ static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset, static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - u32 val = *(u32 *)p_data; - - if (val & 1) { - /* unblock hw logic */ - mmio_hw_access_pre(dev_priv); - I915_WRITE(_MMIO(offset), val); - mmio_hw_access_post(dev_priv); - } write_vreg(vgpu, offset, p_data, bytes); return 0; } @@ -1680,8 +1698,22 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, bool enable_execlist; int ret; + (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1); + if (IS_COFFEELAKE(vgpu->gvt->dev_priv)) + (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2); write_vreg(vgpu, offset, p_data, bytes); + if (data & _MASKED_BIT_ENABLE(1)) { + enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); + return 0; + } + + if (IS_COFFEELAKE(vgpu->gvt->dev_priv) && + data & _MASKED_BIT_ENABLE(2)) { + enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); + return 0; + } + /* when PPGTT mode enabled, we will check if guest has called * pvinfo, if not, we will treat this guest as non-gvtg-aware * guest, and stop emulating its cfg space, mmio, gtt, etc. 
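The handlers above depend on the GEN convention for "masked" registers: the upper 16 bits of a written value select which of the lower 16 bits actually take effect, which is why ring_mode_mmio_write can strip individual _MASKED_BIT_ENABLE() bits out of p_data and still commit a well-formed value to the vreg (the same convention drives the old_vreg/mask merge in intel_vgpu_mmio_reg_rw later in this file). A minimal standalone sketch of that semantic, using invented names rather than the GVT implementation:

	#include <stdint.h>

	#define MASKED_BIT_ENABLE(b)	((uint32_t)(((b) << 16) | (b)))
	#define MASKED_BIT_DISABLE(b)	((uint32_t)((b) << 16))

	/*
	 * Fold a masked write into the current register value: only the
	 * low bits whose companion mask bit (bit + 16) is set may change.
	 */
	static uint32_t apply_masked_write(uint32_t old, uint32_t val)
	{
		uint32_t mask = val >> 16;

		return (old & ~mask) | (val & mask);
	}

For instance, apply_masked_write(0, MASKED_BIT_ENABLE(0x18)) turns on only bits 3 and 4, mirroring the bit pattern that the csfe_chicken1_mmio_write handler added below watches for.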
@@ -1704,7 +1736,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; ret = intel_vgpu_select_submission_ops(vgpu, - ENGINE_MASK(ring_id), + BIT(ring_id), INTEL_VGPU_EXECLIST_SUBMISSION); if (ret) return ret; @@ -1724,19 +1756,19 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, switch (offset) { case 0x4260: - id = RCS; + id = RCS0; break; case 0x4264: - id = VCS; + id = VCS0; break; case 0x4268: - id = VCS2; + id = VCS1; break; case 0x426c: - id = BCS; + id = BCS0; break; case 0x4270: - id = VECS; + id = VECS0; break; default: return -EINVAL; @@ -1763,6 +1795,21 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu, return 0; } +static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, + unsigned int bytes) +{ + u32 data = *(u32 *)p_data; + + (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18); + write_vreg(vgpu, offset, p_data, bytes); + + if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8)) + enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); + + return 0; +} + #define MMIO_F(reg, s, f, am, rm, d, r, w) do { \ ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \ f, s, am, rm, d, r, w); \ @@ -1793,7 +1840,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu, MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \ MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \ MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \ - if (HAS_BSD2(dev_priv)) \ + if (HAS_ENGINE(dev_priv, VCS1)) \ MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \ } while (0) @@ -1848,7 +1895,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL); MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL); - MMIO_GM_RDR(CCID, D_ALL, NULL, NULL); + MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL); MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL); MMIO_D(GEN7_CXT_SIZE, D_ALL); @@ -1883,7 +1930,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(_MMIO(0x20e4), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(FF_SLICE_CS_CHICKEN2, D_ALL, + F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, @@ -1969,6 +2017,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write); MMIO_D(DSPOFFSET(PIPE_A), D_ALL); MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL); + MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL, + reg50080_mmio_write); MMIO_D(DSPCNTR(PIPE_B), D_ALL); MMIO_D(DSPADDR(PIPE_B), D_ALL); @@ -1978,6 +2028,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write); MMIO_D(DSPOFFSET(PIPE_B), D_ALL); MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL); + MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL, + reg50080_mmio_write); MMIO_D(DSPCNTR(PIPE_C), D_ALL); MMIO_D(DSPADDR(PIPE_C), D_ALL); @@ -1987,6 +2039,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write); MMIO_D(DSPOFFSET(PIPE_C), D_ALL); MMIO_D(DSPSURFLIVE(PIPE_C), 
D_ALL); + MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL, + reg50080_mmio_write); MMIO_D(SPRCTL(PIPE_A), D_ALL); MMIO_D(SPRLINOFF(PIPE_A), D_ALL); @@ -2000,6 +2054,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(SPROFFSET(PIPE_A), D_ALL); MMIO_D(SPRSCALE(PIPE_A), D_ALL); MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL); + MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL, + reg50080_mmio_write); MMIO_D(SPRCTL(PIPE_B), D_ALL); MMIO_D(SPRLINOFF(PIPE_B), D_ALL); @@ -2013,6 +2069,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(SPROFFSET(PIPE_B), D_ALL); MMIO_D(SPRSCALE(PIPE_B), D_ALL); MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL); + MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL, + reg50080_mmio_write); MMIO_D(SPRCTL(PIPE_C), D_ALL); MMIO_D(SPRLINOFF(PIPE_C), D_ALL); @@ -2026,6 +2084,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(SPROFFSET(PIPE_C), D_ALL); MMIO_D(SPRSCALE(PIPE_C), D_ALL); MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL); + MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL, + reg50080_mmio_write); MMIO_D(HTOTAL(TRANSCODER_A), D_ALL); MMIO_D(HBLANK(TRANSCODER_A), D_ALL); @@ -2739,7 +2799,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS); MMIO_D(WM_MISC, D_BDW); - MMIO_D(_MMIO(BDW_EDP_PSR_BASE), D_BDW); + MMIO_D(_MMIO(_SRD_CTL_EDP), D_BDW); MMIO_D(_MMIO(0x6671c), D_BDW_PLUS); MMIO_D(_MMIO(0x66c00), D_BDW_PLUS); @@ -2815,11 +2875,11 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL); - MMIO_F(_MMIO(_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, + MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); - MMIO_F(_MMIO(_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, + MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); - MMIO_F(_MMIO(_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, + MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS); @@ -2827,26 +2887,26 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write); - MMIO_D(_MMIO(0xa210), D_SKL_PLUS); + MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS); MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_DH(_MMIO(0x4ddc), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(0x42080), D_SKL_PLUS, NULL, NULL); - MMIO_D(_MMIO(0x45504), D_SKL_PLUS); - MMIO_D(_MMIO(0x45520), D_SKL_PLUS); - MMIO_D(_MMIO(0x46000), D_SKL_PLUS); - MMIO_DH(_MMIO(0x46010), D_SKL_PLUS, NULL, skl_lcpll_write); - MMIO_DH(_MMIO(0x46014), D_SKL_PLUS, NULL, skl_lcpll_write); - MMIO_D(_MMIO(0x6C040), D_SKL_PLUS); - MMIO_D(_MMIO(0x6C048), D_SKL_PLUS); - MMIO_D(_MMIO(0x6C050), D_SKL_PLUS); - MMIO_D(_MMIO(0x6C044), D_SKL_PLUS); - MMIO_D(_MMIO(0x6C04C), D_SKL_PLUS); - MMIO_D(_MMIO(0x6C054), D_SKL_PLUS); - MMIO_D(_MMIO(0x6c058), D_SKL_PLUS); - MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS); - MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL); + MMIO_DH(MMCD_MISC_CTRL, D_SKL_PLUS, NULL, NULL); + MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL); + MMIO_D(DC_STATE_EN, D_SKL_PLUS); + MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS); + MMIO_D(CDCLK_CTL, D_SKL_PLUS); + MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write); + 
MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write); + MMIO_D(_MMIO(_DPLL1_CFGCR1), D_SKL_PLUS); + MMIO_D(_MMIO(_DPLL2_CFGCR1), D_SKL_PLUS); + MMIO_D(_MMIO(_DPLL3_CFGCR1), D_SKL_PLUS); + MMIO_D(_MMIO(_DPLL1_CFGCR2), D_SKL_PLUS); + MMIO_D(_MMIO(_DPLL2_CFGCR2), D_SKL_PLUS); + MMIO_D(_MMIO(_DPLL3_CFGCR2), D_SKL_PLUS); + MMIO_D(DPLL_CTRL1, D_SKL_PLUS); + MMIO_D(DPLL_CTRL2, D_SKL_PLUS); + MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL); MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); @@ -2965,40 +3025,41 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL); - MMIO_D(_MMIO(0x70380), D_SKL_PLUS); - MMIO_D(_MMIO(0x71380), D_SKL_PLUS); + MMIO_D(_MMIO(_PLANE_CTL_3_A), D_SKL_PLUS); + MMIO_D(_MMIO(_PLANE_CTL_3_B), D_SKL_PLUS); MMIO_D(_MMIO(0x72380), D_SKL_PLUS); MMIO_D(_MMIO(0x7239c), D_SKL_PLUS); - MMIO_D(_MMIO(0x7039c), D_SKL_PLUS); + MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS); - MMIO_D(_MMIO(0x8f074), D_SKL_PLUS); - MMIO_D(_MMIO(0x8f004), D_SKL_PLUS); - MMIO_D(_MMIO(0x8f034), D_SKL_PLUS); + MMIO_D(CSR_SSP_BASE, D_SKL_PLUS); + MMIO_D(CSR_HTP_SKL, D_SKL_PLUS); + MMIO_D(CSR_LAST_WRITE, D_SKL_PLUS); - MMIO_D(_MMIO(0xb11c), D_SKL_PLUS); + MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_D(_MMIO(0x51000), D_SKL_PLUS); - MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS); + MMIO_D(SKL_DFSM, D_SKL_PLUS); + MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS); - MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS, + MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS, + MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS, NULL, NULL); MMIO_D(RPM_CONFIG0, D_SKL_PLUS); MMIO_D(_MMIO(0xd08), D_SKL_PLUS); MMIO_D(RC6_LOCATION, D_SKL_PLUS); - MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL); - MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, + MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS, + F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); /* TRTT */ - MMIO_DFH(_MMIO(0x4de0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(_MMIO(0x4de4), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(_MMIO(0x4de8), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(_MMIO(0x4dec), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(_MMIO(0x4df0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(_MMIO(0x4df4), D_SKL_PLUS, F_CMD_ACCESS, + MMIO_DFH(TRVATTL3PTRDW(0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(TRVATTL3PTRDW(1), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS, NULL, gen9_trtte_write); MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write); @@ -3007,11 +3068,11 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(0x46520), D_SKL_PLUS); MMIO_D(_MMIO(0xc403c), D_SKL_PLUS); - MMIO_D(_MMIO(0xb004), D_SKL_PLUS); + MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write); MMIO_D(_MMIO(0x65900), D_SKL_PLUS); - MMIO_D(_MMIO(0x1082c0), D_SKL_PLUS); + MMIO_D(GEN6_STOLEN_RESERVED, D_SKL_PLUS); 
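Most of the churn in these init_skl_mmio_info() hunks is mechanical: raw offsets give way to the names i915 already defines for them (0xa210 becomes GEN9_PG_ENABLE, 0x1082c0 becomes GEN6_STOLEN_RESERVED, the per-channel _DPB/_DPC/_DPD_AUX_CH_CTL constants become DP_AUX_CH_CTL(AUX_CH_x), and so on). Where a register exists once per pipe, PLL or AUX channel, i915 expresses the whole family as one parameterized macro; a generic sketch of that pattern follows, with base and stride values chosen purely for illustration, not taken from hardware documentation:

	/*
	 * Hypothetical per-instance register: instance n lives at
	 * BASE + n * STRIDE, so a single macro replaces one #define per
	 * instance and keeps call sites self-describing.
	 */
	#define EXAMPLE_AUX_BASE	0x64010u
	#define EXAMPLE_AUX_STRIDE	0x100u
	#define EXAMPLE_AUX_CH_CTL(ch) \
		(EXAMPLE_AUX_BASE + (ch) * EXAMPLE_AUX_STRIDE)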
MMIO_D(_MMIO(0x4068), D_SKL_PLUS); MMIO_D(_MMIO(0x67054), D_SKL_PLUS); MMIO_D(_MMIO(0x6e560), D_SKL_PLUS); @@ -3036,14 +3097,17 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS); MMIO_D(_MMIO(0x44500), D_SKL_PLUS); - MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); +#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4) + MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, + NULL, csfe_chicken1_mmio_write); +#undef CSFE_CHICKEN1_REG MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); - MMIO_D(_MMIO(0x4ab8), D_KBL | D_CFL); - MMIO_D(_MMIO(0x2248), D_SKL_PLUS); + MMIO_D(GAMT_CHKN_BIT_REG, D_KBL); + MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL); return 0; } @@ -3216,7 +3280,7 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt) MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT); MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT); MMIO_D(GEN6_GFXPAUSE, D_BXT); - MMIO_D(GEN8_L3SQCREG1, D_BXT); + MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL); @@ -3356,6 +3420,10 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt, } for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) { + /* pvinfo data doesn't come from hw mmio */ + if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE) + continue; + for (j = 0; j < block->size; j += 4) { ret = handler(gvt, i915_mmio_reg_offset(block->offset) + j, @@ -3489,12 +3557,11 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, return mmio_info->read(vgpu, offset, pdata, bytes); else { u64 ro_mask = mmio_info->ro_mask; - u32 old_vreg = 0, old_sreg = 0; + u32 old_vreg = 0; u64 data = 0; if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) { old_vreg = vgpu_vreg(vgpu, offset); - old_sreg = vgpu_sreg(vgpu, offset); } if (likely(!ro_mask)) @@ -3516,8 +3583,6 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) | (vgpu_vreg(vgpu, offset) & mask); - vgpu_sreg(vgpu, offset) = (old_sreg & ~mask) - | (vgpu_sreg(vgpu, offset) & mask); } } diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 67125c5eec6e..11accd3e1023 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -536,7 +536,7 @@ static void gen8_init_irq( SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1); - if (HAS_BSD2(gvt->dev_priv)) { + if (HAS_ENGINE(gvt->dev_priv, VCS1)) { SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW, @@ -672,7 +672,7 @@ void intel_gvt_clean_irq(struct intel_gvt *gvt) hrtimer_cancel(&irq->vblank_timer.timer); } -#define VBLNAK_TIMER_PERIOD 16000000 +#define VBLANK_TIMER_PERIOD 16000000 /** * intel_gvt_init_irq - initialize GVT-g IRQ emulation subsystem @@ -704,7 +704,7 @@ int intel_gvt_init_irq(struct intel_gvt *gvt) hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); vblank_timer->timer.function = vblank_timer_fn; - vblank_timer->period = VBLNAK_TIMER_PERIOD; + vblank_timer->period = VBLANK_TIMER_PERIOD; return 0; } diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index d5fcc447d22f..04a5a0d90823 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ 
b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -905,7 +905,7 @@ static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off) static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off, void *buf, unsigned long count, bool is_write) { - void *aperture_va; + void __iomem *aperture_va; if (!intel_vgpu_in_aperture(vgpu, off) || !intel_vgpu_in_aperture(vgpu, off + count)) { @@ -920,9 +920,9 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off, return -EIO; if (is_write) - memcpy(aperture_va + offset_in_page(off), buf, count); + memcpy_toio(aperture_va + offset_in_page(off), buf, count); else - memcpy(buf, aperture_va + offset_in_page(off), count); + memcpy_fromio(buf, aperture_va + offset_in_page(off), count); io_mapping_unmap(aperture_va); @@ -1306,7 +1306,6 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, unsigned int i; int ret; struct vfio_region_info_cap_sparse_mmap *sparse = NULL; - size_t size; int nr_areas = 1; int cap_type_id; @@ -1349,9 +1348,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, VFIO_REGION_INFO_FLAG_WRITE; info.size = gvt_aperture_sz(vgpu->gvt); - size = sizeof(*sparse) + - (nr_areas * sizeof(*sparse->areas)); - sparse = kzalloc(size, GFP_KERNEL); + sparse = kzalloc(struct_size(sparse, areas, nr_areas), + GFP_KERNEL); if (!sparse) return -ENOMEM; @@ -1416,9 +1414,9 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, switch (cap_type_id) { case VFIO_REGION_INFO_CAP_SPARSE_MMAP: ret = vfio_info_add_capability(&caps, - &sparse->header, sizeof(*sparse) + - (sparse->nr_areas * - sizeof(*sparse->areas))); + &sparse->header, + struct_size(sparse, areas, + sparse->nr_areas)); if (ret) { kfree(sparse); return ret; @@ -1566,27 +1564,10 @@ vgpu_id_show(struct device *dev, struct device_attribute *attr, return sprintf(buf, "\n"); } -static ssize_t -hw_id_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct mdev_device *mdev = mdev_from_dev(dev); - - if (mdev) { - struct intel_vgpu *vgpu = (struct intel_vgpu *) - mdev_get_drvdata(mdev); - return sprintf(buf, "%u\n", - vgpu->submission.shadow_ctx->hw_id); - } - return sprintf(buf, "\n"); -} - static DEVICE_ATTR_RO(vgpu_id); -static DEVICE_ATTR_RO(hw_id); static struct attribute *intel_vgpu_attrs[] = { &dev_attr_vgpu_id.attr, - &dev_attr_hw_id.attr, NULL }; @@ -1798,9 +1779,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev) "kvmgt_nr_cache_entries", 0444, vgpu->debugfs, &vgpu->vdev.nr_cache_entries); - if (!info->debugfs_cache_entries) - gvt_vgpu_err("Cannot create kvmgt debugfs entry\n"); - return 0; } @@ -1911,6 +1889,18 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size); if (ret) goto err_unmap; + } else if (entry->size != size) { + /* the same gfn with different size: unmap and re-map */ + gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size); + __gvt_cache_remove_entry(vgpu, entry); + + ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size); + if (ret) + goto err_unlock; + + ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size); + if (ret) + goto err_unmap; } else { kref_get(&entry->ref); *dma_addr = entry->dma_addr; diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index ed4df2f6d60b..a55178884d67 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -239,7 +239,6 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) if (dmlr) { 
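	/*
	 * Aside for readers, not a line of the patch: "dmlr" is GVT-g's
	 * device-model-level reset. On that path the whole vreg space is
	 * restored from the saved MMIO snapshot (mmio); the non-dmlr
	 * branch later in this hunk restores only the first
	 * GVT_GEN8_MMIO_RESET_OFFSET bytes. With the sreg copies removed
	 * by this patch, vreg is the single emulated register file that
	 * remains.
	 */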
memcpy(vgpu->mmio.vreg, mmio, info->mmio_size); - memcpy(vgpu->mmio.sreg, mmio, info->mmio_size); vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; @@ -280,7 +279,6 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) * touched */ memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET); - memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET); } } @@ -296,12 +294,10 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu) { const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; - vgpu->mmio.vreg = vzalloc(array_size(info->mmio_size, 2)); + vgpu->mmio.vreg = vzalloc(info->mmio_size); if (!vgpu->mmio.vreg) return -ENOMEM; - vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; - intel_vgpu_reset_mmio(vgpu, true); return 0; @@ -315,5 +311,5 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu) void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu) { vfree(vgpu->mmio.vreg); - vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; + vgpu->mmio.vreg = NULL; } diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 7902fb162d09..aaf15916d29a 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -34,6 +34,8 @@ */ #include "i915_drv.h" +#include "gt/intel_context.h" +#include "gt/intel_ring.h" #include "gvt.h" #include "trace.h" @@ -41,103 +43,104 @@ /* Raw offset is appened to each line for convenience. */ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = { - {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ - {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ - {RCS, HWSTAM, 0x0, false}, /* 0x2098 */ - {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */ - {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */ - {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */ - {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */ - {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */ - {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */ - {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */ - - {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */ - {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */ - {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ - {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ - {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */ - {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */ + {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ + {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ + {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */ + {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 
0, false}, /* 0x24d0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */ + {RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */ + {RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */ + {RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */ + {RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */ + {RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */ + {RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */ + + {BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */ + {BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */ + {BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ + {BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ + {BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */ + {RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */ }; static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = { - {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ - {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ - {RCS, HWSTAM, 0x0, false}, /* 0x2098 */ - {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */ - {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */ - {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */ - {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */ - {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */ - {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */ - {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */ - - {RCS, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */ - {RCS, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */ - {RCS, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */ - {RCS, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */ - {RCS, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */ - {RCS, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */ - {RCS, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */ - {RCS, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */ - {RCS, 
HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */ - {RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */ - {RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */ - {RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */ - {RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */ - {RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */ - {RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */ - {RCS, TRINVTILEDETCT, 0, false}, /* 0x4dec */ - {RCS, TRVADR, 0, false}, /* 0x4df0 */ - {RCS, TRTTE, 0, false}, /* 0x4df4 */ - - {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */ - {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */ - {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ - {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ - {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */ - - {VCS2, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */ - - {VECS, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */ - - {RCS, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */ - {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ - {RCS, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */ - {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */ - - {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */ - {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */ - {RCS, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */ - - {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ - {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ - {RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */ - {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */ + {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ + {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ + {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */ + {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */ + {RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */ + {RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */ + {RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */ + {RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */ + {RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */ + {RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */ + + {RCS0, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */ + {RCS0, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */ + {RCS0, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */ + {RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */ + {RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */ + {RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */ + {RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */ + {RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */ + {RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */ + {RCS0, 
GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */ + {RCS0, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */ + {RCS0, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */ + {RCS0, TRVATTL3PTRDW(0), 0, true}, /* 0x4de0 */ + {RCS0, TRVATTL3PTRDW(1), 0, true}, /* 0x4de4 */ + {RCS0, TRNULLDETCT, 0, true}, /* 0x4de8 */ + {RCS0, TRINVTILEDETCT, 0, true}, /* 0x4dec */ + {RCS0, TRVADR, 0, true}, /* 0x4df0 */ + {RCS0, TRTTE, 0, true}, /* 0x4df4 */ + {RCS0, _MMIO(0x4dfc), 0, true}, + + {BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */ + {BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */ + {BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ + {BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ + {BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */ + + {VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */ + + {VECS0, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */ + + {RCS0, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */ + {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ + {RCS0, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */ + {RCS0, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */ + + {RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */ + {RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */ + {RCS0, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */ + + {RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ + {RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ + {RCS0, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */ + {RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */ }; static struct { @@ -146,19 +149,27 @@ static struct { u32 l3cc_table[GEN9_MOCS_SIZE / 2]; } gen9_render_mocs; +static u32 gen9_mocs_mmio_offset_list[] = { + [RCS0] = 0xc800, + [VCS0] = 0xc900, + [VCS1] = 0xca00, + [BCS0] = 0xcc00, + [VECS0] = 0xcb00, +}; + static void load_render_mocs(struct drm_i915_private *dev_priv) { + struct intel_gvt *gvt = dev_priv->gvt; i915_reg_t offset; - u32 regs[] = { - [RCS] = 0xc800, - [VCS] = 0xc900, - [VCS2] = 0xca00, - [BCS] = 0xcc00, - [VECS] = 0xcb00, - }; + u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt; + u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list; int ring_id, i; - for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) { + /* Platform doesn't have mocs mmios. 
*/ + if (!regs) + return; + + for (ring_id = 0; ring_id < cnt; ring_id++) { if (!HAS_ENGINE(dev_priv, ring_id)) continue; offset.reg = regs[ring_id]; @@ -302,7 +313,7 @@ int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu, goto out; /* no MOCS register in context except render engine */ - if (req->engine->id != RCS) + if (req->engine->id != RCS0) goto out; ret = restore_render_mocs_control_for_inhibit(vgpu, req); @@ -325,21 +336,28 @@ out: return ret; } +static u32 gen8_tlb_mmio_offset_list[] = { + [RCS0] = 0x4260, + [VCS0] = 0x4264, + [VCS1] = 0x4268, + [BCS0] = 0x426c, + [VECS0] = 0x4270, +}; + static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct intel_uncore *uncore = &dev_priv->uncore; struct intel_vgpu_submission *s = &vgpu->submission; + u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list; + u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt; enum forcewake_domains fw; i915_reg_t reg; - u32 regs[] = { - [RCS] = 0x4260, - [VCS] = 0x4264, - [VCS2] = 0x4268, - [BCS] = 0x426c, - [VECS] = 0x4270, - }; - if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) + if (!regs) + return; + + if (WARN_ON(ring_id >= cnt)) return; if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending)) @@ -352,21 +370,21 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) * otherwise device can go to RC6 state and interrupt invalidation * process */ - fw = intel_uncore_forcewake_for_reg(dev_priv, reg, + fw = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ | FW_REG_WRITE); - if (ring_id == RCS && (INTEL_GEN(dev_priv) >= 9)) + if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9) fw |= FORCEWAKE_RENDER; - intel_uncore_forcewake_get(dev_priv, fw); + intel_uncore_forcewake_get(uncore, fw); - I915_WRITE_FW(reg, 0x1); + intel_uncore_write_fw(uncore, reg, 0x1); - if (wait_for_atomic((I915_READ_FW(reg) == 0), 50)) + if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50)) gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id); else vgpu_vreg_t(vgpu, reg) = 0; - intel_uncore_forcewake_put(dev_priv, fw); + intel_uncore_forcewake_put(uncore, fw); gvt_dbg_core("invalidate TLB for ring %d\n", ring_id); } @@ -379,11 +397,11 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, u32 old_v, new_v; u32 regs[] = { - [RCS] = 0xc800, - [VCS] = 0xc900, - [VCS2] = 0xca00, - [BCS] = 0xcc00, - [VECS] = 0xcb00, + [RCS0] = 0xc800, + [VCS0] = 0xc900, + [VCS1] = 0xca00, + [BCS0] = 0xcc00, + [VECS0] = 0xcb00, }; int i; @@ -391,8 +409,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) return; - if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv) - || IS_COFFEELAKE(dev_priv)) && ring_id == RCS) + if (ring_id == RCS0 && IS_GEN(dev_priv, 9)) return; if (!pre && !gen9_render_mocs.initialized) @@ -415,7 +432,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, offset.reg += 4; } - if (ring_id == RCS) { + if (ring_id == RCS0) { l3_offset.reg = 0xb020; for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) { if (pre) @@ -467,11 +484,10 @@ static void switch_mmio(struct intel_vgpu *pre, continue; /* * No need to do save or restore of the mmio which is in context - * state image on kabylake, it's initialized by lri command and + * state image on gen9, it's initialized by lri command and * save or restore with context together. 
*/ - if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv) - || IS_COFFEELAKE(dev_priv)) && mmio->in_context) + if (IS_GEN(dev_priv, 9) && mmio->in_context) continue; // save @@ -493,7 +509,7 @@ static void switch_mmio(struct intel_vgpu *pre, * itself. */ if (mmio->in_context && - !is_inhibit_context(&s->shadow_ctx->__engine[ring_id])) + !is_inhibit_context(s->shadow[ring_id])) continue; if (mmio->mask) @@ -550,9 +566,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre, * performace for batch mmio read/write, so we need * handle forcewake mannually. */ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); switch_mmio(pre, next, ring_id); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } /** @@ -564,10 +580,17 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt) { struct engine_mmio *mmio; - if (INTEL_GEN(gvt->dev_priv) >= 9) + if (INTEL_GEN(gvt->dev_priv) >= 9) { gvt->engine_mmio_list.mmio = gen9_engine_mmio_list; - else + gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list; + gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list); + gvt->engine_mmio_list.mocs_mmio_offset_list = gen9_mocs_mmio_offset_list; + gvt->engine_mmio_list.mocs_mmio_offset_list_cnt = ARRAY_SIZE(gen9_mocs_mmio_offset_list); + } else { gvt->engine_mmio_list.mmio = gen8_engine_mmio_list; + gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list; + gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list); + } for (mmio = gvt->engine_mmio_list.mmio; i915_mmio_reg_valid(mmio->reg); mmio++) { diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 276db53f1bf1..867e7629025b 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -30,7 +30,7 @@ * not do like this. */ #define _INTEL_BIOS_PRIVATE -#include "intel_vbt_defs.h" +#include "display/intel_vbt_defs.h" #define OPREGION_SIGNATURE "IntelGraphicsMem" #define MBOX_VBT (1<<3) diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index 428d252344f1..5b66e14c5b7b 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -60,6 +60,37 @@ #define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100) #define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100) +#define SKL_FLIP_EVENT(pipe, plane) (PRIMARY_A_FLIP_DONE + (plane) * 3 + (pipe)) + +#define PLANE_CTL_ASYNC_FLIP (1 << 9) +#define REG50080_FLIP_TYPE_MASK 0x3 +#define REG50080_FLIP_TYPE_ASYNC 0x1 + +#define REG_50080(_pipe, _plane) ({ \ + typeof(_pipe) (p) = (_pipe); \ + typeof(_plane) (q) = (_plane); \ + (((p) == PIPE_A) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50080)) : \ + (_MMIO(0x50090))) : \ + (((p) == PIPE_B) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50088)) : \ + (_MMIO(0x50098))) : \ + (((p) == PIPE_C) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x5008C)) : \ + (_MMIO(0x5009C))) : \ + (_MMIO(0x50080))))); }) + +#define REG_50080_TO_PIPE(_reg) ({ \ + typeof(_reg) (reg) = (_reg); \ + (((reg) == 0x50080 || (reg) == 0x50090) ? (PIPE_A) : \ + (((reg) == 0x50088 || (reg) == 0x50098) ? (PIPE_B) : \ + (((reg) == 0x5008C || (reg) == 0x5009C) ? (PIPE_C) : \ + (INVALID_PIPE)))); }) + +#define REG_50080_TO_PLANE(_reg) ({ \ + typeof(_reg) (reg) = (_reg); \ + (((reg) == 0x50080 || (reg) == 0x50088 || (reg) == 0x5008C) ? 
\ + (PLANE_PRIMARY) : \ + (((reg) == 0x50090 || (reg) == 0x50098 || (reg) == 0x5009C) ? \ + (PLANE_SPRITE0) : (I915_MAX_PLANES))); }) + #define GFX_MODE_BIT_SET_IN_MASK(val, bit) \ ((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16)))) @@ -71,6 +102,8 @@ #define FORCEWAKE_ACK_MEDIA_GEN9_REG 0x0D88 #define FORCEWAKE_ACK_HSW_REG 0x130044 +#define RB_HEAD_WRAP_CNT_MAX ((1 << 11) - 1) +#define RB_HEAD_WRAP_CNT_OFF 21 #define RB_HEAD_OFF_MASK ((1U << 21) - (1U << 2)) #define RB_TAIL_OFF_MASK ((1U << 21) - (1U << 3)) #define RB_TAIL_SIZE_MASK ((1U << 21) - (1U << 12)) diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 1c763a27a412..2369d4a9af94 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -465,7 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) scheduler->current_vgpu = NULL; } - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(&dev_priv->runtime_pm); spin_lock_bh(&scheduler->mmio_context_lock); for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { if (scheduler->engine_owner[ring_id] == vgpu) { @@ -474,6 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) } } spin_unlock_bh(&scheduler->mmio_context_lock); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); mutex_unlock(&vgpu->gvt->sched_lock); } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 05b953793316..5b2a7d072ec9 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -35,6 +35,11 @@ #include <linux/kthread.h> +#include "gem/i915_gem_context.h" +#include "gem/i915_gem_pm.h" +#include "gt/intel_context.h" +#include "gt/intel_ring.h" + #include "i915_drv.h" #include "gvt.h" @@ -80,8 +85,8 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload, u32 *reg_state, bool save) { struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; - u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset; - u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset; + u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset; + u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset; int i = 0; u32 flex_mmio[] = { i915_mmio_reg_offset(EU_PERF_CNTL0), @@ -93,7 +98,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload, i915_mmio_reg_offset(EU_PERF_CNTL6), }; - if (workload->ring_id != RCS) + if (workload->ring_id != RCS0) return; if (save) { @@ -149,7 +154,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) COPY_REG_MASKED(ctx_ctrl); COPY_REG(ctx_timestamp); - if (ring_id == RCS) { + if (ring_id == RCS0) { COPY_REG(bb_per_ctx_ptr); COPY_REG(rcs_indirect_ctx); COPY_REG(rcs_indirect_ctx_offset); @@ -177,7 +182,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) context_page_num = context_page_num >> PAGE_SHIFT; - if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS) + if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0) context_page_num = 19; i = 2; @@ -190,7 +195,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) return -EFAULT; } - page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i); + page = i915_gem_object_get_page(ctx_obj, i); dst = kmap(page); intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst, I915_GTT_PAGE_SIZE); @@ -277,17 +282,19 @@ static int shadow_context_status_change(struct notifier_block *nb, return NOTIFY_OK; } -static void 
shadow_context_descriptor_update(struct intel_context *ce) +static void +shadow_context_descriptor_update(struct intel_context *ce, + struct intel_vgpu_workload *workload) { - u64 desc = 0; + u64 desc = ce->lrc_desc; - desc = ce->lrc_desc; - - /* Update bits 0-11 of the context descriptor which includes flags + /* + * Update bits 0-11 of the context descriptor which includes flags * like GEN8_CTX_* cached in desc_template */ - desc &= U64_MAX << 12; - desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1); + desc &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT); + desc |= workload->ctx_desc.addressing_mode << + GEN8_CTX_ADDRESSING_MODE_SHIFT; ce->lrc_desc = desc; } @@ -298,12 +305,29 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload) struct i915_request *req = workload->req; void *shadow_ring_buffer_va; u32 *cs; + int err; - if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915) - || IS_COFFEELAKE(req->i915)) - && is_inhibit_context(req->hw_context)) + if (IS_GEN(req->i915, 9) && is_inhibit_context(req->hw_context)) intel_vgpu_restore_inhibit_context(vgpu, req); + /* + * To track whether a request has started on HW, we can emit a + * breadcrumb at the beginning of the request and check its + * timeline's HWSP to see if the breadcrumb has advanced past the + * start of this request. Actually, the request must have the + * init_breadcrumb if its timeline sets has_init_breadcrumb, or the + * scheduler might see a wrong state for it during reset. Since the + * requests from gvt always set the has_init_breadcrumb flag, we + * need to do the emit_init_breadcrumb for all the requests. + */ + if (req->engine->emit_init_breadcrumb) { + err = req->engine->emit_init_breadcrumb(req); + if (err) { + gvt_vgpu_err("fail to emit init breadcrumb\n"); + return err; + } + } + /* allocate shadow ring buffer */ cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32)); if (IS_ERR(cs)) { @@ -338,26 +362,26 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) wa_ctx->indirect_ctx.shadow_va = NULL; } -static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, - struct i915_gem_context *ctx) +static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, + struct i915_gem_context *ctx) { struct intel_vgpu_mm *mm = workload->shadow_mm; - struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; + struct i915_ppgtt *ppgtt = + i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx)); int i = 0; - if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed) - return -EINVAL; - if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { - px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0]; + px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0]; } else { for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) { - px_dma(ppgtt->pdp.page_directory[i]) = - mm->ppgtt_mm.shadow_pdps[i]; + struct i915_page_directory * const pd = + i915_pd_entry(ppgtt->pd, i); + + px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i]; } } - return 0; + i915_vm_put(&ppgtt->vm); } static int @@ -365,26 +389,19 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct intel_vgpu_submission *s = &vgpu->submission; - struct i915_gem_context *shadow_ctx = s->shadow_ctx; - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id]; struct i915_request *rq; - int ret = 0; - - lockdep_assert_held(&dev_priv->drm.struct_mutex); if (workload->req) - goto out; + return 0; - 
rq = i915_request_alloc(engine, shadow_ctx); + rq = i915_request_create(s->shadow[workload->ring_id]); if (IS_ERR(rq)) { gvt_vgpu_err("fail to allocate gem request\n"); - ret = PTR_ERR(rq); - goto out; + return PTR_ERR(rq); } + workload->req = i915_request_get(rq); -out: - return ret; + return 0; } /** @@ -399,43 +416,22 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct intel_vgpu_submission *s = &vgpu->submission; - struct i915_gem_context *shadow_ctx = s->shadow_ctx; - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id]; - struct intel_context *ce; int ret; - lockdep_assert_held(&dev_priv->drm.struct_mutex); + lockdep_assert_held(&vgpu->vgpu_lock); if (workload->shadow) return 0; - /* pin shadow context by gvt even the shadow context will be pinned - * when i915 alloc request. That is because gvt will update the guest - * context from shadow context when workload is completed, and at that - * moment, i915 may already unpined the shadow context to make the - * shadow_ctx pages invalid. So gvt need to pin itself. After update - * the guest context, gvt can unpin the shadow_ctx safely. - */ - ce = intel_context_pin(shadow_ctx, engine); - if (IS_ERR(ce)) { - gvt_vgpu_err("fail to pin shadow context\n"); - return PTR_ERR(ce); - } - - shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT); - shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode << - GEN8_CTX_ADDRESSING_MODE_SHIFT; - if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated)) - shadow_context_descriptor_update(ce); + shadow_context_descriptor_update(s->shadow[workload->ring_id], + workload); ret = intel_gvt_scan_and_shadow_ringbuffer(workload); if (ret) - goto err_unpin; + return ret; - if ((workload->ring_id == RCS) && - (workload->wa_ctx.indirect_ctx.size != 0)) { + if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) { ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); if (ret) goto err_shadow; @@ -445,8 +441,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) return 0; err_shadow: release_shadow_wa_ctx(&workload->wa_ctx); -err_unpin: - intel_context_unpin(ce); return ret; } @@ -485,7 +479,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) bb->obj->base.size); bb->clflush &= ~CLFLUSH_AFTER; } - i915_gem_obj_finish_shmem_access(bb->obj); + i915_gem_object_finish_access(bb->obj); bb->accessing = false; } else { @@ -509,18 +503,18 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) } ret = i915_gem_object_set_to_gtt_domain(bb->obj, - false); + false); if (ret) goto err; - i915_gem_obj_finish_shmem_access(bb->obj); - bb->accessing = false; - ret = i915_vma_move_to_active(bb->vma, workload->req, 0); if (ret) goto err; + + i915_gem_object_finish_access(bb->obj); + bb->accessing = false; } } return 0; @@ -574,10 +568,18 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) return 0; } -static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) +static void update_vreg_in_ctx(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + u32 ring_base; + + ring_base = dev_priv->engine[workload->ring_id]->mmio_base; + vgpu_vreg_t(vgpu, RING_START(ring_base)) = workload->rb_start; +} + +static void 
release_shadow_batch_buffer(struct intel_vgpu_workload *workload) +{ struct intel_vgpu_shadow_bb *bb, *pos; if (list_empty(&workload->shadow_bb)) @@ -586,12 +588,10 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) bb = list_first_entry(&workload->shadow_bb, struct intel_vgpu_shadow_bb, list); - mutex_lock(&dev_priv->drm.struct_mutex); - list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) { if (bb->obj) { if (bb->accessing) - i915_gem_obj_finish_shmem_access(bb->obj); + i915_gem_object_finish_access(bb->obj); if (bb->va && !IS_ERR(bb->va)) i915_gem_object_unpin_map(bb->obj); @@ -600,18 +600,18 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) i915_vma_unpin(bb->vma); i915_vma_close(bb->vma); } - __i915_gem_object_release_unless_active(bb->obj); + i915_gem_object_put(bb->obj); } list_del(&bb->list); kfree(bb); } - - mutex_unlock(&dev_priv->drm.struct_mutex); } static int prepare_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; + struct intel_vgpu_submission *s = &vgpu->submission; + int ring = workload->ring_id; int ret = 0; ret = intel_vgpu_pin_mm(workload->shadow_mm); @@ -620,8 +620,16 @@ static int prepare_workload(struct intel_vgpu_workload *workload) return ret; } + if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT || + !workload->shadow_mm->ppgtt_mm.shadowed) { + gvt_vgpu_err("workload shadow ppgtt isn't ready\n"); + return -EINVAL; + } + update_shadow_pdps(workload); + set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context); + ret = intel_vgpu_sync_oos_pages(workload->vgpu); if (ret) { gvt_vgpu_err("fail to vgpu sync oos pages\n"); @@ -671,9 +679,6 @@ err_unpin_mm: static int dispatch_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - struct intel_vgpu_submission *s = &vgpu->submission; - struct i915_gem_context *shadow_ctx = s->shadow_ctx; struct i915_request *rq; int ring_id = workload->ring_id; int ret; @@ -682,13 +687,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) ring_id, workload); mutex_lock(&vgpu->vgpu_lock); - mutex_lock(&dev_priv->drm.struct_mutex); - - ret = set_context_ppgtt_from_shadow(workload, shadow_ctx); - if (ret < 0) { - gvt_vgpu_err("workload shadow ppgtt isn't ready\n"); - goto err_req; - } ret = intel_gvt_workload_req_alloc(workload); if (ret) @@ -723,7 +721,6 @@ out: err_req: if (ret) workload->status = ret; - mutex_unlock(&dev_priv->drm.struct_mutex); mutex_unlock(&vgpu->vgpu_lock); return ret; } @@ -796,14 +793,35 @@ static void update_guest_context(struct intel_vgpu_workload *workload) void *src; unsigned long context_gpa, context_page_num; int i; + struct drm_i915_private *dev_priv = gvt->dev_priv; + u32 ring_base; + u32 head, tail; + u16 wrap_count; gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id, workload->ctx_desc.lrca); + head = workload->rb_head; + tail = workload->rb_tail; + wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF; + + if (tail < head) { + if (wrap_count == RB_HEAD_WRAP_CNT_MAX) + wrap_count = 0; + else + wrap_count += 1; + } + + head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail; + + ring_base = dev_priv->engine[workload->ring_id]->mmio_base; + vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail; + vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head; + context_page_num = rq->engine->context_size; context_page_num = context_page_num >> PAGE_SHIFT; - if 
(IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS) + if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0) context_page_num = 19; i = 2; @@ -817,7 +835,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload) return; } - page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i); + page = i915_gem_object_get_page(ctx_obj, i); src = kmap(page); intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src, I915_GTT_PAGE_SIZE); @@ -851,16 +869,16 @@ static void update_guest_context(struct intel_vgpu_workload *workload) } void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu, - unsigned long engine_mask) + intel_engine_mask_t engine_mask) { struct intel_vgpu_submission *s = &vgpu->submission; struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct intel_engine_cs *engine; struct intel_vgpu_workload *pos, *n; - unsigned int tmp; + intel_engine_mask_t tmp; /* free the unsubmited workloads in the queues. */ - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { + for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) { list_for_each_entry_safe(pos, n, &s->workload_q_head[engine->id], list) { list_del_init(&pos->list); @@ -903,8 +921,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) workload->status = 0; } - if (!workload->status && !(vgpu->resetting_eng & - ENGINE_MASK(ring_id))) { + if (!workload->status && + !(vgpu->resetting_eng & BIT(ring_id))) { update_guest_context(workload); for_each_set_bit(event, workload->pending_events, @@ -912,11 +930,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) intel_vgpu_trigger_virtual_event(vgpu, event); } - /* unpin shadow ctx as the shadow_ctx update is done */ - mutex_lock(&rq->i915->drm.struct_mutex); - intel_context_unpin(rq->hw_context); - mutex_unlock(&rq->i915->drm.struct_mutex); - i915_request_put(fetch_and_zero(&workload->req)); } @@ -927,7 +940,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) list_del_init(&workload->list); - if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { + if (workload->status || vgpu->resetting_eng & BIT(ring_id)) { /* if workload->status is not successful means HW GPU * has occurred GPU hang or something wrong with i915/GVT, * and GVT won't inject context switch interrupt to guest. @@ -941,7 +954,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) * cleaned up during the resetting process later, so doing * the workload clean up here doesn't have any impact. **/ - intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id)); + intel_vgpu_clean_workloads(vgpu, BIT(ring_id)); } workload->complete(workload); @@ -972,6 +985,7 @@ static int workload_thread(void *priv) int ret; bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9); DEFINE_WAIT_FUNC(wait, woken_wake_function); + struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm; kfree(p); @@ -995,14 +1009,21 @@ static int workload_thread(void *priv) workload->ring_id, workload, workload->vgpu->id); - intel_runtime_pm_get(gvt->dev_priv); + intel_runtime_pm_get(rpm); gvt_dbg_sched("ring id %d will dispatch workload %p\n", workload->ring_id, workload); if (need_force_wake) - intel_uncore_forcewake_get(gvt->dev_priv, + intel_uncore_forcewake_get(&gvt->dev_priv->uncore, FORCEWAKE_ALL); + /* + * Update the vReg of the vGPU which submitted this + * workload. The vGPU may use these registers for checking + * the context state. The value comes from GPU commands + * in this workload. 
@@ -1023,10 +1044,10 @@ complete:
 		complete_current_workload(gvt, ring_id);
 
 		if (need_force_wake)
-			intel_uncore_forcewake_put(gvt->dev_priv,
+			intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
 					FORCEWAKE_ALL);
 
-		intel_runtime_pm_put_unchecked(gvt->dev_priv);
+		intel_runtime_pm_put_unchecked(rpm);
 		if (ret && (vgpu_is_vm_unhealthy(ret)))
 			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
 	}
@@ -1109,17 +1130,20 @@ err:
 }
 
 static void
-i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
+i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s,
+				struct i915_ppgtt *ppgtt)
 {
-	struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
 	int i;
 
-	if (i915_vm_is_48bit(&i915_ppgtt->vm))
-		px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4;
-	else {
-		for (i = 0; i < GEN8_3LVL_PDPES; i++)
-			px_dma(i915_ppgtt->pdp.page_directory[i]) =
-				s->i915_context_pdps[i];
+	if (i915_vm_is_4lvl(&ppgtt->vm)) {
+		px_dma(ppgtt->pd) = s->i915_context_pml4;
+	} else {
+		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
+			struct i915_page_directory * const pd =
+				i915_pd_entry(ppgtt->pd, i);
+
+			px_dma(pd) = s->i915_context_pdps[i];
+		}
 	}
 }
 
@@ -1133,10 +1157,15 @@ i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
 	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
-	i915_context_ppgtt_root_restore(s);
-	i915_gem_context_put(s->shadow_ctx);
+
+	i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
+	for_each_engine(engine, vgpu->gvt->dev_priv, id)
+		intel_context_unpin(s->shadow[id]);
+
 	kmem_cache_destroy(s->workloads);
 }
 
@@ -1150,7 +1179,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
  *
  */
 void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
-				 unsigned long engine_mask)
+				 intel_engine_mask_t engine_mask)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 
@@ -1162,17 +1191,20 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
 }
 
 static void
-i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
+i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
+			     struct i915_ppgtt *ppgtt)
 {
-	struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
 	int i;
 
-	if (i915_vm_is_48bit(&i915_ppgtt->vm))
-		s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4);
-	else {
-		for (i = 0; i < GEN8_3LVL_PDPES; i++)
-			s->i915_context_pdps[i] =
-				px_dma(i915_ppgtt->pdp.page_directory[i]);
+	if (i915_vm_is_4lvl(&ppgtt->vm)) {
+		s->i915_context_pml4 = px_dma(ppgtt->pd);
+	} else {
+		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
+			struct i915_page_directory * const pd =
+				i915_pd_entry(ppgtt->pd, i);
+
+			s->i915_context_pdps[i] = px_dma(pd);
+		}
 	}
 }
 
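i915_context_ppgtt_root_save() and its restore twin now receive the ppgtt explicitly and branch on i915_vm_is_4lvl(): a 4-level address space has a single PML4 root, while a 3-level one has GEN8_3LVL_PDPES (four) page-directory roots. A runnable sketch of that save/restore shape; the types here are invented stand-ins for the i915 structures:

    #include <stdint.h>
    #include <string.h>

    #define NR_PDPES 4 /* GEN8_3LVL_PDPES in the real code */

    /* Invented stand-in: either one 4-level root or four 3-level
     * page-directory roots are live, selected by is_4lvl. */
    struct fake_ppgtt {
        int is_4lvl;
        uint64_t pml4;           /* 4-level root */
        uint64_t pdps[NR_PDPES]; /* 3-level roots */
    };

    struct saved_roots {
        uint64_t pml4;
        uint64_t pdps[NR_PDPES];
    };

    static void roots_save(struct saved_roots *s, const struct fake_ppgtt *p)
    {
        if (p->is_4lvl)
            s->pml4 = p->pml4;
        else
            memcpy(s->pdps, p->pdps, sizeof(s->pdps));
    }

    static void roots_restore(struct fake_ppgtt *p, const struct saved_roots *s)
    {
        if (p->is_4lvl)
            p->pml4 = s->pml4;
        else
            memcpy(p->pdps, s->pdps, sizeof(p->pdps));
    }

    int main(void)
    {
        struct fake_ppgtt p = { .is_4lvl = 1, .pml4 = 0xabcd000 };
        struct saved_roots s;

        roots_save(&s, &p);
        p.pml4 = 0;            /* the roots get scribbled on in between */
        roots_restore(&p, &s); /* ...and are put back on teardown */
        return p.pml4 == 0xabcd000 ? 0 : 1;
    }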
@@ -1188,17 +1220,48 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
  */
 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	enum intel_engine_id i;
 	struct intel_engine_cs *engine;
+	struct i915_gem_context *ctx;
+	struct i915_ppgtt *ppgtt;
+	enum intel_engine_id i;
 	int ret;
 
-	s->shadow_ctx = i915_gem_context_create_gvt(
-			&vgpu->gvt->dev_priv->drm);
-	if (IS_ERR(s->shadow_ctx))
-		return PTR_ERR(s->shadow_ctx);
+	ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MAX);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	i915_gem_context_set_force_single_submission(ctx);
+
+	ppgtt = i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
+	i915_context_ppgtt_root_save(s, ppgtt);
 
-	i915_context_ppgtt_root_save(s);
+	for_each_engine(engine, i915, i) {
+		struct intel_context *ce;
+
+		INIT_LIST_HEAD(&s->workload_q_head[i]);
+		s->shadow[i] = ERR_PTR(-EINVAL);
+
+		ce = intel_context_create(ctx, engine);
+		if (IS_ERR(ce)) {
+			ret = PTR_ERR(ce);
+			goto out_shadow_ctx;
+		}
+
+		if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
+			const unsigned int ring_size = 512 * SZ_4K;
+
+			ce->ring = __intel_context_ring_size(ring_size);
+		}
+
+		ret = intel_context_pin(ce);
+		intel_context_put(ce);
+		if (ret)
+			goto out_shadow_ctx;
+
+		s->shadow[i] = ce;
+	}
 
 	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
@@ -1214,16 +1277,24 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 		goto out_shadow_ctx;
 	}
 
-	for_each_engine(engine, vgpu->gvt->dev_priv, i)
-		INIT_LIST_HEAD(&s->workload_q_head[i]);
-
 	atomic_set(&s->running_workload_num, 0);
 	bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
 
+	i915_vm_put(&ppgtt->vm);
+	i915_gem_context_put(ctx);
 	return 0;
 
 out_shadow_ctx:
-	i915_gem_context_put(s->shadow_ctx);
+	i915_context_ppgtt_root_restore(s, ppgtt);
+	for_each_engine(engine, i915, i) {
+		if (IS_ERR(s->shadow[i]))
+			break;
+
+		intel_context_unpin(s->shadow[i]);
+		intel_context_put(s->shadow[i]);
+	}
+	i915_vm_put(&ppgtt->vm);
+	i915_gem_context_put(ctx);
 	return ret;
 }
 
@@ -1240,7 +1311,7 @@ out_shadow_ctx:
  *
  */
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
-				     unsigned long engine_mask,
+				     intel_engine_mask_t engine_mask,
 				     unsigned int interface)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
@@ -1344,7 +1415,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
 	struct intel_vgpu_mm *mm;
 	struct intel_vgpu *vgpu = workload->vgpu;
-	intel_gvt_gtt_type_t root_entry_type;
+	enum intel_gvt_gtt_type root_entry_type;
 	u64 pdps[GVT_RING_CTX_NR_PDPS];
 
 	switch (desc->addressing_mode) {
@@ -1372,9 +1443,6 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
 		((a)->lrca == (b)->lrca))
 
-#define get_last_workload(q) \
-	(list_empty(q) ? NULL : container_of(q->prev, \
-	struct intel_vgpu_workload, list))
-
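In the rewritten intel_vgpu_setup_submission() above, every s->shadow[] slot is seeded with ERR_PTR(-EINVAL) before the per-engine contexts are created and pinned, so the out_shadow_ctx path can stop unwinding at the first slot that was never filled. A runnable sketch of that sentinel pattern; ERR_PTR/IS_ERR are mimicked in userspace and the failure point is invented:

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define NUM_ENGINES 5

    /* Userspace mimics of the kernel's ERR_PTR()/IS_ERR() helpers. */
    static void *ERR_PTR(long err) { return (void *)err; }
    static int IS_ERR(const void *p) { return (uintptr_t)p >= (uintptr_t)-4095; }

    static void *shadow[NUM_ENGINES];

    static int setup(void)
    {
        int i;

        /* Seed every slot with an error sentinel... */
        for (i = 0; i < NUM_ENGINES; i++)
            shadow[i] = ERR_PTR(-EINVAL);

        /* ...then fill them in order, failing partway for the demo. */
        for (i = 0; i < NUM_ENGINES; i++) {
            void *ce = (i < 3) ? malloc(16) : NULL;
            if (!ce)
                goto out;
            shadow[i] = ce;
        }
        return 0;

    out:
        /* Unwind stops at the first slot still holding a sentinel, so
         * only the entries that were actually created get released. */
        for (i = 0; i < NUM_ENGINES; i++) {
            if (IS_ERR(shadow[i]))
                break;
            free(shadow[i]);
            shadow[i] = ERR_PTR(-EINVAL);
        }
        return -ENOMEM;
    }

    /* The demo deliberately injects a failure at slot 3. */
    int main(void) { return setup() == -ENOMEM ? 0 : 1; }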
 /**
  * intel_vgpu_create_workload - create a vGPU workload
  * @vgpu: a vGPU
@@ -1394,11 +1462,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct list_head *q = workload_q_head(vgpu, ring_id);
-	struct intel_vgpu_workload *last_workload = get_last_workload(q);
+	struct intel_vgpu_workload *last_workload = NULL;
 	struct intel_vgpu_workload *workload = NULL;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	u64 ring_context_gpa;
 	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
+	u32 guest_head;
 	int ret;
 
 	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
@@ -1414,18 +1483,25 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
 			RING_CTX_OFF(ring_tail.val), &tail, 4);
 
+	guest_head = head;
+
 	head &= RB_HEAD_OFF_MASK;
 	tail &= RB_TAIL_OFF_MASK;
 
-	if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
-		gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
-		gvt_dbg_el("ctx head %x real head %lx\n", head,
-				last_workload->rb_tail);
-		/*
-		 * cannot use guest context head pointer here,
-		 * as it might not be updated at this time
-		 */
-		head = last_workload->rb_tail;
+	list_for_each_entry_reverse(last_workload, q, list) {
+
+		if (same_context(&last_workload->ctx_desc, desc)) {
+			gvt_dbg_el("ring id %d cur workload == last\n",
+					ring_id);
+			gvt_dbg_el("ctx head %x real head %lx\n", head,
+					last_workload->rb_tail);
+			/*
+			 * cannot use guest context head pointer here,
+			 * as it might not be updated at this time
+			 */
+			head = last_workload->rb_tail;
+			break;
+		}
 	}
 
 	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
@@ -1438,6 +1514,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
 			RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
 
+	if (!intel_gvt_ggtt_validate_range(vgpu, start,
+				_RING_CTL_BUF_SIZE(ctl))) {
+		gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start);
+		return ERR_PTR(-EINVAL);
+	}
+
 	workload = alloc_workload(vgpu);
 	if (IS_ERR(workload))
 		return workload;
@@ -1446,11 +1528,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	workload->ctx_desc = *desc;
 	workload->ring_context_gpa = ring_context_gpa;
 	workload->rb_head = head;
+	workload->guest_rb_head = guest_head;
 	workload->rb_tail = tail;
 	workload->rb_start = start;
 	workload->rb_ctl = ctl;
 
-	if (ring_id == RCS) {
+	if (ring_id == RCS0) {
 		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
 				RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
 		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -1461,9 +1544,31 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 		workload->wa_ctx.indirect_ctx.size =
 			(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
 			CACHELINE_BYTES;
+
+		if (workload->wa_ctx.indirect_ctx.size != 0) {
+			if (!intel_gvt_ggtt_validate_range(vgpu,
+				workload->wa_ctx.indirect_ctx.guest_gma,
+				workload->wa_ctx.indirect_ctx.size)) {
+				gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
+				    workload->wa_ctx.indirect_ctx.guest_gma);
+				kmem_cache_free(s->workloads, workload);
+				return ERR_PTR(-EINVAL);
+			}
+		}
+
 		workload->wa_ctx.per_ctx.guest_gma =
 			per_ctx & PER_CTX_ADDR_MASK;
 		workload->wa_ctx.per_ctx.valid = per_ctx & 1;
+		if (workload->wa_ctx.per_ctx.valid) {
+			if (!intel_gvt_ggtt_validate_range(vgpu,
+					workload->wa_ctx.per_ctx.guest_gma,
+					CACHELINE_BYTES)) {
+				gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
+					workload->wa_ctx.per_ctx.guest_gma);
+				kmem_cache_free(s->workloads, workload);
+				return ERR_PTR(-EINVAL);
+			}
+		}
 	}
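All three new checks funnel guest-supplied graphics addresses (ring buffer, indirect wa_ctx, per_ctx) through intel_gvt_ggtt_validate_range() before the host shadows or scans them; otherwise a buggy or malicious guest could point them at memory outside its allotment. A runnable sketch of such a bounds check; the aperture window and the helper are invented illustrations, not the GVT implementation:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Invented vGPU aperture window. */
    #define VGPU_GMA_BASE 0x00100000ull
    #define VGPU_GMA_SIZE 0x00f00000ull

    /* A guest range is acceptable only if it is non-empty, does not
     * wrap, and lies entirely inside the window owned by this vGPU. */
    static bool gma_range_valid(uint64_t gma, uint64_t size)
    {
        if (size == 0 || gma + size < gma)
            return false;
        return gma >= VGPU_GMA_BASE &&
               gma + size <= VGPU_GMA_BASE + VGPU_GMA_SIZE;
    }

    int main(void)
    {
        printf("%d\n", gma_range_valid(0x00200000, 0x1000));          /* 1 */
        printf("%d\n", gma_range_valid(0xffffffffff000000ull, 0x10)); /* 0 */
        return 0;
    }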
 
 	gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
@@ -1479,11 +1584,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	 * as there is only one pre-allocated buf-obj for shadow.
 	 */
 	if (list_empty(workload_q_head(vgpu, ring_id))) {
-		intel_runtime_pm_get(dev_priv);
-		mutex_lock(&dev_priv->drm.struct_mutex);
+		intel_runtime_pm_get(&dev_priv->runtime_pm);
 		ret = intel_gvt_scan_and_shadow_workload(workload);
-		mutex_unlock(&dev_priv->drm.struct_mutex);
-		intel_runtime_pm_put_unchecked(dev_priv);
+		intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
 	}
 
 	if (ret) {
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 0635b2c4bed7..c50d14a9ce85 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -100,6 +100,7 @@ struct intel_vgpu_workload {
 	struct execlist_ctx_descriptor_format ctx_desc;
 	struct execlist_ring_context *ring_context;
 	unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
+	unsigned long guest_rb_head;
 	bool restore_inhibit;
 	struct intel_vgpu_elsp_dwords elsp_dwords;
 	bool emulate_schedule_in;
@@ -142,12 +143,12 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);
 
 void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
-				 unsigned long engine_mask);
+				 intel_engine_mask_t engine_mask);
 
 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
 
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
-				     unsigned long engine_mask,
+				     intel_engine_mask_t engine_mask,
 				     unsigned int interface);
 
 extern const struct intel_vgpu_submission_ops
@@ -160,6 +161,6 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
 
 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
-				unsigned long engine_mask);
+				intel_engine_mask_t engine_mask);
 
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/trace_points.c b/drivers/gpu/drm/i915/gvt/trace_points.c
index a3deed692b9c..fe552e877e09 100644
--- a/drivers/gpu/drm/i915/gvt/trace_points.c
+++ b/drivers/gpu/drm/i915/gvt/trace_points.c
@@ -28,8 +28,6 @@
  *
  */
 
-#include "trace.h"
-
 #ifndef __CHECKER__
 #define CREATE_TRACE_POINTS
 #include "trace.h"
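The trace_points.c hunk removes a duplicate #include "trace.h". With the kernel's tracepoint machinery the trace header is deliberately re-readable, but exactly one translation unit defines CREATE_TRACE_POINTS before including it, and that inclusion is what instantiates the event bodies; a stray plain include ahead of it is at best redundant. The canonical shape, sketched from the generic tracepoint idiom rather than GVT's actual trace.h:

    /* trace.h: guarded for ordinary users, but re-readable when the
     * tracepoint machinery defines TRACE_HEADER_MULTI_READ. */
    #if !defined(_GVT_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
    #define _GVT_TRACE_H_

    #include <linux/tracepoint.h>

    /* TRACE_EVENT(...) definitions live here. */

    #endif /* _GVT_TRACE_H_ */

    /* Must stay outside the guard. */
    #include <trace/define_trace.h>

    /* trace_points.c: the single place that expands the event bodies. */
    #define CREATE_TRACE_POINTS
    #include "trace.h"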
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 720e2b10adaa..d5a6e4e3d0fd 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -44,7 +44,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
 	vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
 	vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
 
-	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
+	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
 	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
 	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;
 
@@ -420,9 +420,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	if (ret)
 		goto out_clean_submission;
 
-	ret = intel_gvt_debugfs_add_vgpu(vgpu);
-	if (ret)
-		goto out_clean_sched_policy;
+	intel_gvt_debugfs_add_vgpu(vgpu);
 
 	ret = intel_gvt_hypervisor_set_opregion(vgpu);
 	if (ret)
@@ -526,11 +524,11 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
  * GPU engines. For FLR, engine_mask is ignored.
  */
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
-				 unsigned int engine_mask)
+				 intel_engine_mask_t engine_mask)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-	unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
+	intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
 
 	gvt_dbg_core("------------------------------------------\n");
 	gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",