 drivers/gpu/drm/i915/gvt/display.c      | 35
 drivers/gpu/drm/i915/gvt/dmabuf.c       | 22
 drivers/gpu/drm/i915/gvt/fb_decoder.c   |  3
 drivers/gpu/drm/i915/gvt/firmware.c     |  2
 drivers/gpu/drm/i915/gvt/gvt.c          |  6
 drivers/gpu/drm/i915/gvt/gvt.h          | 16
 drivers/gpu/drm/i915/gvt/handlers.c     |  8
 drivers/gpu/drm/i915/gvt/mmio.c         | 12
 drivers/gpu/drm/i915/gvt/page_track.c   |  5
 drivers/gpu/drm/i915/gvt/sched_policy.c | 36
 drivers/gpu/drm/i915/gvt/scheduler.c    | 14
 drivers/gpu/drm/i915/gvt/vgpu.c         | 56
 drivers/gpu/drm/i915/i915_pvinfo.h      |  5
 13 files changed, 145 insertions(+), 75 deletions(-)
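The hunks below split the single device-wide gvt->lock into a small
hierarchy: a per-vGPU vgpu_lock for vGPU-local state and a gvt-wide
sched_lock for scheduler state, leaving gvt->lock for truly global
resources such as the vgpu IDR. The ordering sketched here is inferred
from the hunks that follow, not stated verbatim in the patch:

	/*
	 * Lock ordering, outer to inner, as exercised by this patch:
	 *
	 *   gvt->lock                      global resources (vgpu_idr, types)
	 *     vgpu->vgpu_lock              per-vGPU vregs, display, page track
	 *       gvt->sched_lock            scheduler state (sched_data/ctl)
	 *       dev_priv->drm.struct_mutex i915 GEM state (dispatch path)
	 *
	 * A path holding only an inner lock must drop it before taking an
	 * outer one (see pipeconf_mmio_write in handlers.c).
	 */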
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 120e24c3fc62..38521fa81bf9 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -337,26 +337,28 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
struct intel_gvt_irq *irq = &gvt->irq;
struct intel_vgpu *vgpu;
int pipe, id;
+ bool found = false;
- if (WARN_ON(!mutex_is_locked(&gvt->lock)))
- return;
-
+ mutex_lock(&gvt->lock);
for_each_active_vgpu(gvt, vgpu, id) {
for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
- if (pipe_is_enabled(vgpu, pipe))
- goto out;
+ if (pipe_is_enabled(vgpu, pipe)) {
+ found = true;
+ break;
+ }
}
+ if (found)
+ break;
}
/* all the pipes are disabled */
- hrtimer_cancel(&irq->vblank_timer.timer);
- return;
-
-out:
- hrtimer_start(&irq->vblank_timer.timer,
- ktime_add_ns(ktime_get(), irq->vblank_timer.period),
- HRTIMER_MODE_ABS);
-
+ if (!found)
+ hrtimer_cancel(&irq->vblank_timer.timer);
+ else
+ hrtimer_start(&irq->vblank_timer.timer,
+ ktime_add_ns(ktime_get(), irq->vblank_timer.period),
+ HRTIMER_MODE_ABS);
+ mutex_unlock(&gvt->lock);
}
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
@@ -393,8 +395,10 @@ static void emulate_vblank(struct intel_vgpu *vgpu)
{
int pipe;
+ mutex_lock(&vgpu->vgpu_lock);
for_each_pipe(vgpu->gvt->dev_priv, pipe)
emulate_vblank_on_pipe(vgpu, pipe);
+ mutex_unlock(&vgpu->vgpu_lock);
}
/**
@@ -409,11 +413,10 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
struct intel_vgpu *vgpu;
int id;
- if (WARN_ON(!mutex_is_locked(&gvt->lock)))
- return;
-
+ mutex_lock(&gvt->lock);
for_each_active_vgpu(gvt, vgpu, id)
emulate_vblank(vgpu);
+ mutex_unlock(&gvt->lock);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 6f4f8e941fc2..d2eb2f7754b9 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -192,6 +192,14 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
return obj;
}
+static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
+{
+ if (c && c->x_hot <= c->width && c->y_hot <= c->height)
+ return true;
+ else
+ return false;
+}
+
static int vgpu_get_plane_info(struct drm_device *dev,
struct intel_vgpu *vgpu,
struct intel_vgpu_fb_info *info,
@@ -229,12 +237,14 @@ static int vgpu_get_plane_info(struct drm_device *dev,
info->x_pos = c.x_pos;
info->y_pos = c.y_pos;
- /* The invalid cursor hotspot value is delivered to host
- * until we find a way to get the cursor hotspot info of
- * guest OS.
- */
- info->x_hot = UINT_MAX;
- info->y_hot = UINT_MAX;
+ if (validate_hotspot(&c)) {
+ info->x_hot = c.x_hot;
+ info->y_hot = c.y_hot;
+ } else {
+ info->x_hot = UINT_MAX;
+ info->y_hot = UINT_MAX;
+ }
+
info->size = (((info->stride * c.height * c.bpp) / 8)
+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
} else {
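Together with the vgpu.c hunk further down, which initializes
cursor_x_hot/cursor_y_hot to UINT_MAX in populate_pvinfo_page(), the net
effect is: a guest that publishes a hotspot inside the cursor plane gets
it passed through, anything else keeps the old "unknown hotspot"
sentinel. A condensed sketch of that decision (the helper name is
hypothetical, not part of the patch):

	struct hotspot { unsigned int x, y; };

	/* UINT_MAX comes from <linux/kernel.h> in kernel code. */
	static struct hotspot
	pick_hotspot(const struct intel_vgpu_cursor_plane_format *c)
	{
		struct hotspot hs = { UINT_MAX, UINT_MAX }; /* unknown */

		if (c && c->x_hot <= c->width && c->y_hot <= c->height) {
			hs.x = c->x_hot;	/* trusted: inside the plane */
			hs.y = c->y_hot;
		}
		return hs;
	}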
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 00b788cf8b13..20b502c44eae 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -36,6 +36,7 @@
#include <uapi/drm/drm_fourcc.h>
#include "i915_drv.h"
#include "gvt.h"
+#include "i915_pvinfo.h"
#define PRIMARY_FORMAT_NUM 16
struct pixel_format {
@@ -384,6 +385,8 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
plane->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT;
+ plane->x_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot));
+ plane->y_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot));
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index a73e1d418c22..4ac18b447247 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -162,7 +162,7 @@ static int verify_firmware(struct intel_gvt *gvt,
h = (struct gvt_firmware_header *)fw->data;
- crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+ crc32_start = offsetofend(struct gvt_firmware_header, crc32);
mem = fw->data + crc32_start;
#define VERIFY(s, a, b) do { \
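offsetofend() is the <linux/stddef.h> helper that evaluates to the
offset of the first byte past a member, so the CRC region start no
longer hard-codes the size of crc32. Roughly (paraphrasing the kernel
macro of that era):

	#define offsetofend(TYPE, MEMBER) \
		(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

	/* For a u32 crc32 member the two spellings agree:
	 *   offsetofend(struct gvt_firmware_header, crc32)
	 *     == offsetof(struct gvt_firmware_header, crc32) + 4
	 * but offsetofend() keeps tracking the field if its type changes.
	 */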
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 61bd14fcb649..22a3ddff38a3 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -271,11 +271,8 @@ static int gvt_service_thread(void *data)
continue;
if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
- (void *)&gvt->service_request)) {
- mutex_lock(&gvt->lock);
+ (void *)&gvt->service_request))
intel_gvt_emulate_vblank(gvt);
- mutex_unlock(&gvt->lock);
- }
if (test_bit(INTEL_GVT_REQUEST_SCHED,
(void *)&gvt->service_request) ||
@@ -379,6 +376,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
idr_init(&gvt->vgpu_idr);
spin_lock_init(&gvt->scheduler.mmio_context_lock);
mutex_init(&gvt->lock);
+ mutex_init(&gvt->sched_lock);
gvt->dev_priv = dev_priv;
init_device_info(gvt);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 2ff0d40281a9..de2a3a2580be 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -170,12 +170,18 @@ struct intel_vgpu_submission {
struct intel_vgpu {
struct intel_gvt *gvt;
+ struct mutex vgpu_lock;
int id;
unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
bool active;
bool pv_notified;
bool failsafe;
unsigned int resetting_eng;
+
+ /* Both sched_data and sched_ctl can be seen as part of the global gvt
+ * scheduler structure, so these two per-vgpu fields are protected
+ * by sched_lock, not vgpu_lock.
+ */
void *sched_data;
struct vgpu_sched_ctl sched_ctl;
@@ -294,7 +300,13 @@ struct intel_vgpu_type {
};
struct intel_gvt {
+ /* GVT scope lock: protects GVT itself and any resource not yet
+ * protected by a finer-grained lock (the vgpu and scheduler locks).
+ */
struct mutex lock;
+ /* scheduler scope lock: protects gvt- and vgpu-level scheduling data */
+ struct mutex sched_lock;
+
struct drm_i915_private *dev_priv;
struct idr vgpu_idr; /* vGPU IDR pool */
@@ -314,6 +326,10 @@ struct intel_gvt {
struct task_struct *service_thread;
wait_queue_head_t service_thread_wq;
+
+ /* service_request is only ever accessed with atomic bit operations,
+ * so the big gvt lock is not needed to protect it.
+ */
unsigned long service_request;
struct {
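The service_request comment codifies an existing lock-free convention;
the producer/consumer pair looks roughly like this elsewhere in gvt (a
sketch of the surrounding code, not something this patch adds):

	/* Producer: set the request bit and wake the service thread. */
	static inline void intel_gvt_request_service(struct intel_gvt *gvt,
						     int service)
	{
		set_bit(service, (void *)&gvt->service_request);
		wake_up(&gvt->service_thread_wq);
	}

	/* Consumer (service thread): atomically claim the bit. */
	if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
			       (void *)&gvt->service_request))
		intel_gvt_emulate_vblank(gvt);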
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 4b6532fb789a..d60c2bee00fb 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -316,6 +316,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
}
}
+ /* vgpu_lock is already held by the MMIO r/w emulation path */
intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
/* sw will wait for the device to ack the reset request */
@@ -420,7 +421,10 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
else
vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
+ /* vgpu_lock is already held by the MMIO r/w emulation path */
+ mutex_unlock(&vgpu->vgpu_lock);
intel_gvt_check_vblank_emulation(vgpu->gvt);
+ mutex_lock(&vgpu->vgpu_lock);
return 0;
}
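The unlock/relock around intel_gvt_check_vblank_emulation() exists
because pipeconf_mmio_write() runs with the inner vgpu_lock held (taken
by the MMIO entry points in mmio.c), while the vblank check takes the
outer gvt->lock. Acquiring an outer lock while holding an inner one
would invert the ordering; the hazard avoided is the classic ABBA
deadlock (hypothetical, not code from the patch):

	/* Thread A (MMIO write)             Thread B (vblank service)
	 *   mutex_lock(&vgpu->vgpu_lock);     mutex_lock(&gvt->lock);
	 *   mutex_lock(&gvt->lock);           mutex_lock(&vgpu->vgpu_lock);
	 *
	 * Each thread now waits on the lock the other holds. Dropping
	 * vgpu_lock before taking gvt->lock breaks the cycle.
	 */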
@@ -1204,8 +1208,8 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
ret = handle_g2v_notification(vgpu, data);
break;
/* add xhot and yhot to handled list to avoid error log */
- case 0x78830:
- case 0x78834:
+ case _vgtif_reg(cursor_x_hot):
+ case _vgtif_reg(cursor_y_hot):
case _vgtif_reg(pdp[0].lo):
case _vgtif_reg(pdp[0].hi):
case _vgtif_reg(pdp[1].lo):
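The literals being replaced decode as the PVINFO page base plus the
offsets of the fields added at the end of this patch; assuming
_vgtif_reg(x) expands to VGT_PVINFO_PAGE + offsetof(struct vgt_if, x)
(the non-_MMIO form used by gvt), the substitution is exact:

	/* VGT_PVINFO_PAGE == 0x78000, so:
	 *   _vgtif_reg(cursor_x_hot) == 0x78000 + 0x830 == 0x78830
	 *   _vgtif_reg(cursor_y_hot) == 0x78000 + 0x834 == 0x78834
	 * matching the magic numbers removed above.
	 */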
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index e4960aff68bd..2be1be2cf49a 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -67,7 +67,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
return;
gvt = vgpu->gvt;
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
if (reg_is_mmio(gvt, offset)) {
if (read)
@@ -85,7 +85,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
memcpy(pt, p_data, bytes);
}
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
/**
@@ -109,7 +109,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
return 0;
}
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
@@ -156,7 +156,7 @@ err:
gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
offset, bytes);
out:
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
@@ -182,7 +182,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
return 0;
}
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
@@ -220,7 +220,7 @@ err:
gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
bytes);
out:
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
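With the switch from gvt->lock to vgpu_lock, trapped MMIO accesses from
different vGPUs no longer serialize on one device-wide mutex; only
accesses to the same vGPU contend. The resulting shape of each entry
point (condensed; do_emulate_read is a hypothetical placeholder for the
body above):

	int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
					 void *p_data, unsigned int bytes)
	{
		int ret;

		mutex_lock(&vgpu->vgpu_lock);	/* per-vGPU, not device-wide */
		ret = do_emulate_read(vgpu, pa, p_data, bytes);
		mutex_unlock(&vgpu->vgpu_lock);
		return ret;
	}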
diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c
index 53e2bd79c97d..256d0db8bbb1 100644
--- a/drivers/gpu/drm/i915/gvt/page_track.c
+++ b/drivers/gpu/drm/i915/gvt/page_track.c
@@ -157,11 +157,10 @@ int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
void *data, unsigned int bytes)
{
- struct intel_gvt *gvt = vgpu->gvt;
struct intel_vgpu_page_track *page_track;
int ret = 0;
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT);
if (!page_track) {
@@ -179,6 +178,6 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
}
out:
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index d053cbe1dc94..09d7bb72b4ff 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -228,7 +228,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
ktime_t cur_time;
- mutex_lock(&gvt->lock);
+ mutex_lock(&gvt->sched_lock);
cur_time = ktime_get();
if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
@@ -244,7 +244,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
tbs_sched_func(sched_data);
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&gvt->sched_lock);
}
static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
@@ -359,39 +359,65 @@ static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
+ int ret;
+
+ mutex_lock(&gvt->sched_lock);
gvt->scheduler.sched_ops = &tbs_schedule_ops;
+ ret = gvt->scheduler.sched_ops->init(gvt);
+ mutex_unlock(&gvt->sched_lock);
- return gvt->scheduler.sched_ops->init(gvt);
+ return ret;
}
void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
+ mutex_lock(&gvt->sched_lock);
gvt->scheduler.sched_ops->clean(gvt);
+ mutex_unlock(&gvt->sched_lock);
}
+/* For the per-vgpu scheduler policy there are two per-vgpu fields:
+ * sched_data and sched_ctl. We treat them as part of the global
+ * scheduler, so they are protected by gvt->sched_lock. Callers
+ * decide for themselves whether vgpu_lock must also be held around
+ * these calls.
+ */
+
int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
- return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+ int ret;
+
+ mutex_lock(&vgpu->gvt->sched_lock);
+ ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+ mutex_unlock(&vgpu->gvt->sched_lock);
+
+ return ret;
}
void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
+ mutex_lock(&vgpu->gvt->sched_lock);
vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
+ mutex_unlock(&vgpu->gvt->sched_lock);
}
void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+ mutex_lock(&vgpu->gvt->sched_lock);
if (!vgpu_data->active) {
gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
}
+ mutex_unlock(&vgpu->gvt->sched_lock);
}
void intel_gvt_kick_schedule(struct intel_gvt *gvt)
{
+ mutex_lock(&gvt->sched_lock);
intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
+ mutex_unlock(&gvt->sched_lock);
}
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
@@ -406,6 +432,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
+ mutex_lock(&vgpu->gvt->sched_lock);
scheduler->sched_ops->stop_schedule(vgpu);
if (scheduler->next_vgpu == vgpu)
@@ -425,4 +452,5 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
+ mutex_unlock(&vgpu->gvt->sched_lock);
}
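Per the new comment, the sched-policy wrappers take sched_lock
themselves and leave vgpu_lock to the caller. For example,
intel_gvt_deactivate_vgpu() (vgpu.c below) calls
intel_vgpu_stop_schedule() with vgpu_lock already held, which pins the
ordering vgpu_lock -> sched_lock. Condensed call shape:

	/* vgpu.c (caller)                 sched_policy.c (callee) */
	mutex_lock(&vgpu->vgpu_lock);
	intel_vgpu_stop_schedule(vgpu);	/* takes gvt->sched_lock inside,
					 * releases it before returning */
	mutex_unlock(&vgpu->vgpu_lock);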
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 7f5e01df95ee..cf5a22cb6e06 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -634,6 +634,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
ring_id, workload);
+ mutex_lock(&vgpu->vgpu_lock);
mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_scan_and_shadow_workload(workload);
@@ -654,6 +655,7 @@ out:
}
mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
@@ -663,7 +665,7 @@ static struct intel_vgpu_workload *pick_next_workload(
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
- mutex_lock(&gvt->lock);
+ mutex_lock(&gvt->sched_lock);
/*
* no current vgpu / will be scheduled out / no workload
@@ -709,7 +711,7 @@ static struct intel_vgpu_workload *pick_next_workload(
atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&gvt->sched_lock);
return workload;
}
@@ -807,7 +809,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
struct i915_request *rq = workload->req;
int event;
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
+ mutex_lock(&gvt->sched_lock);
/* For the workload w/ request, needs to wait for the context
* switch to make sure request is completed.
@@ -883,7 +886,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
if (gvt->scheduler.need_reschedule)
intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&gvt->sched_lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
struct workload_thread_param {
@@ -935,9 +939,7 @@ static int workload_thread(void *priv)
intel_uncore_forcewake_get(gvt->dev_priv,
FORCEWAKE_ALL);
- mutex_lock(&gvt->lock);
ret = dispatch_workload(workload);
- mutex_unlock(&gvt->lock);
if (ret) {
vgpu = workload->vgpu;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 2e0a02a80fe4..889d10f8ee96 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -58,6 +58,9 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
+ vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX;
+ vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX;
+
gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
@@ -223,22 +226,20 @@ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
*/
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
- struct intel_gvt *gvt = vgpu->gvt;
-
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
vgpu->active = false;
if (atomic_read(&vgpu->submission.running_workload_num)) {
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
intel_gvt_wait_vgpu_idle(vgpu);
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
}
intel_vgpu_stop_schedule(vgpu);
intel_vgpu_dmabuf_cleanup(vgpu);
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
/**
@@ -252,14 +253,11 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
WARN(vgpu->active, "vGPU is still active!\n");
intel_gvt_debugfs_remove_vgpu(vgpu);
- idr_remove(&gvt->vgpu_idr, vgpu->id);
- if (idr_is_empty(&gvt->vgpu_idr))
- intel_gvt_clean_irq(gvt);
intel_vgpu_clean_sched_policy(vgpu);
intel_vgpu_clean_submission(vgpu);
intel_vgpu_clean_display(vgpu);
@@ -269,10 +267,16 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_vgpu_free_resource(vgpu);
intel_vgpu_clean_mmio(vgpu);
intel_vgpu_dmabuf_cleanup(vgpu);
- vfree(vgpu);
+ mutex_unlock(&vgpu->vgpu_lock);
+ mutex_lock(&gvt->lock);
+ idr_remove(&gvt->vgpu_idr, vgpu->id);
+ if (idr_is_empty(&gvt->vgpu_idr))
+ intel_gvt_clean_irq(gvt);
intel_gvt_update_vgpu_types(gvt);
mutex_unlock(&gvt->lock);
+
+ vfree(vgpu);
}
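Note the teardown ordering above: per-vGPU cleanup runs under
vgpu_lock, the lock is dropped, gvt-wide bookkeeping (IDR removal, type
update) runs under gvt->lock, and only then is the vgpu freed. Since
vgpu_lock is embedded in *vgpu, freeing any earlier could free a mutex
another thread is still blocked on. Condensed:

	mutex_unlock(&vgpu->vgpu_lock);	/* per-vGPU state now quiescent */

	mutex_lock(&gvt->lock);
	idr_remove(&gvt->vgpu_idr, vgpu->id);	/* unpublish: no new lookups */
	intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);

	vfree(vgpu);	/* safe once the embedded vgpu_lock is unreachable */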
#define IDLE_VGPU_IDR 0
@@ -298,6 +302,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
vgpu->id = IDLE_VGPU_IDR;
vgpu->gvt = gvt;
+ mutex_init(&vgpu->vgpu_lock);
for (i = 0; i < I915_NUM_ENGINES; i++)
INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);
@@ -324,7 +329,10 @@ out_free_vgpu:
*/
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
{
+ mutex_lock(&vgpu->vgpu_lock);
intel_vgpu_clean_sched_policy(vgpu);
+ mutex_unlock(&vgpu->vgpu_lock);
+
vfree(vgpu);
}
@@ -342,8 +350,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (!vgpu)
return ERR_PTR(-ENOMEM);
- mutex_lock(&gvt->lock);
-
ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
GFP_KERNEL);
if (ret < 0)
@@ -353,6 +359,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
vgpu->handle = param->handle;
vgpu->gvt = gvt;
vgpu->sched_ctl.weight = param->weight;
+ mutex_init(&vgpu->vgpu_lock);
INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
idr_init(&vgpu->object_idr);
@@ -400,8 +407,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
- mutex_unlock(&gvt->lock);
-
return vgpu;
out_clean_sched_policy:
@@ -424,7 +429,6 @@ out_clean_idr:
idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
vfree(vgpu);
- mutex_unlock(&gvt->lock);
return ERR_PTR(ret);
}
@@ -456,12 +460,12 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
+ mutex_lock(&gvt->lock);
vgpu = __intel_gvt_create_vgpu(gvt, &param);
- if (IS_ERR(vgpu))
- return vgpu;
-
- /* calculate left instance change for types */
- intel_gvt_update_vgpu_types(gvt);
+ if (!IS_ERR(vgpu))
+ /* calculate left instance change for types */
+ intel_gvt_update_vgpu_types(gvt);
+ mutex_unlock(&gvt->lock);
return vgpu;
}
@@ -473,7 +477,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
* @engine_mask: engines to reset for GT reset
*
* This function is called when user wants to reset a virtual GPU through
- * device model reset or GT reset. The caller should hold the gvt lock.
+ * device model reset or GT reset. The caller should hold the vgpu lock.
*
* vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
* the whole vGPU to default state as when it is created. This vGPU function
@@ -513,9 +517,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
* scheduler when the reset is triggered by current vgpu.
*/
if (scheduler->current_vgpu == NULL) {
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
intel_gvt_wait_vgpu_idle(vgpu);
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
}
intel_vgpu_reset_submission(vgpu, resetting_eng);
@@ -555,7 +559,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
*/
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
- mutex_lock(&vgpu->gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
intel_gvt_reset_vgpu_locked(vgpu, true, 0);
- mutex_unlock(&vgpu->gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index 55bde4a02289..eeaa3d506d95 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -94,7 +94,10 @@ struct vgt_if {
u32 rsv5[4];
u32 g2v_notify;
- u32 rsv6[7];
+ u32 rsv6[5];
+
+ u32 cursor_x_hot;
+ u32 cursor_y_hot;
struct {
u32 lo;
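Carving the two new u32 fields out of rsv6 (seven words down to five
plus two) keeps sizeof(struct vgt_if) and the offset of the pdp[] block
unchanged, which is why the existing 0x78830/0x78834 guest writes land
exactly on the new fields. A hypothetical build-time check, not part of
the patch, that would pin the layout down:

	static inline void vgt_if_layout_check(void)
	{
		BUILD_BUG_ON(offsetof(struct vgt_if, cursor_x_hot) !=
			     offsetof(struct vgt_if, g2v_notify) + 6 * sizeof(u32));
		BUILD_BUG_ON(offsetof(struct vgt_if, cursor_y_hot) !=
			     offsetof(struct vgt_if, g2v_notify) + 7 * sizeof(u32));
	}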