Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Kconfig                |    1
-rw-r--r--  drivers/gpu/drm/i915/gvt/Makefile           |    2
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c        |    4
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c              |   55
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h              |    4
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h              |    7
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c            | 1005
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c         |    2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h             |    2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c             |  104
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.h     |   19
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c      |    5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c     |    2
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c           |    2
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h           |   12
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c        |   63
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c             |   45
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c  |    7
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c            |    7
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c        |    9
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c             |  109
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c            |   10
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c     |   13
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c         |    2
24 files changed, 1294 insertions, 197 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 5ddde7349fbd..183f5dc1c3f2 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -116,6 +116,7 @@ config DRM_I915_GVT_KVMGT
tristate "Enable KVM/VFIO support for Intel GVT-g"
depends on DRM_I915_GVT
depends on KVM
+ depends on VFIO_MDEV && VFIO_MDEV_DEVICE
default n
help
Choose this option if you want to enable KVMGT support for
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index 8a46a7f31d53..b123c20e2097 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -5,6 +5,4 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
-
-CFLAGS_kvmgt.o := -Wno-unused-function
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index db516382a4d4..711c31c8d8b4 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -123,6 +123,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
u8 changed = old ^ new;
int ret;
+ memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
if (!(changed & PCI_COMMAND_MEMORY))
return 0;
@@ -142,7 +143,6 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
return ret;
}
- memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
return 0;
}
@@ -240,7 +240,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
if (WARN_ON(bytes > 4))
return -EINVAL;
- if (WARN_ON(offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ))
+ if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
return -EINVAL;
/* First check if it's PCI_COMMAND */
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 7eaaf1c9ed2b..6c5fdf5b2ce2 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1998,6 +1998,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
INIT_LIST_HEAD(&gtt->oos_page_list_head);
INIT_LIST_HEAD(&gtt->post_shadow_list_head);
+ intel_vgpu_reset_ggtt(vgpu);
+
ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
NULL, 1, 0);
if (IS_ERR(ggtt_mm)) {
@@ -2206,6 +2208,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
int ret;
+ void *page_addr;
gvt_dbg_core("init gtt\n");
@@ -2218,6 +2221,23 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
return -ENODEV;
}
+ gvt->gtt.scratch_ggtt_page =
+ alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+ if (!gvt->gtt.scratch_ggtt_page) {
+ gvt_err("fail to allocate scratch ggtt page\n");
+ return -ENOMEM;
+ }
+
+ page_addr = page_address(gvt->gtt.scratch_ggtt_page);
+
+ gvt->gtt.scratch_ggtt_mfn =
+ intel_gvt_hypervisor_virt_to_mfn(page_addr);
+ if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("fail to translate scratch ggtt page\n");
+ __free_page(gvt->gtt.scratch_ggtt_page);
+ return -EFAULT;
+ }
+
if (enable_out_of_sync) {
ret = setup_spt_oos(gvt);
if (ret) {
@@ -2239,6 +2259,41 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
*/
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
+ __free_page(gvt->gtt.scratch_ggtt_page);
+
if (enable_out_of_sync)
clean_spt_oos(gvt);
}
+
+/**
+ * intel_vgpu_reset_ggtt - reset the GGTT entry
+ * @vgpu: a vGPU
+ *
+ * This function is called at the vGPU create stage
+ * to reset all the GGTT entries.
+ *
+ */
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ u32 index;
+ u32 offset;
+ u32 num_entries;
+ struct intel_gvt_gtt_entry e;
+
+ memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
+ e.type = GTT_TYPE_GGTT_PTE;
+ ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
+ e.val64 |= _PAGE_PRESENT;
+
+ index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
+ num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
+ for (offset = 0; offset < num_entries; offset++)
+ ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+
+ index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
+ num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
+ for (offset = 0; offset < num_entries; offset++)
+ ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index d250013bc37b..b315ab3593ec 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -81,6 +81,9 @@ struct intel_gvt_gtt {
struct list_head oos_page_use_list_head;
struct list_head oos_page_free_list_head;
struct list_head mm_lru_list_head;
+
+ struct page *scratch_ggtt_page;
+ unsigned long scratch_ggtt_mfn;
};
enum {
@@ -202,6 +205,7 @@ struct intel_vgpu_gtt {
extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index b1a7c8dd4b5f..0af17016f33f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -164,15 +164,18 @@ struct intel_vgpu {
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
struct {
- struct device *mdev;
+ struct mdev_device *mdev;
struct vfio_region *region;
int num_regions;
struct eventfd_ctx *intx_trigger;
struct eventfd_ctx *msi_trigger;
struct rb_root cache;
struct mutex cache_lock;
- void *vfio_group;
struct notifier_block iommu_notifier;
+ struct notifier_block group_notifier;
+ struct kvm *kvm;
+ struct work_struct release_work;
+ atomic_t released;
} vdev;
#endif
};
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index dc0365033157..934963970288 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -31,6 +31,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
+#include <linux/mmu_context.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
@@ -39,24 +40,13 @@
#include <linux/uuid.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
+#include <linux/mdev.h>
#include "i915_drv.h"
#include "gvt.h"
-static inline long kvmgt_pin_pages(struct device *dev, unsigned long *user_pfn,
- long npage, int prot, unsigned long *phys_pfn)
-{
- return 0;
-}
-static inline long kvmgt_unpin_pages(struct device *dev, unsigned long *pfn,
- long npage)
-{
- return 0;
-}
-
static const struct intel_gvt_ops *intel_gvt_ops;
-
/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT 40
#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT)
@@ -90,6 +80,15 @@ struct gvt_dma {
kvm_pfn_t pfn;
};
+static inline bool handle_valid(unsigned long handle)
+{
+ return !!(handle & ~0xff);
+}
+
+static int kvmgt_guest_init(struct mdev_device *mdev);
+static void intel_vgpu_release_work(struct work_struct *work);
+static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
+
static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
struct rb_node *node = vgpu->vdev.cache.rb_node;
@@ -115,12 +114,15 @@ out:
static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
struct gvt_dma *entry;
+ kvm_pfn_t pfn;
mutex_lock(&vgpu->vdev.cache_lock);
+
entry = __gvt_cache_find(vgpu, gfn);
- mutex_unlock(&vgpu->vdev.cache_lock);
+ pfn = (entry == NULL) ? 0 : entry->pfn;
- return entry == NULL ? 0 : entry->pfn;
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ return pfn;
}
static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
@@ -167,9 +169,10 @@ static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
{
- struct device *dev = vgpu->vdev.mdev;
+ struct device *dev = &vgpu->vdev.mdev->dev;
struct gvt_dma *this;
- unsigned long pfn;
+ unsigned long g1;
+ int rc;
mutex_lock(&vgpu->vdev.cache_lock);
this = __gvt_cache_find(vgpu, gfn);
@@ -178,8 +181,9 @@ static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
return;
}
- pfn = this->pfn;
- WARN_ON((kvmgt_unpin_pages(dev, &pfn, 1) != 1));
+ g1 = gfn;
+ rc = vfio_unpin_pages(dev, &g1, 1);
+ WARN_ON(rc != 1);
__gvt_cache_remove_entry(vgpu, this);
mutex_unlock(&vgpu->vdev.cache_lock);
}
@@ -194,15 +198,15 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
struct gvt_dma *dma;
struct rb_node *node = NULL;
- struct device *dev = vgpu->vdev.mdev;
- unsigned long pfn;
+ struct device *dev = &vgpu->vdev.mdev->dev;
+ unsigned long gfn;
mutex_lock(&vgpu->vdev.cache_lock);
while ((node = rb_first(&vgpu->vdev.cache))) {
dma = rb_entry(node, struct gvt_dma, node);
- pfn = dma->pfn;
+ gfn = dma->gfn;
- kvmgt_unpin_pages(dev, &pfn, 1);
+ vfio_unpin_pages(dev, &gfn, 1);
__gvt_cache_remove_entry(vgpu, dma);
}
mutex_unlock(&vgpu->vdev.cache_lock);
@@ -226,7 +230,53 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
return NULL;
}
+static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
+ char *buf)
+{
+ struct intel_vgpu_type *type;
+ unsigned int num = 0;
+ void *gvt = kdev_to_i915(dev)->gvt;
+
+ type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+ if (!type)
+ num = 0;
+ else
+ num = type->avail_instance;
+
+ return sprintf(buf, "%u\n", num);
+}
+
+static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
+}
+
+static ssize_t description_show(struct kobject *kobj, struct device *dev,
+ char *buf)
+{
+ struct intel_vgpu_type *type;
+ void *gvt = kdev_to_i915(dev)->gvt;
+
+ type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+ if (!type)
+ return 0;
+
+ return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
+ "fence: %d\n",
+ BYTES_TO_MB(type->low_gm_size),
+ BYTES_TO_MB(type->high_gm_size),
+ type->fence);
+}
+
+static MDEV_TYPE_ATTR_RO(available_instance);
+static MDEV_TYPE_ATTR_RO(device_api);
+static MDEV_TYPE_ATTR_RO(description);
+
static struct attribute *type_attrs[] = {
+ &mdev_type_attr_available_instance.attr,
+ &mdev_type_attr_device_api.attr,
+ &mdev_type_attr_description.attr,
NULL,
};
@@ -322,7 +372,7 @@ static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
if (kvmgt_gfn_is_write_protected(info, gfn))
return;
- p = kmalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
+ p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
if (WARN(!p, "gfn: 0x%llx\n", gfn))
return;
@@ -342,6 +392,739 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
}
}
+static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
+{
+ struct intel_vgpu *vgpu;
+ struct intel_vgpu_type *type;
+ struct device *pdev;
+ void *gvt;
+
+ pdev = mdev->parent->dev;
+ gvt = kdev_to_i915(pdev)->gvt;
+
+ type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+ if (!type) {
+ gvt_err("failed to find type %s to create\n",
+ kobject_name(kobj));
+ return -EINVAL;
+ }
+
+ vgpu = intel_gvt_ops->vgpu_create(gvt, type);
+ if (IS_ERR_OR_NULL(vgpu)) {
+ gvt_err("create intel vgpu failed\n");
+ return -EINVAL;
+ }
+
+ INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
+
+ vgpu->vdev.mdev = mdev;
+ mdev_set_drvdata(mdev, vgpu);
+
+ gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
+ dev_name(&mdev->dev));
+ return 0;
+}
+
+static int intel_vgpu_remove(struct mdev_device *mdev)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+
+ if (handle_valid(vgpu->handle))
+ return -EBUSY;
+
+ intel_gvt_ops->vgpu_destroy(vgpu);
+ return 0;
+}
+
+static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct intel_vgpu *vgpu = container_of(nb,
+ struct intel_vgpu,
+ vdev.iommu_notifier);
+
+ if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
+ struct vfio_iommu_type1_dma_unmap *unmap = data;
+ unsigned long gfn, end_gfn;
+
+ gfn = unmap->iova >> PAGE_SHIFT;
+ end_gfn = gfn + unmap->size / PAGE_SIZE;
+
+ while (gfn < end_gfn)
+ gvt_cache_remove(vgpu, gfn++);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int intel_vgpu_group_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct intel_vgpu *vgpu = container_of(nb,
+ struct intel_vgpu,
+ vdev.group_notifier);
+
+ /* the only action we care about */
+ if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
+ vgpu->vdev.kvm = data;
+
+ if (!data)
+ schedule_work(&vgpu->vdev.release_work);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int intel_vgpu_open(struct mdev_device *mdev)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ unsigned long events;
+ int ret;
+
+ vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
+ vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;
+
+ events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
+ ret = vfio_register_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY, &events,
+ &vgpu->vdev.iommu_notifier);
+ if (ret != 0) {
+ gvt_err("vfio_register_notifier for iommu failed: %d\n", ret);
+ goto out;
+ }
+
+ events = VFIO_GROUP_NOTIFY_SET_KVM;
+ ret = vfio_register_notifier(&mdev->dev, VFIO_GROUP_NOTIFY, &events,
+ &vgpu->vdev.group_notifier);
+ if (ret != 0) {
+ gvt_err("vfio_register_notifier for group failed: %d\n", ret);
+ goto undo_iommu;
+ }
+
+ ret = kvmgt_guest_init(mdev);
+ if (ret)
+ goto undo_group;
+
+ atomic_set(&vgpu->vdev.released, 0);
+ return ret;
+
+undo_group:
+ vfio_unregister_notifier(&mdev->dev, VFIO_GROUP_NOTIFY,
+ &vgpu->vdev.group_notifier);
+
+undo_iommu:
+ vfio_unregister_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY,
+ &vgpu->vdev.iommu_notifier);
+out:
+ return ret;
+}
+
+static void __intel_vgpu_release(struct intel_vgpu *vgpu)
+{
+ struct kvmgt_guest_info *info;
+ int ret;
+
+ if (!handle_valid(vgpu->handle))
+ return;
+
+ if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
+ return;
+
+ ret = vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY,
+ &vgpu->vdev.iommu_notifier);
+ WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
+
+ ret = vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY,
+ &vgpu->vdev.group_notifier);
+ WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
+
+ info = (struct kvmgt_guest_info *)vgpu->handle;
+ kvmgt_guest_exit(info);
+
+ vgpu->vdev.kvm = NULL;
+ vgpu->handle = 0;
+}
+
+static void intel_vgpu_release(struct mdev_device *mdev)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+
+ __intel_vgpu_release(vgpu);
+}
+
+static void intel_vgpu_release_work(struct work_struct *work)
+{
+ struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
+ vdev.release_work);
+
+ __intel_vgpu_release(vgpu);
+}
+
+static uint64_t intel_vgpu_get_bar0_addr(struct intel_vgpu *vgpu)
+{
+ u32 start_lo, start_hi;
+ u32 mem_type;
+ int pos = PCI_BASE_ADDRESS_0;
+
+ start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
+ PCI_BASE_ADDRESS_MEM_MASK;
+ mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
+ PCI_BASE_ADDRESS_MEM_TYPE_MASK;
+
+ switch (mem_type) {
+ case PCI_BASE_ADDRESS_MEM_TYPE_64:
+ start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
+ + pos + 4));
+ break;
+ case PCI_BASE_ADDRESS_MEM_TYPE_32:
+ case PCI_BASE_ADDRESS_MEM_TYPE_1M:
+ /* 1M mem BAR treated as 32-bit BAR */
+ default:
+ /* mem unknown type treated as 32-bit BAR */
+ start_hi = 0;
+ break;
+ }
+
+ return ((u64)start_hi << 32) | start_lo;
+}
+
+static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
+ size_t count, loff_t *ppos, bool is_write)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ int ret = -EINVAL;
+
+
+ if (index >= VFIO_PCI_NUM_REGIONS) {
+ gvt_err("invalid index: %u\n", index);
+ return -EINVAL;
+ }
+
+ switch (index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ if (is_write)
+ ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
+ buf, count);
+ else
+ ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
+ buf, count);
+ break;
+ case VFIO_PCI_BAR0_REGION_INDEX:
+ case VFIO_PCI_BAR1_REGION_INDEX:
+ if (is_write) {
+ uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
+
+ ret = intel_gvt_ops->emulate_mmio_write(vgpu,
+ bar0_start + pos, buf, count);
+ } else {
+ uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
+
+ ret = intel_gvt_ops->emulate_mmio_read(vgpu,
+ bar0_start + pos, buf, count);
+ }
+ break;
+ case VFIO_PCI_BAR2_REGION_INDEX:
+ case VFIO_PCI_BAR3_REGION_INDEX:
+ case VFIO_PCI_BAR4_REGION_INDEX:
+ case VFIO_PCI_BAR5_REGION_INDEX:
+ case VFIO_PCI_VGA_REGION_INDEX:
+ case VFIO_PCI_ROM_REGION_INDEX:
+ default:
+ gvt_err("unsupported region: %u\n", index);
+ }
+
+ return ret == 0 ? count : ret;
+}
+
+static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int done = 0;
+ int ret;
+
+ while (count) {
+ size_t filled;
+
+ if (count >= 4 && !(*ppos % 4)) {
+ u32 val;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ppos, false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 4;
+ } else if (count >= 2 && !(*ppos % 2)) {
+ u16 val;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ppos, false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 2;
+ } else {
+ u8 val;
+
+ ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
+ false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 1;
+ }
+
+ count -= filled;
+ done += filled;
+ *ppos += filled;
+ buf += filled;
+ }
+
+ return done;
+
+read_err:
+ return -EFAULT;
+}
+
+static ssize_t intel_vgpu_write(struct mdev_device *mdev,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int done = 0;
+ int ret;
+
+ while (count) {
+ size_t filled;
+
+ if (count >= 4 && !(*ppos % 4)) {
+ u32 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 4;
+ } else if (count >= 2 && !(*ppos % 2)) {
+ u16 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val,
+ sizeof(val), ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 2;
+ } else {
+ u8 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = intel_vgpu_rw(mdev, &val, sizeof(val),
+ ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 1;
+ }
+
+ count -= filled;
+ done += filled;
+ *ppos += filled;
+ buf += filled;
+ }
+
+ return done;
+write_err:
+ return -EFAULT;
+}
+
+static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
+{
+ unsigned int index;
+ u64 virtaddr;
+ unsigned long req_size, pgoff = 0;
+ pgprot_t pg_prot;
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+
+ index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+ if (index >= VFIO_PCI_ROM_REGION_INDEX)
+ return -EINVAL;
+
+ if (vma->vm_end < vma->vm_start)
+ return -EINVAL;
+ if ((vma->vm_flags & VM_SHARED) == 0)
+ return -EINVAL;
+ if (index != VFIO_PCI_BAR2_REGION_INDEX)
+ return -EINVAL;
+
+ pg_prot = vma->vm_page_prot;
+ virtaddr = vma->vm_start;
+ req_size = vma->vm_end - vma->vm_start;
+ pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
+
+ return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
+}
+
+static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
+{
+ if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
+ return 1;
+
+ return 0;
+}
+
+static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
+ unsigned int index, unsigned int start,
+ unsigned int count, uint32_t flags,
+ void *data)
+{
+ return 0;
+}
+
+static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
+ unsigned int index, unsigned int start,
+ unsigned int count, uint32_t flags, void *data)
+{
+ return 0;
+}
+
+static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
+ unsigned int index, unsigned int start, unsigned int count,
+ uint32_t flags, void *data)
+{
+ return 0;
+}
+
+static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
+ unsigned int index, unsigned int start, unsigned int count,
+ uint32_t flags, void *data)
+{
+ struct eventfd_ctx *trigger;
+
+ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int fd = *(int *)data;
+
+ trigger = eventfd_ctx_fdget(fd);
+ if (IS_ERR(trigger)) {
+ gvt_err("eventfd_ctx_fdget failed\n");
+ return PTR_ERR(trigger);
+ }
+ vgpu->vdev.msi_trigger = trigger;
+ }
+
+ return 0;
+}
+
+static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
+ unsigned int index, unsigned int start, unsigned int count,
+ void *data)
+{
+ int (*func)(struct intel_vgpu *vgpu, unsigned int index,
+ unsigned int start, unsigned int count, uint32_t flags,
+ void *data) = NULL;
+
+ switch (index) {
+ case VFIO_PCI_INTX_IRQ_INDEX:
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_MASK:
+ func = intel_vgpu_set_intx_mask;
+ break;
+ case VFIO_IRQ_SET_ACTION_UNMASK:
+ func = intel_vgpu_set_intx_unmask;
+ break;
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+ func = intel_vgpu_set_intx_trigger;
+ break;
+ }
+ break;
+ case VFIO_PCI_MSI_IRQ_INDEX:
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_MASK:
+ case VFIO_IRQ_SET_ACTION_UNMASK:
+ /* XXX Need masking support exported */
+ break;
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+ func = intel_vgpu_set_msi_trigger;
+ break;
+ }
+ break;
+ }
+
+ if (!func)
+ return -ENOTTY;
+
+ return func(vgpu, index, start, count, flags, data);
+}
+
+static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ unsigned long minsz;
+
+ gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
+
+ if (cmd == VFIO_DEVICE_GET_INFO) {
+ struct vfio_device_info info;
+
+ minsz = offsetofend(struct vfio_device_info, num_irqs);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ info.flags = VFIO_DEVICE_FLAGS_PCI;
+ info.flags |= VFIO_DEVICE_FLAGS_RESET;
+ info.num_regions = VFIO_PCI_NUM_REGIONS;
+ info.num_irqs = VFIO_PCI_NUM_IRQS;
+
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
+
+ } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
+ struct vfio_region_info info;
+ struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+ int i, ret;
+ struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
+ size_t size;
+ int nr_areas = 1;
+ int cap_type_id;
+
+ minsz = offsetofend(struct vfio_region_info, offset);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ switch (info.index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = INTEL_GVT_MAX_CFG_SPACE_SZ;
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ break;
+ case VFIO_PCI_BAR0_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = vgpu->cfg_space.bar[info.index].size;
+ if (!info.size) {
+ info.flags = 0;
+ break;
+ }
+
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ break;
+ case VFIO_PCI_BAR1_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = 0;
+ info.flags = 0;
+ break;
+ case VFIO_PCI_BAR2_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.flags = VFIO_REGION_INFO_FLAG_CAPS |
+ VFIO_REGION_INFO_FLAG_MMAP |
+ VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ info.size = gvt_aperture_sz(vgpu->gvt);
+
+ size = sizeof(*sparse) +
+ (nr_areas * sizeof(*sparse->areas));
+ sparse = kzalloc(size, GFP_KERNEL);
+ if (!sparse)
+ return -ENOMEM;
+
+ sparse->nr_areas = nr_areas;
+ cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
+ sparse->areas[0].offset =
+ PAGE_ALIGN(vgpu_aperture_offset(vgpu));
+ sparse->areas[0].size = vgpu_aperture_sz(vgpu);
+ if (!caps.buf) {
+ kfree(caps.buf);
+ caps.buf = NULL;
+ caps.size = 0;
+ }
+ break;
+
+ case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = 0;
+
+ info.flags = 0;
+ gvt_dbg_core("get region info bar:%d\n", info.index);
+ break;
+
+ case VFIO_PCI_ROM_REGION_INDEX:
+ case VFIO_PCI_VGA_REGION_INDEX:
+ gvt_dbg_core("get region info index:%d\n", info.index);
+ break;
+ default:
+ {
+ struct vfio_region_info_cap_type cap_type;
+
+ if (info.index >= VFIO_PCI_NUM_REGIONS +
+ vgpu->vdev.num_regions)
+ return -EINVAL;
+
+ i = info.index - VFIO_PCI_NUM_REGIONS;
+
+ info.offset =
+ VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = vgpu->vdev.region[i].size;
+ info.flags = vgpu->vdev.region[i].flags;
+
+ cap_type.type = vgpu->vdev.region[i].type;
+ cap_type.subtype = vgpu->vdev.region[i].subtype;
+
+ ret = vfio_info_add_capability(&caps,
+ VFIO_REGION_INFO_CAP_TYPE,
+ &cap_type);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
+ switch (cap_type_id) {
+ case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
+ ret = vfio_info_add_capability(&caps,
+ VFIO_REGION_INFO_CAP_SPARSE_MMAP,
+ sparse);
+ kfree(sparse);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (caps.size) {
+ if (info.argsz < sizeof(info) + caps.size) {
+ info.argsz = sizeof(info) + caps.size;
+ info.cap_offset = 0;
+ } else {
+ vfio_info_cap_shift(&caps, sizeof(info));
+ if (copy_to_user((void __user *)arg +
+ sizeof(info), caps.buf,
+ caps.size)) {
+ kfree(caps.buf);
+ return -EFAULT;
+ }
+ info.cap_offset = sizeof(info);
+ }
+
+ kfree(caps.buf);
+ }
+
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
+ } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
+ struct vfio_irq_info info;
+
+ minsz = offsetofend(struct vfio_irq_info, count);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
+ return -EINVAL;
+
+ switch (info.index) {
+ case VFIO_PCI_INTX_IRQ_INDEX:
+ case VFIO_PCI_MSI_IRQ_INDEX:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ info.flags = VFIO_IRQ_INFO_EVENTFD;
+
+ info.count = intel_vgpu_get_irq_count(vgpu, info.index);
+
+ if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
+ info.flags |= (VFIO_IRQ_INFO_MASKABLE |
+ VFIO_IRQ_INFO_AUTOMASKED);
+ else
+ info.flags |= VFIO_IRQ_INFO_NORESIZE;
+
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
+ } else if (cmd == VFIO_DEVICE_SET_IRQS) {
+ struct vfio_irq_set hdr;
+ u8 *data = NULL;
+ int ret = 0;
+ size_t data_size = 0;
+
+ minsz = offsetofend(struct vfio_irq_set, count);
+
+ if (copy_from_user(&hdr, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
+ int max = intel_vgpu_get_irq_count(vgpu, hdr.index);
+
+ ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
+ VFIO_PCI_NUM_IRQS, &data_size);
+ if (ret) {
+ gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
+ return -EINVAL;
+ }
+ if (data_size) {
+ data = memdup_user((void __user *)(arg + minsz),
+ data_size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+ }
+ }
+
+ ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
+ hdr.start, hdr.count, data);
+ kfree(data);
+
+ return ret;
+ } else if (cmd == VFIO_DEVICE_RESET) {
+ intel_gvt_ops->vgpu_reset(vgpu);
+ return 0;
+ }
+
+ return 0;
+}
+
+static const struct parent_ops intel_vgpu_ops = {
+ .supported_type_groups = intel_vgpu_type_groups,
+ .create = intel_vgpu_create,
+ .remove = intel_vgpu_remove,
+
+ .open = intel_vgpu_open,
+ .release = intel_vgpu_release,
+
+ .read = intel_vgpu_read,
+ .write = intel_vgpu_write,
+ .mmap = intel_vgpu_mmap,
+ .ioctl = intel_vgpu_ioctl,
+};
+
static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
if (!intel_gvt_init_vgpu_type_groups(gvt))
@@ -349,24 +1132,34 @@ static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
intel_gvt_ops = ops;
- /* MDEV is not yet available */
- return -ENODEV;
+ return mdev_register_device(dev, &intel_vgpu_ops);
}
static void kvmgt_host_exit(struct device *dev, void *gvt)
{
intel_gvt_cleanup_vgpu_type_groups(gvt);
+ mdev_unregister_device(dev);
}
static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
{
- struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
- struct kvm *kvm = info->kvm;
+ struct kvmgt_guest_info *info;
+ struct kvm *kvm;
struct kvm_memory_slot *slot;
int idx;
+ if (!handle_valid(handle))
+ return -ESRCH;
+
+ info = (struct kvmgt_guest_info *)handle;
+ kvm = info->kvm;
+
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
+ if (!slot) {
+ srcu_read_unlock(&kvm->srcu, idx);
+ return -EINVAL;
+ }
spin_lock(&kvm->mmu_lock);
@@ -384,13 +1177,23 @@ out:
static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
{
- struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
- struct kvm *kvm = info->kvm;
+ struct kvmgt_guest_info *info;
+ struct kvm *kvm;
struct kvm_memory_slot *slot;
int idx;
+ if (!handle_valid(handle))
+ return 0;
+
+ info = (struct kvmgt_guest_info *)handle;
+ kvm = info->kvm;
+
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
+ if (!slot) {
+ srcu_read_unlock(&kvm->srcu, idx);
+ return -EINVAL;
+ }
spin_lock(&kvm->mmu_lock);
@@ -476,6 +1279,81 @@ static int kvmgt_detect_host(void)
return kvmgt_check_guest() ? -ENODEV : 0;
}
+static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
+{
+ struct intel_vgpu *itr;
+ struct kvmgt_guest_info *info;
+ int id;
+ bool ret = false;
+
+ mutex_lock(&vgpu->gvt->lock);
+ for_each_active_vgpu(vgpu->gvt, itr, id) {
+ if (!handle_valid(itr->handle))
+ continue;
+
+ info = (struct kvmgt_guest_info *)itr->handle;
+ if (kvm && kvm == info->kvm) {
+ ret = true;
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&vgpu->gvt->lock);
+ return ret;
+}
+
+static int kvmgt_guest_init(struct mdev_device *mdev)
+{
+ struct kvmgt_guest_info *info;
+ struct intel_vgpu *vgpu;
+ struct kvm *kvm;
+
+ vgpu = mdev_get_drvdata(mdev);
+ if (handle_valid(vgpu->handle))
+ return -EEXIST;
+
+ kvm = vgpu->vdev.kvm;
+ if (!kvm || kvm->mm != current->mm) {
+ gvt_err("KVM is required to use Intel vGPU\n");
+ return -ESRCH;
+ }
+
+ if (__kvmgt_vgpu_exist(vgpu, kvm))
+ return -EEXIST;
+
+ info = vzalloc(sizeof(struct kvmgt_guest_info));
+ if (!info)
+ return -ENOMEM;
+
+ vgpu->handle = (unsigned long)info;
+ info->vgpu = vgpu;
+ info->kvm = kvm;
+
+ kvmgt_protect_table_init(info);
+ gvt_cache_init(vgpu);
+
+ info->track_node.track_write = kvmgt_page_track_write;
+ info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
+ kvm_page_track_register_notifier(kvm, &info->track_node);
+
+ return 0;
+}
+
+static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
+{
+ if (!info) {
+ gvt_err("kvmgt_guest_info invalid\n");
+ return false;
+ }
+
+ kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
+ kvmgt_protect_table_destroy(info);
+ gvt_cache_destroy(info->vgpu);
+ vfree(info);
+
+ return true;
+}
+
static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
{
/* nothing to do here */
@@ -489,63 +1367,72 @@ static void kvmgt_detach_vgpu(unsigned long handle)
static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
{
- struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
- struct intel_vgpu *vgpu = info->vgpu;
+ struct kvmgt_guest_info *info;
+ struct intel_vgpu *vgpu;
- if (vgpu->vdev.msi_trigger)
- return eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1;
+ if (!handle_valid(handle))
+ return -ESRCH;
- return false;
+ info = (struct kvmgt_guest_info *)handle;
+ vgpu = info->vgpu;
+
+ if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
+ return 0;
+
+ return -EFAULT;
}
static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
{
unsigned long pfn;
- struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
+ struct kvmgt_guest_info *info;
+ struct device *dev;
int rc;
+ if (!handle_valid(handle))
+ return INTEL_GVT_INVALID_ADDR;
+
+ info = (struct kvmgt_guest_info *)handle;
pfn = gvt_cache_find(info->vgpu, gfn);
if (pfn != 0)
return pfn;
- rc = kvmgt_pin_pages(info->vgpu->vdev.mdev, &gfn, 1,
- IOMMU_READ | IOMMU_WRITE, &pfn);
+ pfn = INTEL_GVT_INVALID_ADDR;
+ dev = &info->vgpu->vdev.mdev->dev;
+ rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
if (rc != 1) {
- gvt_err("vfio_pin_pages failed for gfn: 0x%lx\n", gfn);
- return 0;
+ gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
+ return INTEL_GVT_INVALID_ADDR;
}
gvt_cache_add(info->vgpu, gfn, pfn);
return pfn;
}
-static void *kvmgt_gpa_to_hva(unsigned long handle, unsigned long gpa)
+static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
+ void *buf, unsigned long len, bool write)
{
- unsigned long pfn;
- gfn_t gfn = gpa_to_gfn(gpa);
+ struct kvmgt_guest_info *info;
+ struct kvm *kvm;
+ int ret;
+ bool kthread = current->mm == NULL;
- pfn = kvmgt_gfn_to_pfn(handle, gfn);
- if (!pfn)
- return NULL;
+ if (!handle_valid(handle))
+ return -ESRCH;
- return (char *)pfn_to_kaddr(pfn) + offset_in_page(gpa);
-}
+ info = (struct kvmgt_guest_info *)handle;
+ kvm = info->kvm;
-static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
- void *buf, unsigned long len, bool write)
-{
- void *hva = NULL;
+ if (kthread)
+ use_mm(kvm->mm);
- hva = kvmgt_gpa_to_hva(handle, gpa);
- if (!hva)
- return -EFAULT;
+ ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
+ kvm_read_guest(kvm, gpa, buf, len);
- if (write)
- memcpy(hva, buf, len);
- else
- memcpy(buf, hva, len);
+ if (kthread)
+ unuse_mm(kvm->mm);
- return 0;
+ return ret;
}
static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index d2a0fbc896c3..81cd921770c6 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -65,7 +65,7 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
int i, ret;
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
- mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)
+ mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
+ i * PAGE_SIZE);
if (mfn == INTEL_GVT_INVALID_ADDR) {
gvt_err("fail to get MFN from VA\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 56002a52936d..243224aeabf8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3509,6 +3509,8 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
+int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms);
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 115fa399608c..3dd7fc662859 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -174,21 +174,35 @@ static struct sg_table *
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
struct address_space *mapping = obj->base.filp->f_mapping;
- char *vaddr = obj->phys_handle->vaddr;
+ drm_dma_handle_t *phys;
struct sg_table *st;
struct scatterlist *sg;
+ char *vaddr;
int i;
if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
return ERR_PTR(-EINVAL);
+ /* Always aligning to the object size, allows a single allocation
+ * to handle all possible callers, and given typical object sizes,
+ * the alignment of the buddy allocation will naturally match.
+ */
+ phys = drm_pci_alloc(obj->base.dev,
+ obj->base.size,
+ roundup_pow_of_two(obj->base.size));
+ if (!phys)
+ return ERR_PTR(-ENOMEM);
+
+ vaddr = phys->vaddr;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
char *src;
page = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(page))
- return ERR_CAST(page);
+ if (IS_ERR(page)) {
+ st = ERR_CAST(page);
+ goto err_phys;
+ }
src = kmap_atomic(page);
memcpy(vaddr, src, PAGE_SIZE);
@@ -202,34 +216,44 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
i915_gem_chipset_flush(to_i915(obj->base.dev));
st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL)
- return ERR_PTR(-ENOMEM);
+ if (!st) {
+ st = ERR_PTR(-ENOMEM);
+ goto err_phys;
+ }
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
- return ERR_PTR(-ENOMEM);
+ st = ERR_PTR(-ENOMEM);
+ goto err_phys;
}
sg = st->sgl;
sg->offset = 0;
sg->length = obj->base.size;
- sg_dma_address(sg) = obj->phys_handle->busaddr;
+ sg_dma_address(sg) = phys->busaddr;
sg_dma_len(sg) = obj->base.size;
+ obj->phys_handle = phys;
+ return st;
+
+err_phys:
+ drm_pci_free(obj->base.dev, phys);
return st;
}
static void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
- struct sg_table *pages)
+ struct sg_table *pages,
+ bool needs_clflush)
{
GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
if (obj->mm.madv == I915_MADV_DONTNEED)
obj->mm.dirty = false;
- if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+ if (needs_clflush &&
+ (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
drm_clflush_sg(pages);
@@ -241,7 +265,7 @@ static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
- __i915_gem_object_release_shmem(obj, pages);
+ __i915_gem_object_release_shmem(obj, pages, false);
if (obj->mm.dirty) {
struct address_space *mapping = obj->base.filp->f_mapping;
@@ -272,12 +296,13 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
sg_free_table(pages);
kfree(pages);
+
+ drm_pci_free(obj->base.dev, obj->phys_handle);
}
static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
- drm_pci_free(obj->base.dev, obj->phys_handle);
i915_gem_object_unpin_pages(obj);
}
@@ -538,15 +563,13 @@ int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align)
{
- drm_dma_handle_t *phys;
int ret;
- if (obj->phys_handle) {
- if ((unsigned long)obj->phys_handle->vaddr & (align -1))
- return -EBUSY;
+ if (align > obj->base.size)
+ return -EINVAL;
+ if (obj->ops == &i915_gem_phys_ops)
return 0;
- }
if (obj->mm.madv != I915_MADV_WILLNEED)
return -EFAULT;
@@ -562,12 +585,6 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
if (obj->mm.pages)
return -EBUSY;
- /* create a new object */
- phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
- if (!phys)
- return -ENOMEM;
-
- obj->phys_handle = phys;
obj->ops = &i915_gem_phys_ops;
return i915_gem_object_pin_pages(obj);
@@ -1796,8 +1813,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
int ret;
/* We don't use vmf->pgoff since that has the fake offset */
- page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
- PAGE_SHIFT;
+ page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
trace_i915_gem_object_fault(obj, page_offset, true, write);
@@ -2217,7 +2233,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
struct sgt_iter sgt_iter;
struct page *page;
- __i915_gem_object_release_shmem(obj, pages);
+ __i915_gem_object_release_shmem(obj, pages, true);
i915_gem_gtt_finish_pages(obj, pages);
@@ -2299,7 +2315,7 @@ static void i915_sg_trim(struct sg_table *orig_st)
if (orig_st->nents == orig_st->orig_nents)
return;
- if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL))
+ if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
return;
new_sg = new_st.sgl;
@@ -2318,7 +2334,8 @@ static struct sg_table *
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- int page_count, i;
+ const unsigned long page_count = obj->base.size / PAGE_SIZE;
+ unsigned long i;
struct address_space *mapping;
struct sg_table *st;
struct scatterlist *sg;
@@ -2344,7 +2361,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
if (st == NULL)
return ERR_PTR(-ENOMEM);
- page_count = obj->base.size / PAGE_SIZE;
+rebuild_st:
if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
kfree(st);
return ERR_PTR(-ENOMEM);
@@ -2403,8 +2420,25 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
i915_sg_trim(st);
ret = i915_gem_gtt_prepare_pages(obj, st);
- if (ret)
- goto err_pages;
+ if (ret) {
+ /* DMA remapping failed? One possible cause is that
+ * it could not reserve enough large entries, asking
+ * for PAGE_SIZE chunks instead may be helpful.
+ */
+ if (max_segment > PAGE_SIZE) {
+ for_each_sgt_page(page, sgt_iter, st)
+ put_page(page);
+ sg_free_table(st);
+
+ max_segment = PAGE_SIZE;
+ goto rebuild_st;
+ } else {
+ dev_warn(&dev_priv->drm.pdev->dev,
+ "Failed to DMA remap %lu pages\n",
+ page_count);
+ goto err_pages;
+ }
+ }
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj, st);
@@ -2687,6 +2721,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
struct drm_i915_gem_request *request;
struct i915_gem_context *incomplete_ctx;
struct intel_timeline *timeline;
+ unsigned long flags;
bool ring_hung;
if (engine->irq_seqno_barrier)
@@ -2722,13 +2757,20 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
if (i915_gem_context_is_default(incomplete_ctx))
return;
+ timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
+
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+ spin_lock(&timeline->lock);
+
list_for_each_entry_continue(request, &engine->timeline->requests, link)
if (request->ctx == incomplete_ctx)
reset_request(request);
- timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
list_for_each_entry(request, &timeline->requests, link)
reset_request(request);
+
+ spin_unlock(&timeline->lock);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
void i915_gem_reset(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index e2b077df2da0..d229f47d1028 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -413,6 +413,25 @@ i915_gem_active_set(struct i915_gem_active *active,
rcu_assign_pointer(active->request, request);
}
+/**
+ * i915_gem_active_set_retire_fn - updates the retirement callback
+ * @active - the active tracker
+ * @fn - the routine called when the request is retired
+ * @mutex - struct_mutex used to guard retirements
+ *
+ * i915_gem_active_set_retire_fn() updates the function pointer that
+ * is called when the final request associated with the @active tracker
+ * is retired.
+ */
+static inline void
+i915_gem_active_set_retire_fn(struct i915_gem_active *active,
+ i915_gem_retire_fn fn,
+ struct mutex *mutex)
+{
+ lockdep_assert_held(mutex);
+ active->retire = fn ?: i915_gem_retire_noop;
+}
+
static inline struct drm_i915_gem_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index ebaa941c83af..abc78bbfc1dc 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -55,10 +55,9 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
return -ENODEV;
/* See the comment at the drm_mm_init() call for more about this check.
- * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
+ * WaSkipStolenMemoryFirstPage:bdw+ (incomplete)
*/
- if (start < 4096 && (IS_GEN8(dev_priv) ||
- IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
+ if (start < 4096 && INTEL_GEN(dev_priv) >= 8)
start = 4096;
mutex_lock(&dev_priv->mm.stolen_lock);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 107ddf51065e..d068af2ec3a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -515,7 +515,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
flags,
- pvec + pinned, NULL);
+ pvec + pinned, NULL, NULL);
if (ret < 0)
break;
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 47590ab08d7e..3df8d3dd31cd 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -460,7 +460,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
-static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
+static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO | S_IWUSR, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 8405b5a367d7..7e3545f65257 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -46,14 +46,20 @@ struct edp_power_seq {
u16 t11_t12;
} __packed;
-/* MIPI Sequence Block definitions */
+/*
+ * MIPI Sequence Block definitions
+ *
+ * Note the VBT spec has AssertReset / DeassertReset swapped from their
+ * usual naming, we use the proper names here to avoid confusion when
+ * reading the code.
+ */
enum mipi_seq {
MIPI_SEQ_END = 0,
- MIPI_SEQ_ASSERT_RESET,
+ MIPI_SEQ_DEASSERT_RESET, /* Spec says MipiAssertResetPin */
MIPI_SEQ_INIT_OTP,
MIPI_SEQ_DISPLAY_ON,
MIPI_SEQ_DISPLAY_OFF,
- MIPI_SEQ_DEASSERT_RESET,
+ MIPI_SEQ_ASSERT_RESET, /* Spec says MipiDeassertResetPin */
MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index cf5cff7b03b8..3dc8724df400 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6244,35 +6244,24 @@ skl_dpll0_disable(struct drm_i915_private *dev_priv)
dev_priv->cdclk_pll.vco = 0;
}
-static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
-{
- int ret;
- u32 val;
-
- /* inform PCU we want to change CDCLK */
- val = SKL_CDCLK_PREPARE_FOR_CHANGE;
- mutex_lock(&dev_priv->rps.hw_lock);
- ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
-}
-
-static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
-{
- return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0;
-}
-
static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
{
u32 freq_select, pcu_ack;
+ int ret;
WARN_ON((cdclk == 24000) != (vco == 0));
DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
- if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
- DRM_ERROR("failed to inform PCU about cdclk change\n");
+ mutex_lock(&dev_priv->rps.hw_lock);
+ ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ if (ret) {
+ DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
+ ret);
return;
}
@@ -16802,7 +16791,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state = crtc->config;
- int pixclk = 0;
__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
memset(crtc_state, 0, sizeof(*crtc_state));
@@ -16814,23 +16802,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc->base.enabled = crtc_state->base.enable;
crtc->active = crtc_state->base.active;
- if (crtc_state->base.active) {
+ if (crtc_state->base.active)
dev_priv->active_crtcs |= 1 << crtc->pipe;
- if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- pixclk = ilk_pipe_pixel_rate(crtc_state);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- pixclk = crtc_state->base.adjusted_mode.crtc_clock;
- else
- WARN_ON(dev_priv->display.modeset_calc_cdclk);
-
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
- pixclk = DIV_ROUND_UP(pixclk * 100, 95);
- }
-
- dev_priv->min_pixclk[crtc->pipe] = pixclk;
-
readout_plane_state(crtc);
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
@@ -16903,6 +16877,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
for_each_intel_crtc(dev, crtc) {
+ int pixclk = 0;
+
crtc->base.hwmode = crtc->config->base.adjusted_mode;
memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
@@ -16930,10 +16906,23 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
*/
crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
+ if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ pixclk = ilk_pipe_pixel_rate(crtc->config);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ pixclk = crtc->config->base.adjusted_mode.crtc_clock;
+ else
+ WARN_ON(dev_priv->display.modeset_calc_cdclk);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (IS_BROADWELL(dev_priv) && crtc->config->ips_enabled)
+ pixclk = DIV_ROUND_UP(pixclk * 100, 95);
+
drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
update_scanline_offset(crtc);
}
+ dev_priv->min_pixclk[crtc->pipe] = pixclk;
+
intel_pipe_config_sanity_check(dev_priv, crtc->config);
}
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 90283edcafba..0b8e8eb85c19 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -355,7 +355,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
- struct intel_dp *intel_dp);
+ struct intel_dp *intel_dp,
+ bool force_disable_vdd);
static void
intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);
@@ -516,7 +517,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
/*
* Even vdd force doesn't work until we've made
@@ -553,7 +554,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
* Only the HW needs to be reprogrammed, the SW state is fixed and
* has been setup during connector init.
*/
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
return 0;
}
@@ -636,7 +637,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
port_name(port), pipe_name(intel_dp->pps_pipe));
intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
}
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
@@ -2912,7 +2913,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
}
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
@@ -4014,8 +4015,8 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
return;
/* FIXME: we need to synchronize this sort of stuff with hardware
- * readout */
- if (WARN_ON_ONCE(!intel_dp->lane_count))
+ * readout. Currently fast link training doesn't work on boot-up. */
+ if (!intel_dp->lane_count)
return;
/* if link training is requested we should perform it always */
@@ -5055,7 +5056,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
- struct intel_dp *intel_dp)
+ struct intel_dp *intel_dp,
+ bool force_disable_vdd)
{
struct drm_i915_private *dev_priv = to_i915(dev);
u32 pp_on, pp_off, pp_div, port_sel = 0;
@@ -5068,6 +5070,31 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
intel_pps_get_registers(dev_priv, intel_dp, &regs);
+ /*
+ * On some VLV machines the BIOS can leave the VDD
+ * enabled even on power sequencers which aren't
+ * hooked up to any port. This would mess up the
+ * power domain tracking the first time we pick
+ * one of these power sequencers for use since
+ * edp_panel_vdd_on() would notice that the VDD was
+ * already on and therefore wouldn't grab the power
+ * domain reference. Disable VDD first to avoid this.
+ * This also avoids spuriously turning the VDD on as
+ * soon as the new power sequencer gets initialized.
+ */
+ if (force_disable_vdd) {
+ u32 pp = ironlake_get_pp_control(intel_dp);
+
+ WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
+
+ if (pp & EDP_FORCE_VDD)
+ DRM_DEBUG_KMS("VDD already on, disabling first\n");
+
+ pp &= ~EDP_FORCE_VDD;
+
+ I915_WRITE(regs.pp_ctrl, pp);
+ }
+
pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
@@ -5122,7 +5149,7 @@ static void intel_dp_pps_init(struct drm_device *dev,
vlv_initial_power_sequencer_setup(intel_dp);
} else {
intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
}
}
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 0d8ff0034b88..47cd1b20fb3e 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -300,7 +300,8 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
mutex_lock(&dev_priv->sb_lock);
vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
vlv_iosf_sb_write(dev_priv, port, cfg0,
- CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value));
+ CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO |
+ CHV_GPIO_GPIOTXSTATE(value));
mutex_unlock(&dev_priv->sb_lock);
}
@@ -376,11 +377,11 @@ static const fn_mipi_elem_exec exec_elem[] = {
*/
static const char * const seq_name[] = {
- [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
+ [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
[MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP",
[MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON",
[MIPI_SEQ_DISPLAY_OFF] = "MIPI_SEQ_DISPLAY_OFF",
- [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
+ [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
[MIPI_SEQ_BACKLIGHT_ON] = "MIPI_SEQ_BACKLIGHT_ON",
[MIPI_SEQ_BACKLIGHT_OFF] = "MIPI_SEQ_BACKLIGHT_OFF",
[MIPI_SEQ_TEAR_ON] = "MIPI_SEQ_TEAR_ON",
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0a09024d6ca3..d4961fa20c73 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1968,12 +1968,7 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
ret);
}
- ret = logical_ring_init(engine);
- if (ret) {
- lrc_destroy_wa_ctx_obj(engine);
- }
-
- return ret;
+ return logical_ring_init(engine);
}
int logical_xcs_ring_init(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index fd0e4dac7cc1..e589e17876dc 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -216,7 +216,8 @@ static void intel_overlay_submit_request(struct intel_overlay *overlay,
{
GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip,
&overlay->i915->drm.struct_mutex));
- overlay->last_flip.retire = retire;
+ i915_gem_active_set_retire_fn(&overlay->last_flip, retire,
+ &overlay->i915->drm.struct_mutex);
i915_gem_active_set(&overlay->last_flip, req);
i915_add_request(req);
}
@@ -839,8 +840,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret)
goto out_unpin;
- i915_gem_track_fb(overlay->vma->obj, new_bo,
- INTEL_FRONTBUFFER_OVERLAY(pipe));
+ i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
+ vma->obj, INTEL_FRONTBUFFER_OVERLAY(pipe));
overlay->old_vma = overlay->vma;
overlay->vma = vma;
@@ -1430,6 +1431,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
overlay->contrast = 75;
overlay->saturation = 146;
+ init_request_active(&overlay->last_flip, NULL);
+
regs = intel_overlay_map_regs(overlay);
if (!regs)
goto out_unpin_bo;
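Put together, the overlay now initializes its request tracking once at setup time and installs the retire hook through the accessor each time a flip request is queued; a condensed sketch of that sequence, assuming the i915_gem_active helpers used in the hunks above:

	/* intel_setup_overlay(): start with no request and no retire hook. */
	init_request_active(&overlay->last_flip, NULL);

	/* intel_overlay_submit_request(): install the hook while holding
	 * struct_mutex, then start tracking the request. */
	i915_gem_active_set_retire_fn(&overlay->last_flip, retire,
				      &overlay->i915->drm.struct_mutex);
	i915_gem_active_set(&overlay->last_flip, req);
	i915_add_request(req);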
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d67974eb127a..ae2c0bb4b2e8 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2964,24 +2964,10 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
return 0;
}
-static int
-intel_do_sagv_disable(struct drm_i915_private *dev_priv)
-{
- int ret;
- uint32_t temp = GEN9_SAGV_DISABLE;
-
- ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
- &temp);
- if (ret)
- return ret;
- else
- return temp & GEN9_SAGV_IS_DISABLED;
-}
-
int
intel_disable_sagv(struct drm_i915_private *dev_priv)
{
- int ret, result;
+ int ret;
if (!intel_has_sagv(dev_priv))
return 0;
@@ -2993,25 +2979,23 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->rps.hw_lock);
/* bspec says to keep retrying for at least 1 ms */
- ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1);
+ ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_DISABLE,
+ GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
+ 1);
mutex_unlock(&dev_priv->rps.hw_lock);
- if (ret == -ETIMEDOUT) {
- DRM_ERROR("Request to disable SAGV timed out\n");
- return -ETIMEDOUT;
- }
-
/*
* Some skl systems, pre-release machines in particular,
* don't actually have an SAGV.
*/
- if (IS_SKYLAKE(dev_priv) && result == -ENXIO) {
+ if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
return 0;
- } else if (result < 0) {
- DRM_ERROR("Failed to disable the SAGV\n");
- return result;
+ } else if (ret < 0) {
+ DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
+ return ret;
}
dev_priv->sagv_status = I915_SAGV_DISABLED;
@@ -7890,6 +7874,81 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
return 0;
}
+static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
+ u32 request, u32 reply_mask, u32 reply,
+ u32 *status)
+{
+ u32 val = request;
+
+ *status = sandybridge_pcode_read(dev_priv, mbox, &val);
+
+ return *status || ((val & reply_mask) == reply);
+}
+
+/**
+ * skl_pcode_request - send PCODE request until acknowledgment
+ * @dev_priv: device private
+ * @mbox: PCODE mailbox ID the request is targeted for
+ * @request: request ID
+ * @reply_mask: mask used to check for request acknowledgment
+ * @reply: value used to check for request acknowledgment
+ * @timeout_base_ms: timeout for polling with preemption enabled
+ *
+ * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
+ * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
+ * The request is acknowledged once the PCODE reply dword equals @reply after
+ * applying @reply_mask. Polling is first attempted with preemption enabled
+ * for @timeout_base_ms; if that times out, the poll is retried for another
+ * 10 ms with preemption disabled.
+ *
+ * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
+ * other error as reported by PCODE.
+ */
+int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+ u32 status;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
+ &status)
+
+ /*
+ * Prime the PCODE by doing a request first. Normally this guarantees
+ * that a subsequent request, at most @timeout_base_ms later, succeeds.
+ * _wait_for() doesn't guarantee how soon it first evaluates the condition
+ * it is passed, so send the first request explicitly.
+ */
+ if (COND) {
+ ret = 0;
+ goto out;
+ }
+ ret = _wait_for(COND, timeout_base_ms * 1000, 10);
+ if (!ret)
+ goto out;
+
+ /*
+ * The above can time out if the number of requests was low (2 in the
+ * worst case) _and_ PCODE was busy for some reason even after a
+ * (queued) request and @timeout_base_ms delay. As a workaround retry
+ * the poll with preemption disabled to maximize the number of
+ * requests. Increase the timeout from @timeout_base_ms to 10ms to
+ * account for interrupts that could reduce the number of these
+ * requests.
+ */
+ DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
+ WARN_ON_ONCE(timeout_base_ms > 3);
+ preempt_disable();
+ ret = wait_for_atomic(COND, 10);
+ preempt_enable();
+
+out:
+ return ret ? ret : status;
+#undef COND
+}
+
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
/*
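skl_pcode_request() must be called with rps.hw_lock held and folds both outcomes into its return value: -ETIMEDOUT when PCODE never acknowledges, otherwise the PCODE status (0 on success, a negative errno on a PCODE-reported error). A usage sketch modelled on the SAGV disable path above (the error messages are illustrative only):

	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	/*
	 * Resend GEN9_SAGV_DISABLE until the reply has GEN9_SAGV_IS_DISABLED
	 * set, polling for ~1 ms plus the 10 ms preemption-disabled fallback.
	 */
	ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
				GEN9_SAGV_DISABLE,
				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
				1);
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (ret == -ETIMEDOUT)
		DRM_ERROR("PCODE never acknowledged the request\n");
	else if (ret < 0)
		DRM_ERROR("PCODE reported an error (%d)\n", ret);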
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 7b488e2793d9..c6be70686b4a 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -825,13 +825,9 @@ void intel_psr_init(struct drm_device *dev)
dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
- /* Per platform default */
- if (i915.enable_psr == -1) {
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- i915.enable_psr = 1;
- else
- i915.enable_psr = 0;
- }
+ /* Per platform default: all disabled. */
+ if (i915.enable_psr == -1)
+ i915.enable_psr = 0;
/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 356c662ad453..87b4af092d54 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -1039,7 +1039,18 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
- I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+ u32 val;
+
+ /*
+ * On driver load, a pipe may be active and driving a DSI display.
+ * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
+ * (and never recovering) in this case. intel_dsi_post_disable() will
+ * clear it when we turn off the display.
+ */
+ val = I915_READ(DSPCLK_GATE_D);
+ val &= DPOUNIT_CLOCK_GATE_DISABLE;
+ val |= VRHUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, val);
/*
* Disable trickle feed and enable pnd deadline calculation
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index d7be0d94ba4d..0bffd3f0c15d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -62,7 +62,7 @@ fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
d->wake_count++;
hrtimer_start_range_ns(&d->timer,
- ktime_set(0, NSEC_PER_MSEC),
+ NSEC_PER_MSEC,
NSEC_PER_MSEC,
HRTIMER_MODE_REL);
}
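The intel_uncore.c change assumes ktime_t has become a plain signed 64-bit nanosecond count in the surrounding tree (not shown in this diff), so a scalar such as NSEC_PER_MSEC can be passed directly where a ktime_t is expected; a minimal sketch of the equivalence under that assumption:

	ktime_t old_way = ktime_set(0, NSEC_PER_MSEC);	/* 0 s + 1000000 ns */
	ktime_t new_way = NSEC_PER_MSEC;		/* same value, no helper needed */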