Diffstat (limited to 'drivers/gpu')
499 files changed, 11665 insertions, 7086 deletions
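Among the larger functional changes in this diff is amdgpu's PCIe peer-to-peer DMA-BUF support: amdgpu sets .allow_peer2peer = true in its amdgpu_dma_buf_attach_ops, clears attach->peer2peer in amdgpu_dma_buf_attach() when pci_p2pdma_distance_many() reports no usable path between the devices, and maps VRAM-resident buffers with dma_map_resource() via the new amdgpu_vram_mgr_alloc_sgt(). As a minimal sketch of the importer side only, assuming the v5.8-era dynamic-attach API and with my_move_notify(), my_import() and my_importer as hypothetical names (not part of this diff):

#include <linux/dma-buf.h>
#include <linux/pci.h>

/* Called by the exporter when the buffer moves; a real importer would
 * tear down its cached mappings here (amdgpu does this in
 * amdgpu_dma_buf_move_notify() below). */
static void my_move_notify(struct dma_buf_attachment *attach)
{
}

static const struct dma_buf_attach_ops my_attach_ops = {
	/* importer can consume bus addresses pointing at device memory */
	.allow_peer2peer = true,
	.move_notify = my_move_notify,
};

static int my_import(struct dma_buf *dmabuf, struct pci_dev *pdev,
		     void *my_importer)
{
	struct dma_buf_attachment *attach;

	attach = dma_buf_dynamic_attach(dmabuf, &pdev->dev, &my_attach_ops,
					my_importer);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	/* The exporter may still veto P2P: amdgpu clears attach->peer2peer
	 * when pci_p2pdma_distance_many() finds no route, so check the flag
	 * before relying on VRAM mappings. */
	return 0;
}

Whether a mapping is served from GTT pages or directly from VRAM is then decided per map in amdgpu_dma_buf_map() below, keyed on attach->peer2peer and the BO's preferred domains.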
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile index f17d01f076c7..835c88318cec 100644 --- a/drivers/gpu/Makefile +++ b/drivers/gpu/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_TEGRA_HOST1X) += host1x/ obj-y += drm/ vga/ obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/ +obj-$(CONFIG_TRACE_GPU_MEM) += trace/ diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 7f72ef5e7811..f34d08c83485 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -17,7 +17,8 @@ drm-y := drm_auth.o drm_cache.o \ drm_plane.o drm_color_mgmt.o drm_print.o \ drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \ drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \ - drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o + drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o \ + drm_managed.o drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o @@ -32,8 +33,7 @@ drm-$(CONFIG_PCI) += drm_pci.o drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o -drm_vram_helper-y := drm_gem_vram_helper.o \ - drm_vram_helper_common.o +drm_vram_helper-y := drm_gem_vram_helper.o obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o drm_ttm_helper-y := drm_gem_ttm_helper.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 50dff69a0f6e..b1172d93c99c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -192,30 +192,35 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev) static bool amdgpu_read_platform_bios(struct amdgpu_device *adev) { - uint8_t __iomem *bios; - size_t size; + phys_addr_t rom = adev->pdev->rom; + size_t romlen = adev->pdev->romlen; + void __iomem *bios; adev->bios = NULL; - bios = pci_platform_rom(adev->pdev, &size); - if (!bios) { + if (!rom || romlen == 0) return false; - } - adev->bios = kzalloc(size, GFP_KERNEL); - if (adev->bios == NULL) + adev->bios = kzalloc(romlen, GFP_KERNEL); + if (!adev->bios) return false; - memcpy_fromio(adev->bios, bios, size); + bios = ioremap(rom, romlen); + if (!bios) + goto free_bios; - if (!check_atom_bios(adev->bios, size)) { - kfree(adev->bios); - return false; - } + memcpy_fromio(adev->bios, bios, romlen); + iounmap(bios); - adev->bios_size = size; + if (!check_atom_bios(adev->bios, romlen)) + goto free_bios; + + adev->bios_size = romlen; return true; +free_bios: + kfree(adev->bios); + return false; } #ifdef CONFIG_ACPI diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index ffeb20f11c07..43d8ed7dbd00 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -38,6 +38,7 @@ #include <drm/amdgpu_drm.h> #include <linux/dma-buf.h> #include <linux/dma-fence-array.h> +#include <linux/pci-p2pdma.h> /** * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation @@ -179,6 +180,9 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); int r; + if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0) + attach->peer2peer = false; + if (attach->dev->driver == adev->dev->driver) return 0; @@ -272,14 +276,21 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach, struct dma_buf *dma_buf = attach->dmabuf; struct drm_gem_object *obj = dma_buf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + struct 
amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct sg_table *sgt; long r; if (!bo->pin_count) { - /* move buffer into GTT */ + /* move buffer into GTT or VRAM */ struct ttm_operation_ctx ctx = { false, false }; + unsigned domains = AMDGPU_GEM_DOMAIN_GTT; - amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); + if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM && + attach->peer2peer) { + bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + domains |= AMDGPU_GEM_DOMAIN_VRAM; + } + amdgpu_bo_placement_from_domain(bo, domains); r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); if (r) return ERR_PTR(r); @@ -289,20 +300,34 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach, return ERR_PTR(-EBUSY); } - sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages); - if (IS_ERR(sgt)) - return sgt; - - if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, - DMA_ATTR_SKIP_CPU_SYNC)) - goto error_free; + switch (bo->tbo.mem.mem_type) { + case TTM_PL_TT: + sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, + bo->tbo.num_pages); + if (IS_ERR(sgt)) + return sgt; + + if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, + DMA_ATTR_SKIP_CPU_SYNC)) + goto error_free; + break; + + case TTM_PL_VRAM: + r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, attach->dev, + dir, &sgt); + if (r) + return ERR_PTR(r); + break; + default: + return ERR_PTR(-EINVAL); + } return sgt; error_free: sg_free_table(sgt); kfree(sgt); - return ERR_PTR(-ENOMEM); + return ERR_PTR(-EBUSY); } /** @@ -318,9 +343,18 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach, struct sg_table *sgt, enum dma_data_direction dir) { - dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); - sg_free_table(sgt); - kfree(sgt); + struct dma_buf *dma_buf = attach->dmabuf; + struct drm_gem_object *obj = dma_buf->priv; + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + + if (sgt->sgl->page_link) { + dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); + sg_free_table(sgt); + kfree(sgt); + } else { + amdgpu_vram_mgr_free_sgt(adev, attach->dev, dir, sgt); + } } /** @@ -514,6 +548,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach) } static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = { + .allow_peer2peer = true, .move_notify = amdgpu_dma_buf_move_notify }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c index ba1bb95a3cf9..0e8018c9aa8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c @@ -856,7 +856,7 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev) const char *name = pp_lib_thermal_controller_names[controller->ucType]; info.addr = controller->ucI2cAddress >> 1; strlcpy(info.type, name, sizeof(info.type)); - i2c_new_device(&adev->pm.i2c_bus->adapter, &info); + i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info); } } else { DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n", diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 77d988a0033f..4ed9958af94e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -29,6 +29,7 @@ #include <linux/module.h> #include <linux/pagemap.h> #include <linux/pci.h> +#include <linux/dma-buf.h> #include <drm/amdgpu_drm.h> #include <drm/drm_debugfs.h> @@ -867,7 +868,8 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, 
void *data) attachment = READ_ONCE(bo->tbo.base.import_attach); if (attachment) - seq_printf(m, " imported from %p", dma_buf); + seq_printf(m, " imported from %p%s", dma_buf, + attachment->peer2peer ? " P2P" : ""); else if (dma_buf) seq_printf(m, " exported as %p", dma_buf); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index ea0199a8f9c9..d5543c25f3c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -863,7 +863,6 @@ struct amdgpu_ttm_tt { static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = { (1 << 0), /* HMM_PFN_VALID */ (1 << 1), /* HMM_PFN_WRITE */ - 0 /* HMM_PFN_DEVICE_PRIVATE */ }; static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = { @@ -944,7 +943,7 @@ retry: range->notifier_seq = mmu_interval_read_begin(&bo->notifier); down_read(&mm->mmap_sem); - r = hmm_range_fault(range, 0); + r = hmm_range_fault(range); up_read(&mm->mmap_sem); if (unlikely(r <= 0)) { /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 11c0e79e7106..4351d02644a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -24,8 +24,9 @@ #ifndef __AMDGPU_TTM_H__ #define __AMDGPU_TTM_H__ -#include "amdgpu.h" +#include <linux/dma-direction.h> #include <drm/gpu_scheduler.h> +#include "amdgpu.h" #define AMDGPU_PL_GDS (TTM_PL_PRIV + 0) #define AMDGPU_PL_GWS (TTM_PL_PRIV + 1) @@ -74,6 +75,15 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man); u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo); +int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, + struct ttm_mem_reg *mem, + struct device *dev, + enum dma_data_direction dir, + struct sg_table **sgt); +void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev, + struct device *dev, + enum dma_data_direction dir, + struct sg_table *sgt); uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 82a3299e53c0..128a667ed8fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -22,6 +22,7 @@ * Authors: Christian König */ +#include <linux/dma-mapping.h> #include "amdgpu.h" #include "amdgpu_vm.h" #include "amdgpu_atomfirmware.h" @@ -459,6 +460,104 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man, } /** + * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table + * + * @adev: amdgpu device pointer + * @mem: TTM memory object + * @dev: the other device + * @dir: dma direction + * @sgt: resulting sg table + * + * Allocate and fill a sg table from a VRAM allocation. 
+ */ +int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, + struct ttm_mem_reg *mem, + struct device *dev, + enum dma_data_direction dir, + struct sg_table **sgt) +{ + struct drm_mm_node *node; + struct scatterlist *sg; + int num_entries = 0; + unsigned int pages; + int i, r; + + *sgt = kmalloc(sizeof(*sg), GFP_KERNEL); + if (!*sgt) + return -ENOMEM; + + for (pages = mem->num_pages, node = mem->mm_node; + pages; pages -= node->size, ++node) + ++num_entries; + + r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL); + if (r) + goto error_free; + + for_each_sg((*sgt)->sgl, sg, num_entries, i) + sg->length = 0; + + node = mem->mm_node; + for_each_sg((*sgt)->sgl, sg, num_entries, i) { + phys_addr_t phys = (node->start << PAGE_SHIFT) + + adev->gmc.aper_base; + size_t size = node->size << PAGE_SHIFT; + dma_addr_t addr; + + ++node; + addr = dma_map_resource(dev, phys, size, dir, + DMA_ATTR_SKIP_CPU_SYNC); + r = dma_mapping_error(dev, addr); + if (r) + goto error_unmap; + + sg_set_page(sg, NULL, size, 0); + sg_dma_address(sg) = addr; + sg_dma_len(sg) = size; + } + return 0; + +error_unmap: + for_each_sg((*sgt)->sgl, sg, num_entries, i) { + if (!sg->length) + continue; + + dma_unmap_resource(dev, sg->dma_address, + sg->length, dir, + DMA_ATTR_SKIP_CPU_SYNC); + } + sg_free_table(*sgt); + +error_free: + kfree(*sgt); + return r; +} + +/** + * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table + * + * @adev: amdgpu device pointer + * @sgt: sg table to free + * + * Free a previously allocate sg table. + */ +void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev, + struct device *dev, + enum dma_data_direction dir, + struct sg_table *sgt) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) + dma_unmap_resource(dev, sg->dma_address, + sg->length, dir, + DMA_ATTR_SKIP_CPU_SYNC); + sg_free_table(sgt); + kfree(sgt); +} + +/** * amdgpu_vram_mgr_usage - how many bytes are used in this domain * * @man: TTM memory type manager diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index c0ca9a8229e1..ebd723a0bcfc 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -691,7 +691,7 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring) } /** - * sdma_v4_0_ring_set_wptr - commit the write pointer + * sdma_v4_0_page_ring_set_wptr - commit the write pointer * * @ring: amdgpu ring pointer * @@ -987,7 +987,7 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev) } /** - * sdma_v_0_ctx_switch_enable - stop the async dma engines context switch + * sdma_v4_0_ctx_switch_enable - stop the async dma engines context switch * * @adev: amdgpu_device pointer * @enable: enable/disable the DMA MEs context switch. 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 98f39db81c7b..71309ee3aca3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4745,6 +4745,7 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) static int amdgpu_dm_connector_late_register(struct drm_connector *connector) { +#if defined(CONFIG_DEBUG_FS) struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); int r; @@ -4757,7 +4758,6 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector) return r; } -#if defined(CONFIG_DEBUG_FS) connector_debugfs_init(amdgpu_dm_connector); #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 69056660672d..ae0a7ef1d595 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -93,17 +93,23 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, static void dm_dp_mst_connector_destroy(struct drm_connector *connector) { - struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder; + struct amdgpu_dm_connector *aconnector = + to_amdgpu_dm_connector(connector); + struct amdgpu_encoder *amdgpu_encoder = aconnector->mst_encoder; - kfree(amdgpu_dm_connector->edid); - amdgpu_dm_connector->edid = NULL; + if (aconnector->dc_sink) { + dc_link_remove_remote_sink(aconnector->dc_link, + aconnector->dc_sink); + dc_sink_release(aconnector->dc_sink); + } + + kfree(aconnector->edid); drm_encoder_cleanup(&amdgpu_encoder->base); kfree(amdgpu_encoder); drm_connector_cleanup(connector); - drm_dp_mst_put_port_malloc(amdgpu_dm_connector->port); - kfree(amdgpu_dm_connector); + drm_dp_mst_put_port_malloc(aconnector->port); + kfree(aconnector); } static int @@ -392,40 +398,13 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, */ amdgpu_dm_connector_funcs_reset(connector); - DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n", - aconnector, connector->base.id, aconnector->mst_port); - drm_dp_mst_get_port_malloc(port); - DRM_DEBUG_KMS(":%d\n", connector->base.id); - return connector; } -static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, - struct drm_connector *connector) -{ - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - - DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n", - aconnector, connector->base.id, aconnector->mst_port); - - if (aconnector->dc_sink) { - amdgpu_dm_update_freesync_caps(connector, NULL); - dc_link_remove_remote_sink(aconnector->dc_link, - aconnector->dc_sink); - dc_sink_release(aconnector->dc_sink); - aconnector->dc_sink = NULL; - aconnector->dc_link->cur_link_settings.lane_count = 0; - } - - drm_connector_unregister(connector); - drm_connector_put(connector); -} - static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { .add_connector = dm_dp_add_mst_connector, - .destroy_connector = dm_dp_destroy_mst_connector, }; void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index f9fa0f7712b3..9ef9e50a34fa 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2521,7 +2521,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) /* get phy test pattern and pattern parameters from DP receiver */ core_link_read_dpcd( link, - DP_TEST_PHY_PATTERN, + DP_PHY_TEST_PATTERN, &dpcd_test_pattern.raw, sizeof(dpcd_test_pattern)); core_link_read_dpcd( diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 3e1b3ed8a05e..d5a3487ccfac 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1732,6 +1732,12 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) if (ret) goto out; + if (ras && ras->supported) { + ret = smu_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL); + if (ret) + goto out; + } + /* clear vbios scratch 6 and 7 for coming asic reinit */ WREG32(adev->bios_scratch_reg_offset + 6, 0); WREG32(adev->bios_scratch_reg_offset + 7, 0); diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c index d6a6692db0ac..c05d001163e0 100644 --- a/drivers/gpu/drm/arc/arcpgu_drv.c +++ b/drivers/gpu/drm/arc/arcpgu_drv.c @@ -137,10 +137,11 @@ static struct drm_info_list arcpgu_debugfs_list[] = { { "clocks", arcpgu_show_pxlclock, 0 }, }; -static int arcpgu_debugfs_init(struct drm_minor *minor) +static void arcpgu_debugfs_init(struct drm_minor *minor) { - return drm_debugfs_create_files(arcpgu_debugfs_list, - ARRAY_SIZE(arcpgu_debugfs_list), minor->debugfs_root, minor); + drm_debugfs_create_files(arcpgu_debugfs_list, + ARRAY_SIZE(arcpgu_debugfs_list), + minor->debugfs_root, minor); } #endif diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c index 442d4656150a..16dfd5cdb66c 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c @@ -14,6 +14,7 @@ #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_irq.h> +#include <drm/drm_managed.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -271,6 +272,7 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev) err = drm_dev_init(drm, &komeda_kms_driver, mdev->dev); if (err) goto free_kms; + drmm_add_final_kfree(drm, kms); drm->dev_private = mdev; diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index 2e053815b54a..194419f47c5e 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -224,10 +224,11 @@ static struct drm_info_list hdlcd_debugfs_list[] = { { "clocks", hdlcd_show_pxlclock, 0 }, }; -static int hdlcd_debugfs_init(struct drm_minor *minor) +static void hdlcd_debugfs_init(struct drm_minor *minor) { - return drm_debugfs_create_files(hdlcd_debugfs_list, - ARRAY_SIZE(hdlcd_debugfs_list), minor->debugfs_root, minor); + drm_debugfs_create_files(hdlcd_debugfs_list, + ARRAY_SIZE(hdlcd_debugfs_list), + minor->debugfs_root, minor); } #endif diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 37d92a06318e..def8c9ffafca 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -548,7 +548,7 @@ static const struct file_operations malidp_debugfs_fops = { .release = single_release, }; -static int malidp_debugfs_init(struct drm_minor *minor) +static void malidp_debugfs_init(struct drm_minor *minor) { struct malidp_drm *malidp = minor->dev->dev_private; @@ -557,7 +557,6 @@ static int 
malidp_debugfs_init(struct drm_minor *minor) spin_lock_init(&malidp->errors_lock); debugfs_create_file("debug", S_IRUGO | S_IWUSR, minor->debugfs_root, minor->dev, &malidp_debugfs_fops); - return 0; } #endif //CONFIG_DEBUG_FS diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 197dca3fc84c..dd9ed71ed942 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -12,6 +12,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_ioctl.h> +#include <drm/drm_managed.h> #include <drm/drm_prime.h> #include <drm/drm_probe_helper.h> #include <drm/drm_fb_helper.h> @@ -103,6 +104,7 @@ static int armada_drm_bind(struct device *dev) kfree(priv); return ret; } + drmm_add_final_kfree(&priv->drm, priv); /* Remove early framebuffers */ ret = drm_fb_helper_remove_conflicting_framebuffers(NULL, diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 30aa73a5d9b7..b7ba22dddcad 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -32,6 +32,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_gem_vram_helper.h> #include <drm/drm_probe_helper.h> @@ -111,6 +112,8 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto err_ast_driver_unload; + drm_fbdev_generic_setup(dev, 32); + return 0; err_ast_driver_unload: diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 18a0a4ce00f6..e5398e3dabe7 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -30,7 +30,6 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_gem.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_gem_vram_helper.h> @@ -512,10 +511,6 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags) drm_mode_config_reset(dev); - ret = drm_fbdev_generic_setup(dev, 32); - if (ret) - goto out_free; - return 0; out_free: kfree(ast); diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index cdd6c46d6557..7a9f20a2fd30 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -881,6 +881,17 @@ static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = { .atomic_disable = ast_crtc_helper_atomic_disable, }; +static void ast_crtc_reset(struct drm_crtc *crtc) +{ + struct ast_crtc_state *ast_state = + kzalloc(sizeof(*ast_state), GFP_KERNEL); + + if (crtc->state) + crtc->funcs->atomic_destroy_state(crtc, crtc->state); + + __drm_atomic_helper_crtc_reset(crtc, &ast_state->base); +} + static void ast_crtc_destroy(struct drm_crtc *crtc) { drm_crtc_cleanup(crtc); @@ -919,7 +930,7 @@ static void ast_crtc_atomic_destroy_state(struct drm_crtc *crtc, } static const struct drm_crtc_funcs ast_crtc_funcs = { - .reset = drm_atomic_helper_crtc_reset, + .reset = ast_crtc_reset, .set_config = drm_crtc_helper_set_config, .gamma_set = drm_atomic_helper_legacy_gamma_set, .destroy = ast_crtc_destroy, diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c index e2019fe97fff..43bc709e3523 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c @@ -11,9 +11,10 @@ #include <linux/media-bus-format.h> #include <linux/of_graph.h> +#include <drm/drm_bridge.h> #include <drm/drm_encoder.h> #include <drm/drm_of.h> -#include 
<drm/drm_bridge.h> +#include <drm/drm_simple_kms_helper.h> #include "atmel_hlcdc_dc.h" @@ -22,10 +23,6 @@ struct atmel_hlcdc_rgb_output { int bus_fmt; }; -static const struct drm_encoder_funcs atmel_hlcdc_panel_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static struct atmel_hlcdc_rgb_output * atmel_hlcdc_encoder_to_rgb_output(struct drm_encoder *encoder) { @@ -98,9 +95,8 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint) return -EINVAL; } - ret = drm_encoder_init(dev, &output->encoder, - &atmel_hlcdc_panel_encoder_funcs, - DRM_MODE_ENCODER_NONE, NULL); + ret = drm_simple_encoder_init(dev, &output->encoder, + DRM_MODE_ENCODER_NONE); if (ret) return ret; diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h index 917767173ee6..e5bd1d517a18 100644 --- a/drivers/gpu/drm/bochs/bochs.h +++ b/drivers/gpu/drm/bochs/bochs.h @@ -92,7 +92,6 @@ void bochs_mm_fini(struct bochs_device *bochs); /* bochs_kms.c */ int bochs_kms_init(struct bochs_device *bochs); -void bochs_kms_fini(struct bochs_device *bochs); /* bochs_fbdev.c */ extern const struct drm_mode_config_funcs bochs_mode_funcs; diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c index addb0568c1af..e18c51de1196 100644 --- a/drivers/gpu/drm/bochs/bochs_drv.c +++ b/drivers/gpu/drm/bochs/bochs_drv.c @@ -7,6 +7,7 @@ #include <drm/drm_drv.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_managed.h> #include "bochs.h" @@ -21,10 +22,7 @@ static void bochs_unload(struct drm_device *dev) { struct bochs_device *bochs = dev->dev_private; - bochs_kms_fini(bochs); bochs_mm_fini(bochs); - kfree(bochs); - dev->dev_private = NULL; } static int bochs_load(struct drm_device *dev) @@ -32,7 +30,7 @@ static int bochs_load(struct drm_device *dev) struct bochs_device *bochs; int ret; - bochs = kzalloc(sizeof(*bochs), GFP_KERNEL); + bochs = drmm_kzalloc(dev, sizeof(*bochs), GFP_KERNEL); if (bochs == NULL) return -ENOMEM; dev->dev_private = bochs; diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index 8066d7d370d5..7f4bcfad87e9 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c @@ -134,7 +134,11 @@ const struct drm_mode_config_funcs bochs_mode_funcs = { int bochs_kms_init(struct bochs_device *bochs) { - drm_mode_config_init(bochs->dev); + int ret; + + ret = drmm_mode_config_init(bochs->dev); + if (ret) + return ret; bochs->dev->mode_config.max_width = 8192; bochs->dev->mode_config.max_height = 8192; @@ -160,12 +164,3 @@ int bochs_kms_init(struct bochs_device *bochs) return 0; } - -void bochs_kms_fini(struct bochs_device *bochs) -{ - if (!bochs->dev->mode_config.num_connector) - return; - - drm_atomic_helper_shutdown(bochs->dev); - drm_mode_config_cleanup(bochs->dev); -} diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index aaed2347ace9..6ec945f837b8 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -58,6 +58,22 @@ config DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW to DP++. This is used with the i.MX6 imx-ldb driver. You are likely to say N here. 
+config DRM_NWL_MIPI_DSI + tristate "Northwest Logic MIPI DSI Host controller" + depends on DRM + depends on COMMON_CLK + depends on OF && HAS_IOMEM + select DRM_KMS_HELPER + select DRM_MIPI_DSI + select DRM_PANEL_BRIDGE + select GENERIC_PHY_MIPI_DPHY + select MFD_SYSCON + select MULTIPLEXER + select REGMAP_MMIO + help + This enables the Northwest Logic MIPI DSI Host controller as + for example found on NXP's i.MX8 Processors. + config DRM_NXP_PTN3460 tristate "NXP PTN3460 DP/LVDS bridge" depends on OF diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 6fb062b5b0f0..b04ac2dfa22c 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/ obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o +obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi.o obj-y += analogix/ obj-y += synopsys/ diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig index 47d4eb9e845d..f46a5e26b5dd 100644 --- a/drivers/gpu/drm/bridge/adv7511/Kconfig +++ b/drivers/gpu/drm/bridge/adv7511/Kconfig @@ -6,7 +6,7 @@ config DRM_I2C_ADV7511 select REGMAP_I2C select DRM_MIPI_DSI help - Support for the Analog Device ADV7511(W)/13/33/35 HDMI encoders. + Support for the Analog Devices ADV7511(W)/13/33/35 HDMI encoders. config DRM_I2C_ADV7511_AUDIO bool "ADV7511 HDMI Audio driver" diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c index a428185be2c1..f101dd2819b5 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c @@ -19,13 +19,15 @@ static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs, { switch (fs) { case 32000: - *n = 4096; + case 48000: + case 96000: + case 192000: + *n = fs * 128 / 1000; break; case 44100: - *n = 6272; - break; - case 48000: - *n = 6144; + case 88200: + case 176400: + *n = fs * 128 / 900; break; } @@ -119,6 +121,9 @@ int adv7511_hdmi_hw_params(struct device *dev, void *data, audio_source = ADV7511_AUDIO_SOURCE_I2S; i2s_format = ADV7511_I2S_FORMAT_LEFT_J; break; + case HDMI_SPDIF: + audio_source = ADV7511_AUDIO_SOURCE_SPDIF; + break; default: return -EINVAL; } @@ -175,11 +180,21 @@ static int audio_startup(struct device *dev, void *data) /* use Audio infoframe updated info */ regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1), BIT(5), 0); + /* enable SPDIF receiver */ + if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF) + regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG, + BIT(7), BIT(7)); + return 0; } static void audio_shutdown(struct device *dev, void *data) { + struct adv7511 *adv7511 = dev_get_drvdata(dev); + + if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF) + regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG, + BIT(7), 0); } static int adv7511_hdmi_i2s_get_dai_id(struct snd_soc_component *component, @@ -213,6 +228,7 @@ static const struct hdmi_codec_pdata codec_data = { .ops = &adv7511_codec_ops, .max_i2s_channels = 2, .i2s = 1, + .spdif = 1, }; int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511) diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index 9ded2cef57dd..76736fb8ed94 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1652,8 +1652,7 @@ static 
ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux, } struct analogix_dp_device * -analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, - struct analogix_dp_plat_data *plat_data) +analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) { struct platform_device *pdev = to_platform_device(dev); struct analogix_dp_device *dp; @@ -1756,22 +1755,30 @@ analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, irq_flags, "analogix-dp", dp); if (ret) { dev_err(&pdev->dev, "failed to request irq\n"); - goto err_disable_pm_runtime; + return ERR_PTR(ret); } disable_irq(dp->irq); + return dp; +} +EXPORT_SYMBOL_GPL(analogix_dp_probe); + +int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev) +{ + int ret; + dp->drm_dev = drm_dev; dp->encoder = dp->plat_data->encoder; dp->aux.name = "DP-AUX"; dp->aux.transfer = analogix_dpaux_transfer; - dp->aux.dev = &pdev->dev; + dp->aux.dev = dp->dev; ret = drm_dp_aux_register(&dp->aux); if (ret) - return ERR_PTR(ret); + return ret; - pm_runtime_enable(dev); + pm_runtime_enable(dp->dev); ret = analogix_dp_create_bridge(drm_dev, dp); if (ret) { @@ -1779,13 +1786,12 @@ analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, goto err_disable_pm_runtime; } - return dp; + return 0; err_disable_pm_runtime: + pm_runtime_disable(dp->dev); - pm_runtime_disable(dev); - - return ERR_PTR(ret); + return ret; } EXPORT_SYMBOL_GPL(analogix_dp_bind); @@ -1802,10 +1808,15 @@ void analogix_dp_unbind(struct analogix_dp_device *dp) drm_dp_aux_unregister(&dp->aux); pm_runtime_disable(dp->dev); - clk_disable_unprepare(dp->clock); } EXPORT_SYMBOL_GPL(analogix_dp_unbind); +void analogix_dp_remove(struct analogix_dp_device *dp) +{ + clk_disable_unprepare(dp->clock); +} +EXPORT_SYMBOL_GPL(analogix_dp_remove); + #ifdef CONFIG_PM int analogix_dp_suspend(struct analogix_dp_device *dp) { diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c new file mode 100644 index 000000000000..b14d725bf609 --- /dev/null +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -0,0 +1,1213 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * i.MX8 NWL MIPI DSI host driver + * + * Copyright (C) 2017 NXP + * Copyright (C) 2020 Purism SPC + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/irq.h> +#include <linux/math64.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/mux/consumer.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/phy/phy.h> +#include <linux/regmap.h> +#include <linux/reset.h> +#include <linux/sys_soc.h> +#include <linux/time64.h> + +#include <drm/drm_bridge.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_of.h> +#include <drm/drm_panel.h> +#include <drm/drm_print.h> + +#include <video/mipi_display.h> + +#include "nwl-dsi.h" + +#define DRV_NAME "nwl-dsi" + +/* i.MX8 NWL quirks */ +/* i.MX8MQ errata E11418 */ +#define E11418_HS_MODE_QUIRK BIT(0) + +#define NWL_DSI_MIPI_FIFO_TIMEOUT msecs_to_jiffies(500) + +enum transfer_direction { + DSI_PACKET_SEND, + DSI_PACKET_RECEIVE, +}; + +#define NWL_DSI_ENDPOINT_LCDIF 0 +#define NWL_DSI_ENDPOINT_DCSS 1 + +struct nwl_dsi_plat_clk_config { + const char *id; + struct clk *clk; + bool present; +}; + +struct nwl_dsi_transfer { + const struct mipi_dsi_msg *msg; + struct mipi_dsi_packet packet; + struct completion completed; + + int status; /* status of transmission */ + enum transfer_direction direction; + bool need_bta; + u8 cmd; + u16 rx_word_count; + size_t tx_len; /* in bytes */ + size_t 
rx_len; /* in bytes */ +}; + +struct nwl_dsi { + struct drm_bridge bridge; + struct mipi_dsi_host dsi_host; + struct drm_bridge *panel_bridge; + struct device *dev; + struct phy *phy; + union phy_configure_opts phy_cfg; + unsigned int quirks; + + struct regmap *regmap; + int irq; + /* + * The DSI host controller needs this reset sequence according to NWL: + * 1. Deassert pclk reset to get access to DSI regs + * 2. Configure DSI Host and DPHY and enable DPHY + * 3. Deassert ESC and BYTE resets to allow host TX operations) + * 4. Send DSI cmds to configure peripheral (handled by panel drv) + * 5. Deassert DPI reset so DPI receives pixels and starts sending + * DSI data + * + * TODO: Since panel_bridges do their DSI setup in enable we + * currently have 4. and 5. swapped. + */ + struct reset_control *rst_byte; + struct reset_control *rst_esc; + struct reset_control *rst_dpi; + struct reset_control *rst_pclk; + struct mux_control *mux; + + /* DSI clocks */ + struct clk *phy_ref_clk; + struct clk *rx_esc_clk; + struct clk *tx_esc_clk; + struct clk *core_clk; + /* + * hardware bug: the i.MX8MQ needs this clock on during reset + * even when not using LCDIF. + */ + struct clk *lcdif_clk; + + /* dsi lanes */ + u32 lanes; + enum mipi_dsi_pixel_format format; + struct drm_display_mode mode; + unsigned long dsi_mode_flags; + int error; + + struct nwl_dsi_transfer *xfer; +}; + +static const struct regmap_config nwl_dsi_regmap_config = { + .reg_bits = 16, + .val_bits = 32, + .reg_stride = 4, + .max_register = NWL_DSI_IRQ_MASK2, + .name = DRV_NAME, +}; + +static inline struct nwl_dsi *bridge_to_dsi(struct drm_bridge *bridge) +{ + return container_of(bridge, struct nwl_dsi, bridge); +} + +static int nwl_dsi_clear_error(struct nwl_dsi *dsi) +{ + int ret = dsi->error; + + dsi->error = 0; + return ret; +} + +static void nwl_dsi_write(struct nwl_dsi *dsi, unsigned int reg, u32 val) +{ + int ret; + + if (dsi->error) + return; + + ret = regmap_write(dsi->regmap, reg, val); + if (ret < 0) { + DRM_DEV_ERROR(dsi->dev, + "Failed to write NWL DSI reg 0x%x: %d\n", reg, + ret); + dsi->error = ret; + } +} + +static u32 nwl_dsi_read(struct nwl_dsi *dsi, u32 reg) +{ + unsigned int val; + int ret; + + if (dsi->error) + return 0; + + ret = regmap_read(dsi->regmap, reg, &val); + if (ret < 0) { + DRM_DEV_ERROR(dsi->dev, "Failed to read NWL DSI reg 0x%x: %d\n", + reg, ret); + dsi->error = ret; + } + return val; +} + +static int nwl_dsi_get_dpi_pixel_format(enum mipi_dsi_pixel_format format) +{ + switch (format) { + case MIPI_DSI_FMT_RGB565: + return NWL_DSI_PIXEL_FORMAT_16; + case MIPI_DSI_FMT_RGB666: + return NWL_DSI_PIXEL_FORMAT_18L; + case MIPI_DSI_FMT_RGB666_PACKED: + return NWL_DSI_PIXEL_FORMAT_18; + case MIPI_DSI_FMT_RGB888: + return NWL_DSI_PIXEL_FORMAT_24; + default: + return -EINVAL; + } +} + +/* + * ps2bc - Picoseconds to byte clock cycles + */ +static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps) +{ + u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); + + return DIV64_U64_ROUND_UP(ps * dsi->mode.clock * bpp, + dsi->lanes * 8 * NSEC_PER_SEC); +} + +/* + * ui2bc - UI time periods to byte clock cycles + */ +static u32 ui2bc(struct nwl_dsi *dsi, unsigned long long ui) +{ + u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); + + return DIV64_U64_ROUND_UP(ui * dsi->lanes, + dsi->mode.clock * 1000 * bpp); +} + +/* + * us2bc - micro seconds to lp clock cycles + */ +static u32 us2lp(u32 lp_clk_rate, unsigned long us) +{ + return DIV_ROUND_UP(us * lp_clk_rate, USEC_PER_SEC); +} + +static int 
nwl_dsi_config_host(struct nwl_dsi *dsi) +{ + u32 cycles; + struct phy_configure_opts_mipi_dphy *cfg = &dsi->phy_cfg.mipi_dphy; + + if (dsi->lanes < 1 || dsi->lanes > 4) + return -EINVAL; + + DRM_DEV_DEBUG_DRIVER(dsi->dev, "DSI Lanes %d\n", dsi->lanes); + nwl_dsi_write(dsi, NWL_DSI_CFG_NUM_LANES, dsi->lanes - 1); + + if (dsi->dsi_mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) { + nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x01); + nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x01); + } else { + nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x00); + nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x00); + } + + /* values in byte clock cycles */ + cycles = ui2bc(dsi, cfg->clk_pre); + DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles); + nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles); + cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero); + DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles); + cycles += ui2bc(dsi, cfg->clk_pre); + DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_post: 0x%x\n", cycles); + nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles); + cycles = ps2bc(dsi, cfg->hs_exit); + DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap: 0x%x\n", cycles); + nwl_dsi_write(dsi, NWL_DSI_CFG_TX_GAP, cycles); + + nwl_dsi_write(dsi, NWL_DSI_CFG_EXTRA_CMDS_AFTER_EOTP, 0x01); + nwl_dsi_write(dsi, NWL_DSI_CFG_HTX_TO_COUNT, 0x00); + nwl_dsi_write(dsi, NWL_DSI_CFG_LRX_H_TO_COUNT, 0x00); + nwl_dsi_write(dsi, NWL_DSI_CFG_BTA_H_TO_COUNT, 0x00); + /* In LP clock cycles */ + cycles = us2lp(cfg->lp_clk_rate, cfg->wakeup); + DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_twakeup: 0x%x\n", cycles); + nwl_dsi_write(dsi, NWL_DSI_CFG_TWAKEUP, cycles); + + return nwl_dsi_clear_error(dsi); +} + +static int nwl_dsi_config_dpi(struct nwl_dsi *dsi) +{ + u32 mode; + int color_format; + bool burst_mode; + int hfront_porch, hback_porch, vfront_porch, vback_porch; + int hsync_len, vsync_len; + + hfront_porch = dsi->mode.hsync_start - dsi->mode.hdisplay; + hsync_len = dsi->mode.hsync_end - dsi->mode.hsync_start; + hback_porch = dsi->mode.htotal - dsi->mode.hsync_end; + + vfront_porch = dsi->mode.vsync_start - dsi->mode.vdisplay; + vsync_len = dsi->mode.vsync_end - dsi->mode.vsync_start; + vback_porch = dsi->mode.vtotal - dsi->mode.vsync_end; + + DRM_DEV_DEBUG_DRIVER(dsi->dev, "hfront_porch = %d\n", hfront_porch); + DRM_DEV_DEBUG_DRIVER(dsi->dev, "hback_porch = %d\n", hback_porch); + DRM_DEV_DEBUG_DRIVER(dsi->dev, "hsync_len = %d\n", hsync_len); + DRM_DEV_DEBUG_DRIVER(dsi->dev, "hdisplay = %d\n", dsi->mode.hdisplay); + DRM_DEV_DEBUG_DRIVER(dsi->dev, "vfront_porch = %d\n", vfront_porch); + DRM_DEV_DEBUG_DRIVER(dsi->dev, "vback_porch = %d\n", vback_porch); + DRM_DEV_DEBUG_DRIVER(dsi->dev, "vsync_len = %d\n", vsync_len); + DRM_DEV_DEBUG_DRIVER(dsi->dev, "vactive = %d\n", dsi->mode.vdisplay); + DRM_DEV_DEBUG_DRIVER(dsi->dev, "clock = %d kHz\n", dsi->mode.clock); + + color_format = nwl_dsi_get_dpi_pixel_format(dsi->format); + if (color_format < 0) { + DRM_DEV_ERROR(dsi->dev, "Invalid color format 0x%x\n", + dsi->format); + return color_format; + } + DRM_DEV_DEBUG_DRIVER(dsi->dev, "pixel fmt = %d\n", dsi->format); + + nwl_dsi_write(dsi, NWL_DSI_INTERFACE_COLOR_CODING, NWL_DSI_DPI_24_BIT); + nwl_dsi_write(dsi, NWL_DSI_PIXEL_FORMAT, color_format); + /* + * Adjusting input polarity based on the video mode results in + * a black screen so always pick active low: + */ + nwl_dsi_write(dsi, NWL_DSI_VSYNC_POLARITY, + NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW); + nwl_dsi_write(dsi, NWL_DSI_HSYNC_POLARITY, + 
NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW); + + burst_mode = (dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_BURST) && + !(dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE); + + if (burst_mode) { + nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, NWL_DSI_VM_BURST_MODE); + nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL, 256); + } else { + mode = ((dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) ? + NWL_DSI_VM_BURST_MODE_WITH_SYNC_PULSES : + NWL_DSI_VM_NON_BURST_MODE_WITH_SYNC_EVENTS); + nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, mode); + nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL, + dsi->mode.hdisplay); + } + + nwl_dsi_write(dsi, NWL_DSI_HFP, hfront_porch); + nwl_dsi_write(dsi, NWL_DSI_HBP, hback_porch); + nwl_dsi_write(dsi, NWL_DSI_HSA, hsync_len); + + nwl_dsi_write(dsi, NWL_DSI_ENABLE_MULT_PKTS, 0x0); + nwl_dsi_write(dsi, NWL_DSI_BLLP_MODE, 0x1); + nwl_dsi_write(dsi, NWL_DSI_USE_NULL_PKT_BLLP, 0x0); + nwl_dsi_write(dsi, NWL_DSI_VC, 0x0); + + nwl_dsi_write(dsi, NWL_DSI_PIXEL_PAYLOAD_SIZE, dsi->mode.hdisplay); + nwl_dsi_write(dsi, NWL_DSI_VACTIVE, dsi->mode.vdisplay - 1); + nwl_dsi_write(dsi, NWL_DSI_VBP, vback_porch); + nwl_dsi_write(dsi, NWL_DSI_VFP, vfront_porch); + + return nwl_dsi_clear_error(dsi); +} + +static int nwl_dsi_init_interrupts(struct nwl_dsi *dsi) +{ + u32 irq_enable; + + nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, 0xffffffff); + nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK2, 0x7); + + irq_enable = ~(u32)(NWL_DSI_TX_PKT_DONE_MASK | + NWL_DSI_RX_PKT_HDR_RCVD_MASK | + NWL_DSI_TX_FIFO_OVFLW_MASK | + NWL_DSI_HS_TX_TIMEOUT_MASK); + + nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, irq_enable); + + return nwl_dsi_clear_error(dsi); +} + +static int nwl_dsi_host_attach(struct mipi_dsi_host *dsi_host, + struct mipi_dsi_device *device) +{ + struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host); + struct device *dev = dsi->dev; + + DRM_DEV_INFO(dev, "lanes=%u, format=0x%x flags=0x%lx\n", device->lanes, + device->format, device->mode_flags); + + if (device->lanes < 1 || device->lanes > 4) + return -EINVAL; + + dsi->lanes = device->lanes; + dsi->format = device->format; + dsi->dsi_mode_flags = device->mode_flags; + + return 0; +} + +static bool nwl_dsi_read_packet(struct nwl_dsi *dsi, u32 status) +{ + struct device *dev = dsi->dev; + struct nwl_dsi_transfer *xfer = dsi->xfer; + int err; + u8 *payload = xfer->msg->rx_buf; + u32 val; + u16 word_count; + u8 channel; + u8 data_type; + + xfer->status = 0; + + if (xfer->rx_word_count == 0) { + if (!(status & NWL_DSI_RX_PKT_HDR_RCVD)) + return false; + /* Get the RX header and parse it */ + val = nwl_dsi_read(dsi, NWL_DSI_RX_PKT_HEADER); + err = nwl_dsi_clear_error(dsi); + if (err) + xfer->status = err; + word_count = NWL_DSI_WC(val); + channel = NWL_DSI_RX_VC(val); + data_type = NWL_DSI_RX_DT(val); + + if (channel != xfer->msg->channel) { + DRM_DEV_ERROR(dev, + "[%02X] Channel mismatch (%u != %u)\n", + xfer->cmd, channel, xfer->msg->channel); + xfer->status = -EINVAL; + return true; + } + + switch (data_type) { + case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE: + fallthrough; + case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE: + if (xfer->msg->rx_len > 1) { + /* read second byte */ + payload[1] = word_count >> 8; + ++xfer->rx_len; + } + fallthrough; + case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE: + fallthrough; + case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE: + if (xfer->msg->rx_len > 0) { + /* read first byte */ + payload[0] = word_count & 0xff; + ++xfer->rx_len; + } + xfer->status = xfer->rx_len; + return true; + case 
MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT: + word_count &= 0xff; + DRM_DEV_ERROR(dev, "[%02X] DSI error report: 0x%02x\n", + xfer->cmd, word_count); + xfer->status = -EPROTO; + return true; + } + + if (word_count > xfer->msg->rx_len) { + DRM_DEV_ERROR(dev, + "[%02X] Receive buffer too small: %zu (< %u)\n", + xfer->cmd, xfer->msg->rx_len, word_count); + xfer->status = -EINVAL; + return true; + } + + xfer->rx_word_count = word_count; + } else { + /* Set word_count from previous header read */ + word_count = xfer->rx_word_count; + } + + /* If RX payload is not yet received, wait for it */ + if (!(status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)) + return false; + + /* Read the RX payload */ + while (word_count >= 4) { + val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD); + payload[0] = (val >> 0) & 0xff; + payload[1] = (val >> 8) & 0xff; + payload[2] = (val >> 16) & 0xff; + payload[3] = (val >> 24) & 0xff; + payload += 4; + xfer->rx_len += 4; + word_count -= 4; + } + + if (word_count > 0) { + val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD); + switch (word_count) { + case 3: + payload[2] = (val >> 16) & 0xff; + ++xfer->rx_len; + fallthrough; + case 2: + payload[1] = (val >> 8) & 0xff; + ++xfer->rx_len; + fallthrough; + case 1: + payload[0] = (val >> 0) & 0xff; + ++xfer->rx_len; + break; + } + } + + xfer->status = xfer->rx_len; + err = nwl_dsi_clear_error(dsi); + if (err) + xfer->status = err; + + return true; +} + +static void nwl_dsi_finish_transmission(struct nwl_dsi *dsi, u32 status) +{ + struct nwl_dsi_transfer *xfer = dsi->xfer; + bool end_packet = false; + + if (!xfer) + return; + + if (xfer->direction == DSI_PACKET_SEND && + status & NWL_DSI_TX_PKT_DONE) { + xfer->status = xfer->tx_len; + end_packet = true; + } else if (status & NWL_DSI_DPHY_DIRECTION && + ((status & (NWL_DSI_RX_PKT_HDR_RCVD | + NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)))) { + end_packet = nwl_dsi_read_packet(dsi, status); + } + + if (end_packet) + complete(&xfer->completed); +} + +static void nwl_dsi_begin_transmission(struct nwl_dsi *dsi) +{ + struct nwl_dsi_transfer *xfer = dsi->xfer; + struct mipi_dsi_packet *pkt = &xfer->packet; + const u8 *payload; + size_t length; + u16 word_count; + u8 hs_mode; + u32 val; + u32 hs_workaround = 0; + + /* Send the payload, if any */ + length = pkt->payload_length; + payload = pkt->payload; + + while (length >= 4) { + val = *(u32 *)payload; + hs_workaround |= !(val & 0xFFFF00); + nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val); + payload += 4; + length -= 4; + } + /* Send the rest of the payload */ + val = 0; + switch (length) { + case 3: + val |= payload[2] << 16; + fallthrough; + case 2: + val |= payload[1] << 8; + hs_workaround |= !(val & 0xFFFF00); + fallthrough; + case 1: + val |= payload[0]; + nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val); + break; + } + xfer->tx_len = pkt->payload_length; + + /* + * Send the header + * header[0] = Virtual Channel + Data Type + * header[1] = Word Count LSB (LP) or first param (SP) + * header[2] = Word Count MSB (LP) or second param (SP) + */ + word_count = pkt->header[1] | (pkt->header[2] << 8); + if (hs_workaround && (dsi->quirks & E11418_HS_MODE_QUIRK)) { + DRM_DEV_DEBUG_DRIVER(dsi->dev, + "Using hs mode workaround for cmd 0x%x\n", + xfer->cmd); + hs_mode = 1; + } else { + hs_mode = (xfer->msg->flags & MIPI_DSI_MSG_USE_LPM) ? 
0 : 1; + } + val = NWL_DSI_WC(word_count) | NWL_DSI_TX_VC(xfer->msg->channel) | + NWL_DSI_TX_DT(xfer->msg->type) | NWL_DSI_HS_SEL(hs_mode) | + NWL_DSI_BTA_TX(xfer->need_bta); + nwl_dsi_write(dsi, NWL_DSI_PKT_CONTROL, val); + + /* Send packet command */ + nwl_dsi_write(dsi, NWL_DSI_SEND_PACKET, 0x1); +} + +static ssize_t nwl_dsi_host_transfer(struct mipi_dsi_host *dsi_host, + const struct mipi_dsi_msg *msg) +{ + struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host); + struct nwl_dsi_transfer xfer; + ssize_t ret = 0; + + /* Create packet to be sent */ + dsi->xfer = &xfer; + ret = mipi_dsi_create_packet(&xfer.packet, msg); + if (ret < 0) { + dsi->xfer = NULL; + return ret; + } + + if ((msg->type & MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM || + msg->type & MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM || + msg->type & MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM || + msg->type & MIPI_DSI_DCS_READ) && + msg->rx_len > 0 && msg->rx_buf) + xfer.direction = DSI_PACKET_RECEIVE; + else + xfer.direction = DSI_PACKET_SEND; + + xfer.need_bta = (xfer.direction == DSI_PACKET_RECEIVE); + xfer.need_bta |= (msg->flags & MIPI_DSI_MSG_REQ_ACK) ? 1 : 0; + xfer.msg = msg; + xfer.status = -ETIMEDOUT; + xfer.rx_word_count = 0; + xfer.rx_len = 0; + xfer.cmd = 0x00; + if (msg->tx_len > 0) + xfer.cmd = ((u8 *)(msg->tx_buf))[0]; + init_completion(&xfer.completed); + + ret = clk_prepare_enable(dsi->rx_esc_clk); + if (ret < 0) { + DRM_DEV_ERROR(dsi->dev, "Failed to enable rx_esc clk: %zd\n", + ret); + return ret; + } + DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled rx_esc clk @%lu Hz\n", + clk_get_rate(dsi->rx_esc_clk)); + + /* Initiate the DSI packet transmision */ + nwl_dsi_begin_transmission(dsi); + + if (!wait_for_completion_timeout(&xfer.completed, + NWL_DSI_MIPI_FIFO_TIMEOUT)) { + DRM_DEV_ERROR(dsi_host->dev, "[%02X] DSI transfer timed out\n", + xfer.cmd); + ret = -ETIMEDOUT; + } else { + ret = xfer.status; + } + + clk_disable_unprepare(dsi->rx_esc_clk); + + return ret; +} + +static const struct mipi_dsi_host_ops nwl_dsi_host_ops = { + .attach = nwl_dsi_host_attach, + .transfer = nwl_dsi_host_transfer, +}; + +static irqreturn_t nwl_dsi_irq_handler(int irq, void *data) +{ + u32 irq_status; + struct nwl_dsi *dsi = data; + + irq_status = nwl_dsi_read(dsi, NWL_DSI_IRQ_STATUS); + + if (irq_status & NWL_DSI_TX_FIFO_OVFLW) + DRM_DEV_ERROR_RATELIMITED(dsi->dev, "tx fifo overflow\n"); + + if (irq_status & NWL_DSI_HS_TX_TIMEOUT) + DRM_DEV_ERROR_RATELIMITED(dsi->dev, "HS tx timeout\n"); + + if (irq_status & NWL_DSI_TX_PKT_DONE || + irq_status & NWL_DSI_RX_PKT_HDR_RCVD || + irq_status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD) + nwl_dsi_finish_transmission(dsi, irq_status); + + return IRQ_HANDLED; +} + +static int nwl_dsi_enable(struct nwl_dsi *dsi) +{ + struct device *dev = dsi->dev; + union phy_configure_opts *phy_cfg = &dsi->phy_cfg; + int ret; + + if (!dsi->lanes) { + DRM_DEV_ERROR(dev, "Need DSI lanes: %d\n", dsi->lanes); + return -EINVAL; + } + + ret = phy_init(dsi->phy); + if (ret < 0) { + DRM_DEV_ERROR(dev, "Failed to init DSI phy: %d\n", ret); + return ret; + } + + ret = phy_configure(dsi->phy, phy_cfg); + if (ret < 0) { + DRM_DEV_ERROR(dev, "Failed to configure DSI phy: %d\n", ret); + goto uninit_phy; + } + + ret = clk_prepare_enable(dsi->tx_esc_clk); + if (ret < 0) { + DRM_DEV_ERROR(dsi->dev, "Failed to enable tx_esc clk: %d\n", + ret); + goto uninit_phy; + } + DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled tx_esc clk @%lu Hz\n", + clk_get_rate(dsi->tx_esc_clk)); + + ret = nwl_dsi_config_host(dsi); + if (ret < 0) { + DRM_DEV_ERROR(dev, 
"Failed to set up DSI: %d", ret); + goto disable_clock; + } + + ret = nwl_dsi_config_dpi(dsi); + if (ret < 0) { + DRM_DEV_ERROR(dev, "Failed to set up DPI: %d", ret); + goto disable_clock; + } + + ret = phy_power_on(dsi->phy); + if (ret < 0) { + DRM_DEV_ERROR(dev, "Failed to power on DPHY (%d)\n", ret); + goto disable_clock; + } + + ret = nwl_dsi_init_interrupts(dsi); + if (ret < 0) + goto power_off_phy; + + return ret; + +power_off_phy: + phy_power_off(dsi->phy); +disable_clock: + clk_disable_unprepare(dsi->tx_esc_clk); +uninit_phy: + phy_exit(dsi->phy); + + return ret; +} + +static int nwl_dsi_disable(struct nwl_dsi *dsi) +{ + struct device *dev = dsi->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "Disabling clocks and phy\n"); + + phy_power_off(dsi->phy); + phy_exit(dsi->phy); + + /* Disabling the clock before the phy breaks enabling dsi again */ + clk_disable_unprepare(dsi->tx_esc_clk); + + return 0; +} + +static void nwl_dsi_bridge_disable(struct drm_bridge *bridge) +{ + struct nwl_dsi *dsi = bridge_to_dsi(bridge); + int ret; + + nwl_dsi_disable(dsi); + + ret = reset_control_assert(dsi->rst_dpi); + if (ret < 0) { + DRM_DEV_ERROR(dsi->dev, "Failed to assert DPI: %d\n", ret); + return; + } + ret = reset_control_assert(dsi->rst_byte); + if (ret < 0) { + DRM_DEV_ERROR(dsi->dev, "Failed to assert ESC: %d\n", ret); + return; + } + ret = reset_control_assert(dsi->rst_esc); + if (ret < 0) { + DRM_DEV_ERROR(dsi->dev, "Failed to assert BYTE: %d\n", ret); + return; + } + ret = reset_control_assert(dsi->rst_pclk); + if (ret < 0) { + DRM_DEV_ERROR(dsi->dev, "Failed to assert PCLK: %d\n", ret); + return; + } + + clk_disable_unprepare(dsi->core_clk); + clk_disable_unprepare(dsi->lcdif_clk); + + pm_runtime_put(dsi->dev); +} + +static int nwl_dsi_get_dphy_params(struct nwl_dsi *dsi, + const struct drm_display_mode *mode, + union phy_configure_opts *phy_opts) +{ + unsigned long rate; + int ret; + + if (dsi->lanes < 1 || dsi->lanes > 4) + return -EINVAL; + + /* + * So far the DPHY spec minimal timings work for both mixel + * dphy and nwl dsi host + */ + ret = phy_mipi_dphy_get_default_config(mode->clock * 1000, + mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes, + &phy_opts->mipi_dphy); + if (ret < 0) + return ret; + + rate = clk_get_rate(dsi->tx_esc_clk); + DRM_DEV_DEBUG_DRIVER(dsi->dev, "LP clk is @%lu Hz\n", rate); + phy_opts->mipi_dphy.lp_clk_rate = rate; + + return 0; +} + +static bool nwl_dsi_bridge_mode_fixup(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + /* At least LCDIF + NWL needs active high sync */ + adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC); + adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC); + + return true; +} + +static enum drm_mode_status +nwl_dsi_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_mode *mode) +{ + struct nwl_dsi *dsi = bridge_to_dsi(bridge); + int bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); + + if (mode->clock * bpp > 15000000 * dsi->lanes) + return MODE_CLOCK_HIGH; + + if (mode->clock * bpp < 80000 * dsi->lanes) + return MODE_CLOCK_LOW; + + return MODE_OK; +} + +static void +nwl_dsi_bridge_mode_set(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) +{ + struct nwl_dsi *dsi = bridge_to_dsi(bridge); + struct device *dev = dsi->dev; + union phy_configure_opts new_cfg; + unsigned long phy_ref_rate; + int ret; + + ret = nwl_dsi_get_dphy_params(dsi, adjusted_mode, &new_cfg); + 
if (ret < 0) + return; + + /* + * If hs clock is unchanged, we're all good - all parameters are + * derived from it atm. + */ + if (new_cfg.mipi_dphy.hs_clk_rate == dsi->phy_cfg.mipi_dphy.hs_clk_rate) + return; + + phy_ref_rate = clk_get_rate(dsi->phy_ref_clk); + DRM_DEV_DEBUG_DRIVER(dev, "PHY at ref rate: %lu\n", phy_ref_rate); + /* Save the new desired phy config */ + memcpy(&dsi->phy_cfg, &new_cfg, sizeof(new_cfg)); + + memcpy(&dsi->mode, adjusted_mode, sizeof(dsi->mode)); + drm_mode_debug_printmodeline(adjusted_mode); +} + +static void nwl_dsi_bridge_pre_enable(struct drm_bridge *bridge) +{ + struct nwl_dsi *dsi = bridge_to_dsi(bridge); + int ret; + + pm_runtime_get_sync(dsi->dev); + + if (clk_prepare_enable(dsi->lcdif_clk) < 0) + return; + if (clk_prepare_enable(dsi->core_clk) < 0) + return; + + /* Step 1 from DSI reset-out instructions */ + ret = reset_control_deassert(dsi->rst_pclk); + if (ret < 0) { + DRM_DEV_ERROR(dsi->dev, "Failed to deassert PCLK: %d\n", ret); + return; + } + + /* Step 2 from DSI reset-out instructions */ + nwl_dsi_enable(dsi); + + /* Step 3 from DSI reset-out instructions */ + ret = reset_control_deassert(dsi->rst_esc); + if (ret < 0) { + DRM_DEV_ERROR(dsi->dev, "Failed to deassert ESC: %d\n", ret); + return; + } + ret = reset_control_deassert(dsi->rst_byte); + if (ret < 0) { + DRM_DEV_ERROR(dsi->dev, "Failed to deassert BYTE: %d\n", ret); + return; + } +} + +static void nwl_dsi_bridge_enable(struct drm_bridge *bridge) +{ + struct nwl_dsi *dsi = bridge_to_dsi(bridge); + int ret; + + /* Step 5 from DSI reset-out instructions */ + ret = reset_control_deassert(dsi->rst_dpi); + if (ret < 0) + DRM_DEV_ERROR(dsi->dev, "Failed to deassert DPI: %d\n", ret); +} + +static int nwl_dsi_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct nwl_dsi *dsi = bridge_to_dsi(bridge); + struct drm_bridge *panel_bridge; + struct drm_panel *panel; + int ret; + + if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) { + DRM_ERROR("Fix bridge driver to make connector optional!"); + return -EINVAL; + } + + ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0, &panel, + &panel_bridge); + if (ret) + return ret; + + if (panel) { + panel_bridge = drm_panel_bridge_add(panel); + if (IS_ERR(panel_bridge)) + return PTR_ERR(panel_bridge); + } + dsi->panel_bridge = panel_bridge; + + if (!dsi->panel_bridge) + return -EPROBE_DEFER; + + return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge, + flags); +} + +static void nwl_dsi_bridge_detach(struct drm_bridge *bridge) +{ struct nwl_dsi *dsi = bridge_to_dsi(bridge); + + drm_of_panel_bridge_remove(dsi->dev->of_node, 1, 0); +} + +static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = { + .pre_enable = nwl_dsi_bridge_pre_enable, + .enable = nwl_dsi_bridge_enable, + .disable = nwl_dsi_bridge_disable, + .mode_fixup = nwl_dsi_bridge_mode_fixup, + .mode_set = nwl_dsi_bridge_mode_set, + .mode_valid = nwl_dsi_bridge_mode_valid, + .attach = nwl_dsi_bridge_attach, + .detach = nwl_dsi_bridge_detach, +}; + +static int nwl_dsi_parse_dt(struct nwl_dsi *dsi) +{ + struct platform_device *pdev = to_platform_device(dsi->dev); + struct clk *clk; + void __iomem *base; + int ret; + + dsi->phy = devm_phy_get(dsi->dev, "dphy"); + if (IS_ERR(dsi->phy)) { + ret = PTR_ERR(dsi->phy); + if (ret != -EPROBE_DEFER) + DRM_DEV_ERROR(dsi->dev, "Could not get PHY: %d\n", ret); + return ret; + } + + clk = devm_clk_get(dsi->dev, "lcdif"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + DRM_DEV_ERROR(dsi->dev, "Failed to get lcdif clock: 
%d\n", + ret); + return ret; + } + dsi->lcdif_clk = clk; + + clk = devm_clk_get(dsi->dev, "core"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + DRM_DEV_ERROR(dsi->dev, "Failed to get core clock: %d\n", + ret); + return ret; + } + dsi->core_clk = clk; + + clk = devm_clk_get(dsi->dev, "phy_ref"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + DRM_DEV_ERROR(dsi->dev, "Failed to get phy_ref clock: %d\n", + ret); + return ret; + } + dsi->phy_ref_clk = clk; + + clk = devm_clk_get(dsi->dev, "rx_esc"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + DRM_DEV_ERROR(dsi->dev, "Failed to get rx_esc clock: %d\n", + ret); + return ret; + } + dsi->rx_esc_clk = clk; + + clk = devm_clk_get(dsi->dev, "tx_esc"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + DRM_DEV_ERROR(dsi->dev, "Failed to get tx_esc clock: %d\n", + ret); + return ret; + } + dsi->tx_esc_clk = clk; + + dsi->mux = devm_mux_control_get(dsi->dev, NULL); + if (IS_ERR(dsi->mux)) { + ret = PTR_ERR(dsi->mux); + if (ret != -EPROBE_DEFER) + DRM_DEV_ERROR(dsi->dev, "Failed to get mux: %d\n", ret); + return ret; + } + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + dsi->regmap = + devm_regmap_init_mmio(dsi->dev, base, &nwl_dsi_regmap_config); + if (IS_ERR(dsi->regmap)) { + ret = PTR_ERR(dsi->regmap); + DRM_DEV_ERROR(dsi->dev, "Failed to create NWL DSI regmap: %d\n", + ret); + return ret; + } + + dsi->irq = platform_get_irq(pdev, 0); + if (dsi->irq < 0) { + DRM_DEV_ERROR(dsi->dev, "Failed to get device IRQ: %d\n", + dsi->irq); + return dsi->irq; + } + + dsi->rst_pclk = devm_reset_control_get_exclusive(dsi->dev, "pclk"); + if (IS_ERR(dsi->rst_pclk)) { + DRM_DEV_ERROR(dsi->dev, "Failed to get pclk reset: %ld\n", + PTR_ERR(dsi->rst_pclk)); + return PTR_ERR(dsi->rst_pclk); + } + dsi->rst_byte = devm_reset_control_get_exclusive(dsi->dev, "byte"); + if (IS_ERR(dsi->rst_byte)) { + DRM_DEV_ERROR(dsi->dev, "Failed to get byte reset: %ld\n", + PTR_ERR(dsi->rst_byte)); + return PTR_ERR(dsi->rst_byte); + } + dsi->rst_esc = devm_reset_control_get_exclusive(dsi->dev, "esc"); + if (IS_ERR(dsi->rst_esc)) { + DRM_DEV_ERROR(dsi->dev, "Failed to get esc reset: %ld\n", + PTR_ERR(dsi->rst_esc)); + return PTR_ERR(dsi->rst_esc); + } + dsi->rst_dpi = devm_reset_control_get_exclusive(dsi->dev, "dpi"); + if (IS_ERR(dsi->rst_dpi)) { + DRM_DEV_ERROR(dsi->dev, "Failed to get dpi reset: %ld\n", + PTR_ERR(dsi->rst_dpi)); + return PTR_ERR(dsi->rst_dpi); + } + return 0; +} + +static int nwl_dsi_select_input(struct nwl_dsi *dsi) +{ + struct device_node *remote; + u32 use_dcss = 1; + int ret; + + remote = of_graph_get_remote_node(dsi->dev->of_node, 0, + NWL_DSI_ENDPOINT_LCDIF); + if (remote) { + use_dcss = 0; + } else { + remote = of_graph_get_remote_node(dsi->dev->of_node, 0, + NWL_DSI_ENDPOINT_DCSS); + if (!remote) { + DRM_DEV_ERROR(dsi->dev, + "No valid input endpoint found\n"); + return -EINVAL; + } + } + + DRM_DEV_INFO(dsi->dev, "Using %s as input source\n", + (use_dcss) ? 
"DCSS" : "LCDIF"); + ret = mux_control_try_select(dsi->mux, use_dcss); + if (ret < 0) + DRM_DEV_ERROR(dsi->dev, "Failed to select input: %d\n", ret); + + of_node_put(remote); + return ret; +} + +static int nwl_dsi_deselect_input(struct nwl_dsi *dsi) +{ + int ret; + + ret = mux_control_deselect(dsi->mux); + if (ret < 0) + DRM_DEV_ERROR(dsi->dev, "Failed to deselect input: %d\n", ret); + + return ret; +} + +static const struct drm_bridge_timings nwl_dsi_timings = { + .input_bus_flags = DRM_BUS_FLAG_DE_LOW, +}; + +static const struct of_device_id nwl_dsi_dt_ids[] = { + { .compatible = "fsl,imx8mq-nwl-dsi", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, nwl_dsi_dt_ids); + +static const struct soc_device_attribute nwl_dsi_quirks_match[] = { + { .soc_id = "i.MX8MQ", .revision = "2.0", + .data = (void *)E11418_HS_MODE_QUIRK }, + { /* sentinel. */ }, +}; + +static int nwl_dsi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct soc_device_attribute *attr; + struct nwl_dsi *dsi; + int ret; + + dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); + if (!dsi) + return -ENOMEM; + + dsi->dev = dev; + + ret = nwl_dsi_parse_dt(dsi); + if (ret) + return ret; + + ret = devm_request_irq(dev, dsi->irq, nwl_dsi_irq_handler, 0, + dev_name(dev), dsi); + if (ret < 0) { + DRM_DEV_ERROR(dev, "Failed to request IRQ %d: %d\n", dsi->irq, + ret); + return ret; + } + + dsi->dsi_host.ops = &nwl_dsi_host_ops; + dsi->dsi_host.dev = dev; + ret = mipi_dsi_host_register(&dsi->dsi_host); + if (ret) { + DRM_DEV_ERROR(dev, "Failed to register MIPI host: %d\n", ret); + return ret; + } + + attr = soc_device_match(nwl_dsi_quirks_match); + if (attr) + dsi->quirks = (uintptr_t)attr->data; + + dsi->bridge.driver_private = dsi; + dsi->bridge.funcs = &nwl_dsi_bridge_funcs; + dsi->bridge.of_node = dev->of_node; + dsi->bridge.timings = &nwl_dsi_timings; + + dev_set_drvdata(dev, dsi); + pm_runtime_enable(dev); + + ret = nwl_dsi_select_input(dsi); + if (ret < 0) { + mipi_dsi_host_unregister(&dsi->dsi_host); + return ret; + } + + drm_bridge_add(&dsi->bridge); + return 0; +} + +static int nwl_dsi_remove(struct platform_device *pdev) +{ + struct nwl_dsi *dsi = platform_get_drvdata(pdev); + + nwl_dsi_deselect_input(dsi); + mipi_dsi_host_unregister(&dsi->dsi_host); + drm_bridge_remove(&dsi->bridge); + pm_runtime_disable(&pdev->dev); + return 0; +} + +static struct platform_driver nwl_dsi_driver = { + .probe = nwl_dsi_probe, + .remove = nwl_dsi_remove, + .driver = { + .of_match_table = nwl_dsi_dt_ids, + .name = DRV_NAME, + }, +}; + +module_platform_driver(nwl_dsi_driver); + +MODULE_AUTHOR("NXP Semiconductor"); +MODULE_AUTHOR("Purism SPC"); +MODULE_DESCRIPTION("Northwest Logic MIPI-DSI driver"); +MODULE_LICENSE("GPL"); /* GPLv2 or later */ diff --git a/drivers/gpu/drm/bridge/nwl-dsi.h b/drivers/gpu/drm/bridge/nwl-dsi.h new file mode 100644 index 000000000000..a247a8a11c7c --- /dev/null +++ b/drivers/gpu/drm/bridge/nwl-dsi.h @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * NWL MIPI DSI host driver + * + * Copyright (C) 2017 NXP + * Copyright (C) 2019 Purism SPC + */ +#ifndef __NWL_DSI_H__ +#define __NWL_DSI_H__ + +/* DSI HOST registers */ +#define NWL_DSI_CFG_NUM_LANES 0x0 +#define NWL_DSI_CFG_NONCONTINUOUS_CLK 0x4 +#define NWL_DSI_CFG_T_PRE 0x8 +#define NWL_DSI_CFG_T_POST 0xc +#define NWL_DSI_CFG_TX_GAP 0x10 +#define NWL_DSI_CFG_AUTOINSERT_EOTP 0x14 +#define NWL_DSI_CFG_EXTRA_CMDS_AFTER_EOTP 0x18 +#define NWL_DSI_CFG_HTX_TO_COUNT 0x1c +#define NWL_DSI_CFG_LRX_H_TO_COUNT 0x20 +#define 
NWL_DSI_CFG_BTA_H_TO_COUNT 0x24 +#define NWL_DSI_CFG_TWAKEUP 0x28 +#define NWL_DSI_CFG_STATUS_OUT 0x2c +#define NWL_DSI_RX_ERROR_STATUS 0x30 + +/* DSI DPI registers */ +#define NWL_DSI_PIXEL_PAYLOAD_SIZE 0x200 +#define NWL_DSI_PIXEL_FIFO_SEND_LEVEL 0x204 +#define NWL_DSI_INTERFACE_COLOR_CODING 0x208 +#define NWL_DSI_PIXEL_FORMAT 0x20c +#define NWL_DSI_VSYNC_POLARITY 0x210 +#define NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW 0 +#define NWL_DSI_VSYNC_POLARITY_ACTIVE_HIGH BIT(1) + +#define NWL_DSI_HSYNC_POLARITY 0x214 +#define NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW 0 +#define NWL_DSI_HSYNC_POLARITY_ACTIVE_HIGH BIT(1) + +#define NWL_DSI_VIDEO_MODE 0x218 +#define NWL_DSI_HFP 0x21c +#define NWL_DSI_HBP 0x220 +#define NWL_DSI_HSA 0x224 +#define NWL_DSI_ENABLE_MULT_PKTS 0x228 +#define NWL_DSI_VBP 0x22c +#define NWL_DSI_VFP 0x230 +#define NWL_DSI_BLLP_MODE 0x234 +#define NWL_DSI_USE_NULL_PKT_BLLP 0x238 +#define NWL_DSI_VACTIVE 0x23c +#define NWL_DSI_VC 0x240 + +/* DSI APB PKT control */ +#define NWL_DSI_TX_PAYLOAD 0x280 +#define NWL_DSI_PKT_CONTROL 0x284 +#define NWL_DSI_SEND_PACKET 0x288 +#define NWL_DSI_PKT_STATUS 0x28c +#define NWL_DSI_PKT_FIFO_WR_LEVEL 0x290 +#define NWL_DSI_PKT_FIFO_RD_LEVEL 0x294 +#define NWL_DSI_RX_PAYLOAD 0x298 +#define NWL_DSI_RX_PKT_HEADER 0x29c + +/* DSI IRQ handling */ +#define NWL_DSI_IRQ_STATUS 0x2a0 +#define NWL_DSI_SM_NOT_IDLE BIT(0) +#define NWL_DSI_TX_PKT_DONE BIT(1) +#define NWL_DSI_DPHY_DIRECTION BIT(2) +#define NWL_DSI_TX_FIFO_OVFLW BIT(3) +#define NWL_DSI_TX_FIFO_UDFLW BIT(4) +#define NWL_DSI_RX_FIFO_OVFLW BIT(5) +#define NWL_DSI_RX_FIFO_UDFLW BIT(6) +#define NWL_DSI_RX_PKT_HDR_RCVD BIT(7) +#define NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD BIT(8) +#define NWL_DSI_BTA_TIMEOUT BIT(29) +#define NWL_DSI_LP_RX_TIMEOUT BIT(30) +#define NWL_DSI_HS_TX_TIMEOUT BIT(31) + +#define NWL_DSI_IRQ_STATUS2 0x2a4 +#define NWL_DSI_SINGLE_BIT_ECC_ERR BIT(0) +#define NWL_DSI_MULTI_BIT_ECC_ERR BIT(1) +#define NWL_DSI_CRC_ERR BIT(2) + +#define NWL_DSI_IRQ_MASK 0x2a8 +#define NWL_DSI_SM_NOT_IDLE_MASK BIT(0) +#define NWL_DSI_TX_PKT_DONE_MASK BIT(1) +#define NWL_DSI_DPHY_DIRECTION_MASK BIT(2) +#define NWL_DSI_TX_FIFO_OVFLW_MASK BIT(3) +#define NWL_DSI_TX_FIFO_UDFLW_MASK BIT(4) +#define NWL_DSI_RX_FIFO_OVFLW_MASK BIT(5) +#define NWL_DSI_RX_FIFO_UDFLW_MASK BIT(6) +#define NWL_DSI_RX_PKT_HDR_RCVD_MASK BIT(7) +#define NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD_MASK BIT(8) +#define NWL_DSI_BTA_TIMEOUT_MASK BIT(29) +#define NWL_DSI_LP_RX_TIMEOUT_MASK BIT(30) +#define NWL_DSI_HS_TX_TIMEOUT_MASK BIT(31) + +#define NWL_DSI_IRQ_MASK2 0x2ac +#define NWL_DSI_SINGLE_BIT_ECC_ERR_MASK BIT(0) +#define NWL_DSI_MULTI_BIT_ECC_ERR_MASK BIT(1) +#define NWL_DSI_CRC_ERR_MASK BIT(2) + +/* + * PKT_CONTROL format: + * [15: 0] - word count + * [17:16] - virtual channel + * [23:18] - data type + * [24] - LP or HS select (0 - LP, 1 - HS) + * [25] - perform BTA after packet is sent + * [26] - perform BTA only, no packet tx + */ +#define NWL_DSI_WC(x) FIELD_PREP(GENMASK(15, 0), (x)) +#define NWL_DSI_TX_VC(x) FIELD_PREP(GENMASK(17, 16), (x)) +#define NWL_DSI_TX_DT(x) FIELD_PREP(GENMASK(23, 18), (x)) +#define NWL_DSI_HS_SEL(x) FIELD_PREP(GENMASK(24, 24), (x)) +#define NWL_DSI_BTA_TX(x) FIELD_PREP(GENMASK(25, 25), (x)) +#define NWL_DSI_BTA_NO_TX(x) FIELD_PREP(GENMASK(26, 26), (x)) + +/* + * RX_PKT_HEADER format: + * [15: 0] - word count + * [21:16] - data type + * [23:22] - virtual channel + */ +#define NWL_DSI_RX_DT(x) FIELD_GET(GENMASK(21, 16), (x)) +#define NWL_DSI_RX_VC(x) FIELD_GET(GENMASK(23, 22), (x)) + +/* DSI Video mode */ +#define 
NWL_DSI_VM_BURST_MODE_WITH_SYNC_PULSES 0 +#define NWL_DSI_VM_NON_BURST_MODE_WITH_SYNC_EVENTS BIT(0) +#define NWL_DSI_VM_BURST_MODE BIT(1) + +/* * DPI color coding */ +#define NWL_DSI_DPI_16_BIT_565_PACKED 0 +#define NWL_DSI_DPI_16_BIT_565_ALIGNED 1 +#define NWL_DSI_DPI_16_BIT_565_SHIFTED 2 +#define NWL_DSI_DPI_18_BIT_PACKED 3 +#define NWL_DSI_DPI_18_BIT_ALIGNED 4 +#define NWL_DSI_DPI_24_BIT 5 + +/* * DPI Pixel format */ +#define NWL_DSI_PIXEL_FORMAT_16 0 +#define NWL_DSI_PIXEL_FORMAT_18 BIT(0) +#define NWL_DSI_PIXEL_FORMAT_18L BIT(1) +#define NWL_DSI_PIXEL_FORMAT_24 (BIT(0) | BIT(1)) + +#endif /* __NWL_DSI_H__ */ diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c index 8461ee8304ba..e933f1c47f5d 100644 --- a/drivers/gpu/drm/bridge/panel.c +++ b/drivers/gpu/drm/bridge/panel.c @@ -311,6 +311,7 @@ EXPORT_SYMBOL(devm_drm_panel_bridge_add_typed); /** * drm_panel_bridge_connector - return the connector for the panel bridge + * @bridge: The drm_bridge. * * drm_panel_bridge creates the connector. * This function gives external access to the connector. diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c index f81f81b7051f..b1258f0ed205 100644 --- a/drivers/gpu/drm/bridge/sii9234.c +++ b/drivers/gpu/drm/bridge/sii9234.c @@ -836,7 +836,8 @@ static int sii9234_init_resources(struct sii9234 *ctx, ctx->supplies[3].supply = "cvcc12"; ret = devm_regulator_bulk_get(ctx->dev, 4, ctx->supplies); if (ret) { - dev_err(ctx->dev, "regulator_bulk failed\n"); + if (ret != -EPROBE_DEFER) + dev_err(ctx->dev, "regulator_bulk failed\n"); return ret; } diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 383b1073d7de..30681398cfb0 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -92,6 +92,12 @@ static const u16 csc_coeff_rgb_in_eitu709[3][4] = { { 0x6756, 0x78ab, 0x2000, 0x0200 } }; +static const u16 csc_coeff_rgb_full_to_rgb_limited[3][4] = { + { 0x1b7c, 0x0000, 0x0000, 0x0020 }, + { 0x0000, 0x1b7c, 0x0000, 0x0020 }, + { 0x0000, 0x0000, 0x1b7c, 0x0020 } +}; + struct hdmi_vmode { bool mdataenablepolarity; @@ -109,6 +115,7 @@ struct hdmi_data_info { unsigned int pix_repet_factor; unsigned int hdcp_enable; struct hdmi_vmode video_mode; + bool rgb_limited_range; }; struct dw_hdmi_i2c { @@ -956,7 +963,14 @@ static void hdmi_video_sample(struct dw_hdmi *hdmi) static int is_color_space_conversion(struct dw_hdmi *hdmi) { - return hdmi->hdmi_data.enc_in_bus_format != hdmi->hdmi_data.enc_out_bus_format; + struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data; + bool is_input_rgb, is_output_rgb; + + is_input_rgb = hdmi_bus_fmt_is_rgb(hdmi_data->enc_in_bus_format); + is_output_rgb = hdmi_bus_fmt_is_rgb(hdmi_data->enc_out_bus_format); + + return (is_input_rgb != is_output_rgb) || + (is_input_rgb && is_output_rgb && hdmi_data->rgb_limited_range); } static int is_color_space_decimation(struct dw_hdmi *hdmi) @@ -983,28 +997,37 @@ static int is_color_space_interpolation(struct dw_hdmi *hdmi) return 0; } +static bool is_csc_needed(struct dw_hdmi *hdmi) +{ + return is_color_space_conversion(hdmi) || + is_color_space_decimation(hdmi) || + is_color_space_interpolation(hdmi); +} + static void dw_hdmi_update_csc_coeffs(struct dw_hdmi *hdmi) { const u16 (*csc_coeff)[3][4] = &csc_coeff_default; + bool is_input_rgb, is_output_rgb; unsigned i; u32 csc_scale = 1; - if (is_color_space_conversion(hdmi)) { - if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) { - if 
(hdmi->hdmi_data.enc_out_encoding == - V4L2_YCBCR_ENC_601) - csc_coeff = &csc_coeff_rgb_out_eitu601; - else - csc_coeff = &csc_coeff_rgb_out_eitu709; - } else if (hdmi_bus_fmt_is_rgb( - hdmi->hdmi_data.enc_in_bus_format)) { - if (hdmi->hdmi_data.enc_out_encoding == - V4L2_YCBCR_ENC_601) - csc_coeff = &csc_coeff_rgb_in_eitu601; - else - csc_coeff = &csc_coeff_rgb_in_eitu709; - csc_scale = 0; - } + is_input_rgb = hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_in_bus_format); + is_output_rgb = hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format); + + if (!is_input_rgb && is_output_rgb) { + if (hdmi->hdmi_data.enc_out_encoding == V4L2_YCBCR_ENC_601) + csc_coeff = &csc_coeff_rgb_out_eitu601; + else + csc_coeff = &csc_coeff_rgb_out_eitu709; + } else if (is_input_rgb && !is_output_rgb) { + if (hdmi->hdmi_data.enc_out_encoding == V4L2_YCBCR_ENC_601) + csc_coeff = &csc_coeff_rgb_in_eitu601; + else + csc_coeff = &csc_coeff_rgb_in_eitu709; + csc_scale = 0; + } else if (is_input_rgb && is_output_rgb && + hdmi->hdmi_data.rgb_limited_range) { + csc_coeff = &csc_coeff_rgb_full_to_rgb_limited; } /* The CSC registers are sequential, alternating MSB then LSB */ @@ -1614,6 +1637,18 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode) drm_hdmi_avi_infoframe_from_display_mode(&frame, &hdmi->connector, mode); + if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) { + drm_hdmi_avi_infoframe_quant_range(&frame, &hdmi->connector, + mode, + hdmi->hdmi_data.rgb_limited_range ? + HDMI_QUANTIZATION_RANGE_LIMITED : + HDMI_QUANTIZATION_RANGE_FULL); + } else { + frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT; + frame.ycc_quantization_range = + HDMI_YCC_QUANTIZATION_RANGE_LIMITED; + } + if (hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format)) frame.colorspace = HDMI_COLORSPACE_YUV444; else if (hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format)) @@ -1654,8 +1689,6 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode) HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; } - frame.scan_mode = HDMI_SCAN_MODE_NONE; - /* * The Designware IP uses a different byte format from standard * AVI info frames, though generally the bits are in the correct @@ -2010,18 +2043,19 @@ static void dw_hdmi_enable_video_path(struct dw_hdmi *hdmi) hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); /* Enable csc path */ - if (is_color_space_conversion(hdmi)) { + if (is_csc_needed(hdmi)) { hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE; hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); - } - /* Enable color space conversion if needed */ - if (is_color_space_conversion(hdmi)) hdmi_writeb(hdmi, HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH, HDMI_MC_FLOWCTRL); - else + } else { + hdmi->mc_clkdis |= HDMI_MC_CLKDIS_CSCCLK_DISABLE; + hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); + hdmi_writeb(hdmi, HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS, HDMI_MC_FLOWCTRL); + } } /* Workaround to clear the overflow condition */ @@ -2119,6 +2153,10 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode) if (hdmi->hdmi_data.enc_out_bus_format == MEDIA_BUS_FMT_FIXED) hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24; + hdmi->hdmi_data.rgb_limited_range = hdmi->sink_is_hdmi && + drm_default_rgb_quant_range(mode) == + HDMI_QUANTIZATION_RANGE_LIMITED; + hdmi->hdmi_data.pix_repet_factor = 0; hdmi->hdmi_data.hdcp_enable = 0; hdmi->hdmi_data.video_mode.mdataenablepolarity = true; diff --git a/drivers/gpu/drm/cirrus/cirrus.c 
b/drivers/gpu/drm/cirrus/cirrus.c index d2ff63ce8eaf..a36269717c3b 100644 --- a/drivers/gpu/drm/cirrus/cirrus.c +++ b/drivers/gpu/drm/cirrus/cirrus.c @@ -35,6 +35,7 @@ #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_ioctl.h> +#include <drm/drm_managed.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> @@ -509,11 +510,15 @@ static const struct drm_mode_config_funcs cirrus_mode_config_funcs = { .atomic_commit = drm_atomic_helper_commit, }; -static void cirrus_mode_config_init(struct cirrus_device *cirrus) +static int cirrus_mode_config_init(struct cirrus_device *cirrus) { struct drm_device *dev = &cirrus->dev; + int ret; + + ret = drmm_mode_config_init(dev); + if (ret) + return ret; - drm_mode_config_init(dev); dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; dev->mode_config.max_width = CIRRUS_MAX_PITCH / 2; @@ -521,18 +526,12 @@ static void cirrus_mode_config_init(struct cirrus_device *cirrus) dev->mode_config.preferred_depth = 16; dev->mode_config.prefer_shadow = 0; dev->mode_config.funcs = &cirrus_mode_config_funcs; + + return 0; } /* ------------------------------------------------------------------ */ -static void cirrus_release(struct drm_device *dev) -{ - struct cirrus_device *cirrus = dev->dev_private; - - drm_mode_config_cleanup(dev); - kfree(cirrus); -} - DEFINE_DRM_GEM_FOPS(cirrus_fops); static struct drm_driver cirrus_driver = { @@ -546,7 +545,6 @@ static struct drm_driver cirrus_driver = { .fops = &cirrus_fops, DRM_GEM_SHMEM_DRIVER_OPS, - .release = cirrus_release, }; static int cirrus_pci_probe(struct pci_dev *pdev, @@ -560,7 +558,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev, if (ret) return ret; - ret = pci_enable_device(pdev); + ret = pcim_enable_device(pdev); if (ret) return ret; @@ -571,34 +569,38 @@ static int cirrus_pci_probe(struct pci_dev *pdev, ret = -ENOMEM; cirrus = kzalloc(sizeof(*cirrus), GFP_KERNEL); if (cirrus == NULL) - goto err_pci_release; + return ret; dev = &cirrus->dev; - ret = drm_dev_init(dev, &cirrus_driver, &pdev->dev); - if (ret) - goto err_free_cirrus; + ret = devm_drm_dev_init(&pdev->dev, dev, &cirrus_driver); + if (ret) { + kfree(cirrus); + return ret; + } dev->dev_private = cirrus; + drmm_add_final_kfree(dev, cirrus); - ret = -ENOMEM; - cirrus->vram = ioremap(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); + cirrus->vram = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); if (cirrus->vram == NULL) - goto err_dev_put; + return -ENOMEM; - cirrus->mmio = ioremap(pci_resource_start(pdev, 1), - pci_resource_len(pdev, 1)); + cirrus->mmio = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 1), + pci_resource_len(pdev, 1)); if (cirrus->mmio == NULL) - goto err_unmap_vram; + return -ENOMEM; - cirrus_mode_config_init(cirrus); + ret = cirrus_mode_config_init(cirrus); + if (ret) + return ret; ret = cirrus_conn_init(cirrus); if (ret < 0) - goto err_cleanup; + return ret; ret = cirrus_pipe_init(cirrus); if (ret < 0) - goto err_cleanup; + return ret; drm_mode_config_reset(dev); @@ -606,36 +608,18 @@ static int cirrus_pci_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, dev); ret = drm_dev_register(dev, 0); if (ret) - goto err_cleanup; + return ret; drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth); return 0; - -err_cleanup: - drm_mode_config_cleanup(dev); - iounmap(cirrus->mmio); -err_unmap_vram: - iounmap(cirrus->vram); -err_dev_put: - drm_dev_put(dev); 
-err_free_cirrus: - kfree(cirrus); -err_pci_release: - pci_release_regions(pdev); - return ret; } static void cirrus_pci_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); - struct cirrus_device *cirrus = dev->dev_private; drm_dev_unplug(dev); drm_atomic_helper_shutdown(dev); - iounmap(cirrus->mmio); - iounmap(cirrus->vram); - drm_dev_put(dev); - pci_release_regions(pdev); } static const struct pci_device_id pciidlist[] = { diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 9ccfbf213d72..965173fd0ac2 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1641,10 +1641,10 @@ static const struct drm_info_list drm_atomic_debugfs_list[] = { {"state", drm_state_info, 0}, }; -int drm_atomic_debugfs_init(struct drm_minor *minor) +void drm_atomic_debugfs_init(struct drm_minor *minor) { - return drm_debugfs_create_files(drm_atomic_debugfs_list, - ARRAY_SIZE(drm_atomic_debugfs_list), - minor->debugfs_root, minor); + drm_debugfs_create_files(drm_atomic_debugfs_list, + ARRAY_SIZE(drm_atomic_debugfs_list), + minor->debugfs_root, minor); } #endif diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c index 531b876d0ed8..800ac39f3213 100644 --- a/drivers/gpu/drm/drm_auth.c +++ b/drivers/gpu/drm/drm_auth.c @@ -135,6 +135,7 @@ static int drm_set_master(struct drm_device *dev, struct drm_file *fpriv, } } + fpriv->was_master = (ret == 0); return ret; } @@ -174,17 +175,77 @@ out_err: return ret; } +/* + * In the olden days the SET/DROP_MASTER ioctls used to return EACCES when + * CAP_SYS_ADMIN was not set. This was used to prevent rogue applications + * from becoming master and/or failing to release it. + * + * At the same time, the first client (for a given VT) is _always_ master. + * Thus in order for the ioctls to succeed, one had to _explicitly_ run the + * application as root or flip the setuid bit. + * + * If the CAP_SYS_ADMIN was missing, no other client could become master... + * EVER :-( Leading to a) the graphics session dying badly or b) a completely + * locked session. + * + * + * As some point systemd-logind was introduced to orchestrate and delegate + * master as applicable. It does so by opening the fd and passing it to users + * while in itself logind a) does the set/drop master per users' request and + * b) * implicitly drops master on VT switch. + * + * Even though logind looks like the future, there are a few issues: + * - some platforms don't have equivalent (Android, CrOS, some BSDs) so + * root is required _solely_ for SET/DROP MASTER. + * - applications may not be updated to use it, + * - any client which fails to drop master* can DoS the application using + * logind, to a varying degree. + * + * * Either due missing CAP_SYS_ADMIN or simply not calling DROP_MASTER. + * + * + * Here we implement the next best thing: + * - ensure the logind style of fd passing works unchanged, and + * - allow a client to drop/set master, iff it is/was master at a given point + * in time. 
+ * + * Note: DROP_MASTER cannot be free for all, as an arbitrator user could: + * - DoS/crash the arbitrator - details would be implementation specific + * - open the node, become master implicitly and cause issues + * + * As a result this fixes the following when using root-less build w/o logind + * - startx + * - weston + * - various compositors based on wlroots + */ +static int +drm_master_check_perm(struct drm_device *dev, struct drm_file *file_priv) +{ + if (file_priv->pid == task_pid(current) && file_priv->was_master) + return 0; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + return 0; +} + int drm_setmaster_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { int ret = 0; mutex_lock(&dev->master_mutex); + + ret = drm_master_check_perm(dev, file_priv); + if (ret) + goto out_unlock; + if (drm_is_current_master(file_priv)) goto out_unlock; if (dev->master) { - ret = -EINVAL; + ret = -EBUSY; goto out_unlock; } @@ -224,6 +285,12 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data, int ret = -EINVAL; mutex_lock(&dev->master_mutex); + + ret = drm_master_check_perm(dev, file_priv); + if (ret) + goto out_unlock; + + ret = -EINVAL; if (!drm_is_current_master(file_priv)) goto out_unlock; diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index 121481f6aa71..88eedee018d3 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -183,6 +183,12 @@ * plane does not expose the "alpha" property, then this is * assumed to be 1.0 * + * IN_FORMATS: + * Blob property which contains the set of buffer format and modifier + * pairs supported by this plane. The blob is a drm_format_modifier_blob + * struct. Without this property the plane doesn't support buffers with + * modifiers. Userspace cannot change this property. + * * Note that all the property extensions described here apply either to the * plane or the CRTC (e.g. for the background color, which currently is not * exposed and assumed to be black). 
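A minimal userspace sketch (not part of the patch series shown here) of the SET_MASTER/DROP_MASTER flow whose permission rules the new drm_auth.c comment above describes: with that change, a client that is or previously was master may set/drop master without CAP_SYS_ADMIN, and SET_MASTER now reports -EBUSY when another master already holds the device. The snippet assumes libdrm's drmSetMaster()/drmDropMaster() wrappers and a hypothetical /dev/dri/card0 node; error handling is reduced to perror().

/* Illustrative only; exercises the ioctls affected by the drm_auth.c hunk above. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>	/* libdrm wrappers around DRM_IOCTL_SET_MASTER / DROP_MASTER */

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);

	if (fd < 0) {
		perror("open /dev/dri/card0");
		return 1;
	}

	/*
	 * Succeeds when this fd is (or was) master, or when the caller has
	 * CAP_SYS_ADMIN; fails with EBUSY if another master exists.
	 */
	if (drmSetMaster(fd))
		perror("drmSetMaster");

	/* Per the patch, a former master may drop master without CAP_SYS_ADMIN. */
	if (drmDropMaster(fd))
		perror("drmDropMaster");

	close(fd);
	return 0;
}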
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index dcabf5698333..ef26ac57f039 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -33,6 +33,7 @@ #include <linux/mm.h> #include <linux/mman.h> #include <linux/nospec.h> +#include <linux/pci.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> @@ -43,7 +44,6 @@ #include <drm/drm_device.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> -#include <drm/drm_pci.h> #include <drm/drm_print.h> #include "drm_legacy.h" diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index 6b0c6ef8b9b3..8cb93f5209a4 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -457,10 +457,10 @@ static const struct drm_info_list drm_client_debugfs_list[] = { { "internal_clients", drm_client_debugfs_internal_clients, 0 }, }; -int drm_client_debugfs_init(struct drm_minor *minor) +void drm_client_debugfs_init(struct drm_minor *minor) { - return drm_debugfs_create_files(drm_client_debugfs_list, - ARRAY_SIZE(drm_client_debugfs_list), - minor->debugfs_root, minor); + drm_debugfs_create_files(drm_client_debugfs_list, + ARRAY_SIZE(drm_client_debugfs_list), + minor->debugfs_root, minor); } #endif diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 644f0ad10671..b1099e1251a2 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -1970,6 +1970,8 @@ int drm_connector_update_edid_property(struct drm_connector *connector, else drm_reset_display_info(connector); + drm_update_tile_info(connector, edid); + drm_object_property_set_value(&connector->base, dev->mode_config.non_desktop_property, connector->display_info.non_desktop); @@ -2392,7 +2394,7 @@ EXPORT_SYMBOL(drm_mode_put_tile_group); * tile group or NULL if not found. */ struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev, - char topology[8]) + const char topology[8]) { struct drm_tile_group *tg; int id; @@ -2422,7 +2424,7 @@ EXPORT_SYMBOL(drm_mode_get_tile_group); * new tile group or NULL. 
*/ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, - char topology[8]) + const char topology[8]) { struct drm_tile_group *tg; int ret; diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index 16f2413403aa..da96b2f64d7e 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -82,6 +82,7 @@ int drm_mode_setcrtc(struct drm_device *dev, /* drm_mode_config.c */ int drm_modeset_register_all(struct drm_device *dev); void drm_modeset_unregister_all(struct drm_device *dev); +void drm_mode_config_validate(struct drm_device *dev); /* drm_modes.c */ const char *drm_get_mode_status_name(enum drm_mode_status status); @@ -224,7 +225,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, /* drm_atomic.c */ #ifdef CONFIG_DEBUG_FS struct drm_minor; -int drm_atomic_debugfs_init(struct drm_minor *minor); +void drm_atomic_debugfs_init(struct drm_minor *minor); #endif int __drm_atomic_helper_disable_plane(struct drm_plane *plane, @@ -278,3 +279,4 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, void drm_mode_fixup_1366x768(struct drm_display_mode *mode); void drm_reset_display_info(struct drm_connector *connector); u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid); +void drm_update_tile_info(struct drm_connector *connector, const struct edid *edid); diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 4e673d318503..2bea22130703 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -172,8 +172,8 @@ static const struct file_operations drm_debugfs_fops = { * &struct drm_info_list in the given root directory. These files will be removed * automatically on drm_debugfs_cleanup(). */ -int drm_debugfs_create_files(const struct drm_info_list *files, int count, - struct dentry *root, struct drm_minor *minor) +void drm_debugfs_create_files(const struct drm_info_list *files, int count, + struct dentry *root, struct drm_minor *minor) { struct drm_device *dev = minor->dev; struct drm_info_node *tmp; @@ -199,7 +199,6 @@ int drm_debugfs_create_files(const struct drm_info_list *files, int count, list_add(&tmp->list, &minor->debugfs_list); mutex_unlock(&minor->debugfs_lock); } - return 0; } EXPORT_SYMBOL(drm_debugfs_create_files); @@ -208,52 +207,28 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id, { struct drm_device *dev = minor->dev; char name[64]; - int ret; INIT_LIST_HEAD(&minor->debugfs_list); mutex_init(&minor->debugfs_lock); sprintf(name, "%d", minor_id); minor->debugfs_root = debugfs_create_dir(name, root); - ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, - minor->debugfs_root, minor); - if (ret) { - debugfs_remove(minor->debugfs_root); - minor->debugfs_root = NULL; - DRM_ERROR("Failed to create core drm debugfs files\n"); - return ret; - } + drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, + minor->debugfs_root, minor); if (drm_drv_uses_atomic_modeset(dev)) { - ret = drm_atomic_debugfs_init(minor); - if (ret) { - DRM_ERROR("Failed to create atomic debugfs files\n"); - return ret; - } + drm_atomic_debugfs_init(minor); } if (drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = drm_framebuffer_debugfs_init(minor); - if (ret) { - DRM_ERROR("Failed to create framebuffer debugfs file\n"); - return ret; - } + drm_framebuffer_debugfs_init(minor); - ret = drm_client_debugfs_init(minor); - if (ret) { - DRM_ERROR("Failed to create client debugfs file\n"); - return ret; - } + 
drm_client_debugfs_init(minor); } - if (dev->driver->debugfs_init) { - ret = dev->driver->debugfs_init(minor); - if (ret) { - DRM_ERROR("DRM: Driver failed to initialize " - "/sys/kernel/debug/dri.\n"); - return ret; - } - } + if (dev->driver->debugfs_init) + dev->driver->debugfs_init(minor); + return 0; } diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c index a7add55a85b4..d07ba54ec945 100644 --- a/drivers/gpu/drm/drm_dma.c +++ b/drivers/gpu/drm/drm_dma.c @@ -34,9 +34,9 @@ */ #include <linux/export.h> +#include <linux/pci.h> #include <drm/drm_drv.h> -#include <drm/drm_pci.h> #include <drm/drm_print.h> #include "drm_legacy.h" diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index c6fbe6e6bc9d..612a59ec8116 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -1238,6 +1238,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = { { OUI(0x00, 0x00, 0x00), DEVICE_ID('C', 'H', '7', '5', '1', '1'), false, BIT(DP_DPCD_QUIRK_NO_SINK_COUNT) }, /* Synaptics DP1.4 MST hubs can support DSC without virtual DPCD */ { OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) }, + /* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */ + { OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) }, }; #undef OUI @@ -1533,3 +1535,97 @@ int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_S return num_bpc; } EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs); + +/** + * drm_dp_get_phy_test_pattern() - get the requested pattern from the sink. + * @aux: DisplayPort AUX channel + * @data: DP phy compliance test parameters. + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux, + struct drm_dp_phy_test_params *data) +{ + int err; + u8 rate, lanes; + + err = drm_dp_dpcd_readb(aux, DP_TEST_LINK_RATE, &rate); + if (err < 0) + return err; + data->link_rate = drm_dp_bw_code_to_link_rate(rate); + + err = drm_dp_dpcd_readb(aux, DP_TEST_LANE_COUNT, &lanes); + if (err < 0) + return err; + data->num_lanes = lanes & DP_MAX_LANE_COUNT_MASK; + + if (lanes & DP_ENHANCED_FRAME_CAP) + data->enhanced_frame_cap = true; + + err = drm_dp_dpcd_readb(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern); + if (err < 0) + return err; + + switch (data->phy_pattern) { + case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: + err = drm_dp_dpcd_read(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0, + &data->custom80, sizeof(data->custom80)); + if (err < 0) + return err; + + break; + case DP_PHY_TEST_PATTERN_CP2520: + err = drm_dp_dpcd_read(aux, DP_TEST_HBR2_SCRAMBLER_RESET, + &data->hbr2_reset, + sizeof(data->hbr2_reset)); + if (err < 0) + return err; + } + + return 0; +} +EXPORT_SYMBOL(drm_dp_get_phy_test_pattern); + +/** + * drm_dp_set_phy_test_pattern() - set the pattern to the sink. + * @aux: DisplayPort AUX channel + * @data: DP phy compliance test parameters. + * + * Returns 0 on success or a negative error code on failure. 
+ */ +int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux, + struct drm_dp_phy_test_params *data, u8 dp_rev) +{ + int err, i; + u8 link_config[2]; + u8 test_pattern; + + link_config[0] = drm_dp_link_rate_to_bw_code(data->link_rate); + link_config[1] = data->num_lanes; + if (data->enhanced_frame_cap) + link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, link_config, 2); + if (err < 0) + return err; + + test_pattern = data->phy_pattern; + if (dp_rev < 0x12) { + test_pattern = (test_pattern << 2) & + DP_LINK_QUAL_PATTERN_11_MASK; + err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, + test_pattern); + if (err < 0) + return err; + } else { + for (i = 0; i < data->num_lanes; i++) { + err = drm_dp_dpcd_writeb(aux, + DP_LINK_QUAL_LANE0_SET + i, + test_pattern); + if (err < 0) + return err; + } + } + + return 0; +} +EXPORT_SYMBOL(drm_dp_set_phy_test_pattern); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 70c4b7afed12..13213c4b77d1 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -27,6 +27,7 @@ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/seq_file.h> +#include <linux/iopoll.h> #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) #include <linux/stacktrace.h> @@ -687,51 +688,45 @@ static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body * raw->cur_len = idx; } -/* this adds a chunk of msg to the builder to get the final msg */ -static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg, - u8 *replybuf, u8 replybuflen, bool hdr) +static int drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg, + struct drm_dp_sideband_msg_hdr *hdr, + u8 hdrlen) { - int ret; - u8 crc4; + /* + * ignore out-of-order messages or messages that are part of a + * failed transaction + */ + if (!hdr->somt && !msg->have_somt) + return false; - if (hdr) { - u8 hdrlen; - struct drm_dp_sideband_msg_hdr recv_hdr; - ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen); - if (ret == false) { - print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false); - return false; - } + /* get length contained in this portion */ + msg->curchunk_idx = 0; + msg->curchunk_len = hdr->msg_len; + msg->curchunk_hdrlen = hdrlen; - /* - * ignore out-of-order messages or messages that are part of a - * failed transaction - */ - if (!recv_hdr.somt && !msg->have_somt) - return false; + /* we have already gotten an somt - don't bother parsing */ + if (hdr->somt && msg->have_somt) + return false; - /* get length contained in this portion */ - msg->curchunk_len = recv_hdr.msg_len; - msg->curchunk_hdrlen = hdrlen; + if (hdr->somt) { + memcpy(&msg->initial_hdr, hdr, + sizeof(struct drm_dp_sideband_msg_hdr)); + msg->have_somt = true; + } + if (hdr->eomt) + msg->have_eomt = true; - /* we have already gotten an somt - don't bother parsing */ - if (recv_hdr.somt && msg->have_somt) - return false; + return true; +} - if (recv_hdr.somt) { - memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr)); - msg->have_somt = true; - } - if (recv_hdr.eomt) - msg->have_eomt = true; +/* this adds a chunk of msg to the builder to get the final msg */ +static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg, + u8 *replybuf, u8 replybuflen) +{ + u8 crc4; - /* copy the bytes for the remainder of this header chunk */ - msg->curchunk_idx = min(msg->curchunk_len, 
(u8)(replybuflen - hdrlen)); - memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx); - } else { - memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen); - msg->curchunk_idx += replybuflen; - } + memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen); + msg->curchunk_idx += replybuflen; if (msg->curchunk_idx >= msg->curchunk_len) { /* do CRC */ @@ -1060,13 +1055,12 @@ static void build_link_address(struct drm_dp_sideband_msg_tx *msg) drm_dp_encode_sideband_req(&req, msg); } -static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg) +static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg) { struct drm_dp_sideband_msg_req_body req; req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE; drm_dp_encode_sideband_req(&req, msg); - return 0; } static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, @@ -1211,8 +1205,6 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb, txmsg->state == DRM_DP_SIDEBAND_TX_SENT) { mstb->tx_slots[txmsg->seqno] = NULL; } - mgr->is_waiting_for_dwn_reply = false; - } out: if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) { @@ -1222,7 +1214,6 @@ out: } mutex_unlock(&mgr->qlock); - drm_dp_mst_kick_tx(mgr); return ret; } @@ -2798,11 +2789,9 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) ret = process_single_tx_qlock(mgr, txmsg, false); if (ret == 1) { /* txmsg is sent it should be in the slots now */ - mgr->is_waiting_for_dwn_reply = true; list_del(&txmsg->next); } else if (ret) { DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); - mgr->is_waiting_for_dwn_reply = false; list_del(&txmsg->next); if (txmsg->seqno != -1) txmsg->dst->tx_slots[txmsg->seqno] = NULL; @@ -2842,8 +2831,7 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); } - if (list_is_singular(&mgr->tx_msg_downq) && - !mgr->is_waiting_for_dwn_reply) + if (list_is_singular(&mgr->tx_msg_downq)) process_single_down_tx_qlock(mgr); mutex_unlock(&mgr->qlock); } @@ -3703,31 +3691,67 @@ out_fail: } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); -static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) +static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up, + struct drm_dp_mst_branch **mstb, int *seqno) { int len; u8 replyblock[32]; int replylen, curreply; int ret; + u8 hdrlen; + struct drm_dp_sideband_msg_hdr hdr; struct drm_dp_sideband_msg_rx *msg; - int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE; - msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv; + int basereg = up ? 
DP_SIDEBAND_MSG_UP_REQ_BASE : + DP_SIDEBAND_MSG_DOWN_REP_BASE; + + if (!up) + *mstb = NULL; + *seqno = -1; len = min(mgr->max_dpcd_transaction_bytes, 16); - ret = drm_dp_dpcd_read(mgr->aux, basereg, - replyblock, len); + ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len); if (ret != len) { DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret); return false; } - ret = drm_dp_sideband_msg_build(msg, replyblock, len, true); + + ret = drm_dp_decode_sideband_msg_hdr(&hdr, replyblock, len, &hdrlen); + if (ret == false) { + print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, + 1, replyblock, len, false); + DRM_DEBUG_KMS("ERROR: failed header\n"); + return false; + } + + *seqno = hdr.seqno; + + if (up) { + msg = &mgr->up_req_recv; + } else { + /* Caller is responsible for giving back this reference */ + *mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad); + if (!*mstb) { + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", + hdr.lct); + return false; + } + msg = &(*mstb)->down_rep_recv[hdr.seqno]; + } + + if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) { + DRM_DEBUG_KMS("sideband msg set header failed %d\n", + replyblock[0]); + return false; + } + + replylen = min(msg->curchunk_len, (u8)(len - hdrlen)); + ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen); if (!ret) { DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]); return false; } - replylen = msg->curchunk_len + msg->curchunk_hdrlen; - replylen -= len; + replylen = msg->curchunk_len + msg->curchunk_hdrlen - len; curreply = len; while (replylen > 0) { len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16); @@ -3739,7 +3763,7 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) return false; } - ret = drm_dp_sideband_msg_build(msg, replyblock, len, false); + ret = drm_dp_sideband_append_payload(msg, replyblock, len); if (!ret) { DRM_DEBUG_KMS("failed to build sideband msg\n"); return false; @@ -3754,67 +3778,63 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) { struct drm_dp_sideband_msg_tx *txmsg; - struct drm_dp_mst_branch *mstb; - struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr; - int slot = -1; + struct drm_dp_mst_branch *mstb = NULL; + struct drm_dp_sideband_msg_rx *msg = NULL; + int seqno = -1; - if (!drm_dp_get_one_sb_msg(mgr, false)) - goto clear_down_rep_recv; + if (!drm_dp_get_one_sb_msg(mgr, false, &mstb, &seqno)) + goto out_clear_reply; - if (!mgr->down_rep_recv.have_eomt) - return 0; + msg = &mstb->down_rep_recv[seqno]; - mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad); - if (!mstb) { - DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", - hdr->lct); - goto clear_down_rep_recv; - } + /* Multi-packet message transmission, don't clear the reply */ + if (!msg->have_eomt) + goto out; /* find the message */ - slot = hdr->seqno; mutex_lock(&mgr->qlock); - txmsg = mstb->tx_slots[slot]; + txmsg = mstb->tx_slots[seqno]; /* remove from slots */ mutex_unlock(&mgr->qlock); if (!txmsg) { + struct drm_dp_sideband_msg_hdr *hdr; + hdr = &msg->initial_hdr; DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n", mstb, hdr->seqno, hdr->lct, hdr->rad[0], - mgr->down_rep_recv.msg[0]); - goto no_msg; + msg->msg[0]); + goto out_clear_reply; } - drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply); + drm_dp_sideband_parse_reply(msg, &txmsg->reply); - if 
(txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) + if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n", txmsg->reply.req_type, drm_dp_mst_req_type_str(txmsg->reply.req_type), txmsg->reply.u.nak.reason, drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason), txmsg->reply.u.nak.nak_data); + } - memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx)); drm_dp_mst_topology_put_mstb(mstb); mutex_lock(&mgr->qlock); txmsg->state = DRM_DP_SIDEBAND_TX_RX; - mstb->tx_slots[slot] = NULL; - mgr->is_waiting_for_dwn_reply = false; + mstb->tx_slots[seqno] = NULL; mutex_unlock(&mgr->qlock); wake_up_all(&mgr->tx_waitq); return 0; -no_msg: - drm_dp_mst_topology_put_mstb(mstb); -clear_down_rep_recv: - mutex_lock(&mgr->qlock); - mgr->is_waiting_for_dwn_reply = false; - mutex_unlock(&mgr->qlock); - memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); +out_clear_reply: + if (msg) + memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx)); +out: + if (mstb) + drm_dp_mst_topology_put_mstb(mstb); return 0; } @@ -3890,11 +3910,10 @@ static void drm_dp_mst_up_req_work(struct work_struct *work) static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) { - struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr; struct drm_dp_pending_up_req *up_req; - bool seqno; + int seqno; - if (!drm_dp_get_one_sb_msg(mgr, true)) + if (!drm_dp_get_one_sb_msg(mgr, true, NULL, &seqno)) goto out; if (!mgr->up_req_recv.have_eomt) @@ -3907,7 +3926,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) } INIT_LIST_HEAD(&up_req->next); - seqno = hdr->seqno; drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg); if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY && @@ -3941,7 +3959,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) res_stat->available_pbn); } - up_req->hdr = *hdr; + up_req->hdr = mgr->up_req_recv.initial_hdr; mutex_lock(&mgr->up_req_lock); list_add_tail(&up_req->next, &mgr->up_req_list); mutex_unlock(&mgr->up_req_lock); @@ -4047,27 +4065,6 @@ out: EXPORT_SYMBOL(drm_dp_mst_detect_port); /** - * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not - * @mgr: manager for this port - * @port: unverified pointer to a port. - * - * This returns whether the port supports audio or not. - */ -bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port) -{ - bool ret = false; - - port = drm_dp_mst_topology_get_port_validated(mgr, port); - if (!port) - return ret; - ret = port->has_audio; - drm_dp_mst_topology_put_port(port); - return ret; -} -EXPORT_SYMBOL(drm_dp_mst_port_has_audio); - -/** * drm_dp_mst_get_edid() - get EDID for an MST port * @connector: toplevel connector to get EDID for * @mgr: manager for this port @@ -4443,42 +4440,58 @@ fail: return ret; } +static int do_get_act_status(struct drm_dp_aux *aux) +{ + int ret; + u8 status; + + ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); + if (ret < 0) + return ret; + + return status; +} /** - * drm_dp_check_act_status() - Check ACT handled status. + * drm_dp_check_act_status() - Polls for ACT handled status. * @mgr: manager to use * - * Check the payload status bits in the DPCD for ACT handled completion. 
+ * Tries waiting for the MST hub to finish updating it's payload table by + * polling for the ACT handled bit for up to 3 seconds (yes-some hubs really + * take that long). + * + * Returns: + * 0 if the ACT was handled in time, negative error code on failure. */ int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr) { - u8 status; - int ret; - int count = 0; - - do { - ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); - - if (ret < 0) { - DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); - goto fail; - } - - if (status & DP_PAYLOAD_ACT_HANDLED) - break; - count++; - udelay(100); - - } while (count < 30); - - if (!(status & DP_PAYLOAD_ACT_HANDLED)) { - DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count); - ret = -EINVAL; - goto fail; + /* + * There doesn't seem to be any recommended retry count or timeout in + * the MST specification. Since some hubs have been observed to take + * over 1 second to update their payload allocations under certain + * conditions, we use a rather large timeout value. + */ + const int timeout_ms = 3000; + int ret, status; + + ret = readx_poll_timeout(do_get_act_status, mgr->aux, status, + status & DP_PAYLOAD_ACT_HANDLED || status < 0, + 200, timeout_ms * USEC_PER_MSEC); + if (ret < 0 && status >= 0) { + DRM_ERROR("Failed to get ACT after %dms, last status: %02x\n", + timeout_ms, status); + return -EINVAL; + } else if (status < 0) { + /* + * Failure here isn't unexpected - the hub may have + * just been unplugged + */ + DRM_DEBUG_KMS("Failed to read payload table status: %d\n", + status); + return status; } + return 0; -fail: - return ret; } EXPORT_SYMBOL(drm_dp_check_act_status); @@ -4669,28 +4682,18 @@ static void drm_dp_tx_work(struct work_struct *work) struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work); mutex_lock(&mgr->qlock); - if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply) + if (!list_empty(&mgr->tx_msg_downq)) process_single_down_tx_qlock(mgr); mutex_unlock(&mgr->qlock); } -static inline void drm_dp_destroy_connector(struct drm_dp_mst_port *port) +static inline void +drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port) { - if (!port->connector) - return; - - if (port->mgr->cbs->destroy_connector) { - port->mgr->cbs->destroy_connector(port->mgr, port->connector); - } else { + if (port->connector) { drm_connector_unregister(port->connector); drm_connector_put(port->connector); } -} - -static inline void -drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port) -{ - drm_dp_destroy_connector(port); drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs); drm_dp_mst_put_port_malloc(port); diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 7b1a628d1f6e..c15c9b4540e1 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -39,6 +39,7 @@ #include <drm/drm_color_mgmt.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> +#include <drm/drm_managed.h> #include <drm/drm_mode_object.h> #include <drm/drm_print.h> @@ -92,13 +93,27 @@ static struct drm_minor **drm_minor_get_slot(struct drm_device *dev, } } +static void drm_minor_alloc_release(struct drm_device *dev, void *data) +{ + struct drm_minor *minor = data; + unsigned long flags; + + WARN_ON(dev != minor->dev); + + put_device(minor->kdev); + + spin_lock_irqsave(&drm_minor_lock, flags); + idr_remove(&drm_minors_idr, minor->index); + spin_unlock_irqrestore(&drm_minor_lock, flags); +} + static int drm_minor_alloc(struct 
drm_device *dev, unsigned int type) { struct drm_minor *minor; unsigned long flags; int r; - minor = kzalloc(sizeof(*minor), GFP_KERNEL); + minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL); if (!minor) return -ENOMEM; @@ -116,46 +131,20 @@ static int drm_minor_alloc(struct drm_device *dev, unsigned int type) idr_preload_end(); if (r < 0) - goto err_free; + return r; minor->index = r; + r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor); + if (r) + return r; + minor->kdev = drm_sysfs_minor_alloc(minor); - if (IS_ERR(minor->kdev)) { - r = PTR_ERR(minor->kdev); - goto err_index; - } + if (IS_ERR(minor->kdev)) + return PTR_ERR(minor->kdev); *drm_minor_get_slot(dev, type) = minor; return 0; - -err_index: - spin_lock_irqsave(&drm_minor_lock, flags); - idr_remove(&drm_minors_idr, minor->index); - spin_unlock_irqrestore(&drm_minor_lock, flags); -err_free: - kfree(minor); - return r; -} - -static void drm_minor_free(struct drm_device *dev, unsigned int type) -{ - struct drm_minor **slot, *minor; - unsigned long flags; - - slot = drm_minor_get_slot(dev, type); - minor = *slot; - if (!minor) - return; - - put_device(minor->kdev); - - spin_lock_irqsave(&drm_minor_lock, flags); - idr_remove(&drm_minors_idr, minor->index); - spin_unlock_irqrestore(&drm_minor_lock, flags); - - kfree(minor); - *slot = NULL; } static int drm_minor_register(struct drm_device *dev, unsigned int type) @@ -270,17 +259,22 @@ void drm_minor_release(struct drm_minor *minor) * any other resources allocated at device initialization and drop the driver's * reference to &drm_device using drm_dev_put(). * - * Note that the lifetime rules for &drm_device instance has still a lot of - * historical baggage. Hence use the reference counting provided by - * drm_dev_get() and drm_dev_put() only carefully. + * Note that any allocation or resource which is visible to userspace must be + * released only when the final drm_dev_put() is called, and not when the + * driver is unbound from the underlying physical struct &device. Best to use + * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and + * related functions. + * + * devres managed resources like devm_kmalloc() can only be used for resources + * directly related to the underlying hardware device, and only used in code + * paths fully protected by drm_dev_enter() and drm_dev_exit(). * * Display driver example * ~~~~~~~~~~~~~~~~~~~~~~ * * The following example shows a typical structure of a DRM display driver. * The example focus on the probe() function and the other functions that is - * almost always present and serves as a demonstration of devm_drm_dev_init() - * usage with its accompanying drm_driver->release callback. + * almost always present and serves as a demonstration of devm_drm_dev_init(). * * .. code-block:: c * @@ -290,19 +284,8 @@ void drm_minor_release(struct drm_minor *minor) * struct clk *pclk; * }; * - * static void driver_drm_release(struct drm_device *drm) - * { - * struct driver_device *priv = container_of(...); - * - * drm_mode_config_cleanup(drm); - * drm_dev_fini(drm); - * kfree(priv->userspace_facing); - * kfree(priv); - * } - * * static struct drm_driver driver_drm_driver = { * [...] 
- * .release = driver_drm_release, * }; * * static int driver_probe(struct platform_device *pdev) @@ -322,13 +305,16 @@ void drm_minor_release(struct drm_minor *minor) * * ret = devm_drm_dev_init(&pdev->dev, drm, &driver_drm_driver); * if (ret) { - * kfree(drm); + * kfree(priv); * return ret; * } + * drmm_add_final_kfree(drm, priv); * - * drm_mode_config_init(drm); + * ret = drmm_mode_config_init(drm); + * if (ret) + * return ret; * - * priv->userspace_facing = kzalloc(..., GFP_KERNEL); + * priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL); * if (!priv->userspace_facing) * return -ENOMEM; * @@ -580,6 +566,23 @@ static void drm_fs_inode_free(struct inode *inode) * used. */ +static void drm_dev_init_release(struct drm_device *dev, void *res) +{ + drm_legacy_ctxbitmap_cleanup(dev); + drm_legacy_remove_map_hash(dev); + drm_fs_inode_free(dev->anon_inode); + + put_device(dev->dev); + /* Prevent use-after-free in drm_managed_release when debugging is + * enabled. Slightly awkward, but can't really be helped. */ + dev->dev = NULL; + mutex_destroy(&dev->master_mutex); + mutex_destroy(&dev->clientlist_mutex); + mutex_destroy(&dev->filelist_mutex); + mutex_destroy(&dev->struct_mutex); + drm_legacy_destroy_members(dev); +} + /** * drm_dev_init - Initialise new DRM device * @dev: DRM device @@ -608,6 +611,9 @@ static void drm_fs_inode_free(struct inode *inode) * arbitrary offset, you must supply a &drm_driver.release callback and control * the finalization explicitly. * + * Note that drivers must call drmm_add_final_kfree() after this function has + * completed successfully. + * * RETURNS: * 0 on success, or error code on failure. */ @@ -629,6 +635,9 @@ int drm_dev_init(struct drm_device *dev, dev->dev = get_device(parent); dev->driver = driver; + INIT_LIST_HEAD(&dev->managed.resources); + spin_lock_init(&dev->managed.lock); + /* no per-device feature limits by default */ dev->driver_features = ~0u; @@ -644,26 +653,30 @@ int drm_dev_init(struct drm_device *dev, mutex_init(&dev->clientlist_mutex); mutex_init(&dev->master_mutex); + ret = drmm_add_action(dev, drm_dev_init_release, NULL); + if (ret) + return ret; + dev->anon_inode = drm_fs_inode_new(); if (IS_ERR(dev->anon_inode)) { ret = PTR_ERR(dev->anon_inode); DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret); - goto err_free; + goto err; } if (drm_core_check_feature(dev, DRIVER_RENDER)) { ret = drm_minor_alloc(dev, DRM_MINOR_RENDER); if (ret) - goto err_minors; + goto err; } ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY); if (ret) - goto err_minors; + goto err; ret = drm_legacy_create_map_hash(dev); if (ret) - goto err_minors; + goto err; drm_legacy_ctxbitmap_init(dev); @@ -671,33 +684,19 @@ int drm_dev_init(struct drm_device *dev, ret = drm_gem_init(dev); if (ret) { DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n"); - goto err_ctxbitmap; + goto err; } } ret = drm_dev_set_unique(dev, dev_name(parent)); if (ret) - goto err_setunique; + goto err; return 0; -err_setunique: - if (drm_core_check_feature(dev, DRIVER_GEM)) - drm_gem_destroy(dev); -err_ctxbitmap: - drm_legacy_ctxbitmap_cleanup(dev); - drm_legacy_remove_map_hash(dev); -err_minors: - drm_minor_free(dev, DRM_MINOR_PRIMARY); - drm_minor_free(dev, DRM_MINOR_RENDER); - drm_fs_inode_free(dev->anon_inode); -err_free: - put_device(dev->dev); - mutex_destroy(&dev->master_mutex); - mutex_destroy(&dev->clientlist_mutex); - mutex_destroy(&dev->filelist_mutex); - mutex_destroy(&dev->struct_mutex); - drm_legacy_destroy_members(dev); +err: + drm_managed_release(dev); + return ret; } 
EXPORT_SYMBOL(drm_dev_init); @@ -714,8 +713,10 @@ static void devm_drm_dev_init_release(void *data) * @driver: DRM driver * * Managed drm_dev_init(). The DRM device initialized with this function is - * automatically put on driver detach using drm_dev_put(). You must supply a - * &drm_driver.release callback to control the finalization explicitly. + * automatically put on driver detach using drm_dev_put(). + * + * Note that drivers must call drmm_add_final_kfree() after this function has + * completed successfully. * * RETURNS: * 0 on success, or error code on failure. @@ -726,9 +727,6 @@ int devm_drm_dev_init(struct device *parent, { int ret; - if (WARN_ON(!driver->release)) - return -EINVAL; - ret = drm_dev_init(dev, driver, parent); if (ret) return ret; @@ -742,43 +740,6 @@ int devm_drm_dev_init(struct device *parent, EXPORT_SYMBOL(devm_drm_dev_init); /** - * drm_dev_fini - Finalize a dead DRM device - * @dev: DRM device - * - * Finalize a dead DRM device. This is the converse to drm_dev_init() and - * frees up all data allocated by it. All driver private data should be - * finalized first. Note that this function does not free the @dev, that is - * left to the caller. - * - * The ref-count of @dev must be zero, and drm_dev_fini() should only be called - * from a &drm_driver.release callback. - */ -void drm_dev_fini(struct drm_device *dev) -{ - drm_vblank_cleanup(dev); - - if (drm_core_check_feature(dev, DRIVER_GEM)) - drm_gem_destroy(dev); - - drm_legacy_ctxbitmap_cleanup(dev); - drm_legacy_remove_map_hash(dev); - drm_fs_inode_free(dev->anon_inode); - - drm_minor_free(dev, DRM_MINOR_PRIMARY); - drm_minor_free(dev, DRM_MINOR_RENDER); - - put_device(dev->dev); - - mutex_destroy(&dev->master_mutex); - mutex_destroy(&dev->clientlist_mutex); - mutex_destroy(&dev->filelist_mutex); - mutex_destroy(&dev->struct_mutex); - drm_legacy_destroy_members(dev); - kfree(dev->unique); -} -EXPORT_SYMBOL(drm_dev_fini); - -/** * drm_dev_alloc - Allocate new DRM device * @driver: DRM driver to allocate device for * @parent: Parent device object @@ -816,6 +777,8 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver, return ERR_PTR(ret); } + drmm_add_final_kfree(dev, dev); + return dev; } EXPORT_SYMBOL(drm_dev_alloc); @@ -824,12 +787,13 @@ static void drm_dev_release(struct kref *ref) { struct drm_device *dev = container_of(ref, struct drm_device, ref); - if (dev->driver->release) { + if (dev->driver->release) dev->driver->release(dev); - } else { - drm_dev_fini(dev); - kfree(dev); - } + + drm_managed_release(dev); + + if (dev->managed.final_kfree) + kfree(dev->managed.final_kfree); } /** @@ -946,6 +910,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) struct drm_driver *driver = dev->driver; int ret; + if (!driver->load) + drm_mode_config_validate(dev); + + WARN_ON(!dev->managed.final_kfree); + if (drm_dev_needs_global_mutex(dev)) mutex_lock(&drm_global_mutex); @@ -1046,8 +1015,8 @@ EXPORT_SYMBOL(drm_dev_unregister); */ int drm_dev_set_unique(struct drm_device *dev, const char *name) { - kfree(dev->unique); - dev->unique = kstrdup(name, GFP_KERNEL); + drmm_kfree(dev, dev->unique); + dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL); return dev->unique ? 
0 : -ENOMEM; } diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 116451101426..43b6ca364daa 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1583,8 +1583,6 @@ module_param_named(edid_fixup, edid_fixup, int, 0400); MODULE_PARM_DESC(edid_fixup, "Minimum number of valid EDID header bytes (0-8, default 6)"); -static void drm_get_displayid(struct drm_connector *connector, - struct edid *edid); static int validate_displayid(u8 *displayid, int length, int idx); static int drm_edid_block_checksum(const u8 *raw_edid) @@ -2018,18 +2016,13 @@ EXPORT_SYMBOL(drm_probe_ddc); struct edid *drm_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) { - struct edid *edid; - if (connector->force == DRM_FORCE_OFF) return NULL; if (connector->force == DRM_FORCE_UNSPECIFIED && !drm_probe_ddc(adapter)) return NULL; - edid = drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter); - if (edid) - drm_get_displayid(connector, edid); - return edid; + return drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter); } EXPORT_SYMBOL(drm_get_edid); @@ -3212,16 +3205,33 @@ static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id) } -static u8 *drm_find_displayid_extension(const struct edid *edid) +static u8 *drm_find_displayid_extension(const struct edid *edid, + int *length, int *idx) { - return drm_find_edid_extension(edid, DISPLAYID_EXT); + u8 *displayid = drm_find_edid_extension(edid, DISPLAYID_EXT); + struct displayid_hdr *base; + int ret; + + if (!displayid) + return NULL; + + /* EDID extensions block checksum isn't for us */ + *length = EDID_LENGTH - 1; + *idx = 1; + + ret = validate_displayid(displayid, *length, *idx); + if (ret) + return NULL; + + base = (struct displayid_hdr *)&displayid[*idx]; + *length = *idx + sizeof(*base) + base->bytes; + + return displayid; } static u8 *drm_find_cea_extension(const struct edid *edid) { - int ret; - int idx = 1; - int length = EDID_LENGTH; + int length, idx; struct displayid_block *block; u8 *cea; u8 *displayid; @@ -3232,14 +3242,10 @@ static u8 *drm_find_cea_extension(const struct edid *edid) return cea; /* CEA blocks can also be found embedded in a DisplayID block */ - displayid = drm_find_displayid_extension(edid); + displayid = drm_find_displayid_extension(edid, &length, &idx); if (!displayid) return NULL; - ret = validate_displayid(displayid, length, idx); - if (ret) - return NULL; - idx += sizeof(struct displayid_hdr); for_each_displayid_db(displayid, block, idx, length) { if (block->tag == DATA_BLOCK_CTA) { @@ -5084,7 +5090,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi static int validate_displayid(u8 *displayid, int length, int idx) { - int i; + int i, dispid_length; u8 csum = 0; struct displayid_hdr *base; @@ -5093,15 +5099,18 @@ static int validate_displayid(u8 *displayid, int length, int idx) DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n", base->rev, base->bytes, base->prod_id, base->ext_count); - if (base->bytes + 5 > length - idx) + /* +1 for DispID checksum */ + dispid_length = sizeof(*base) + base->bytes + 1; + if (dispid_length > length - idx) return -EINVAL; - for (i = idx; i <= base->bytes + 5; i++) { - csum += displayid[i]; - } + + for (i = 0; i < dispid_length; i++) + csum += displayid[idx + i]; if (csum) { DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum); return -EINVAL; } + return 0; } @@ -5180,20 +5189,14 @@ static int add_displayid_detailed_modes(struct drm_connector *connector, struct edid *edid) { u8 
*displayid; - int ret; - int idx = 1; - int length = EDID_LENGTH; + int length, idx; struct displayid_block *block; int num_modes = 0; - displayid = drm_find_displayid_extension(edid); + displayid = drm_find_displayid_extension(edid, &length, &idx); if (!displayid) return 0; - ret = validate_displayid(displayid, length, idx); - if (ret) - return 0; - idx += sizeof(struct displayid_hdr); for_each_displayid_db(displayid, block, idx, length) { switch (block->tag) { @@ -5782,9 +5785,9 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode); static int drm_parse_tiled_block(struct drm_connector *connector, - struct displayid_block *block) + const struct displayid_block *block) { - struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block; + const struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block; u16 w, h; u8 tile_v_loc, tile_h_loc; u8 num_v_tile, num_h_tile; @@ -5835,22 +5838,12 @@ static int drm_parse_tiled_block(struct drm_connector *connector, return 0; } -static int drm_parse_display_id(struct drm_connector *connector, - u8 *displayid, int length, - bool is_edid_extension) +static int drm_displayid_parse_tiled(struct drm_connector *connector, + const u8 *displayid, int length, int idx) { - /* if this is an EDID extension the first byte will be 0x70 */ - int idx = 0; - struct displayid_block *block; + const struct displayid_block *block; int ret; - if (is_edid_extension) - idx = 1; - - ret = validate_displayid(displayid, length, idx); - if (ret) - return ret; - idx += sizeof(struct displayid_hdr); for_each_displayid_db(displayid, block, idx, length) { DRM_DEBUG_KMS("block id 0x%x, rev %d, len %d\n", @@ -5862,12 +5855,6 @@ static int drm_parse_display_id(struct drm_connector *connector, if (ret) return ret; break; - case DATA_BLOCK_TYPE_1_DETAILED_TIMING: - /* handled in mode gathering code. */ - break; - case DATA_BLOCK_CTA: - /* handled in the cea parser code. */ - break; default: DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n", block->tag); break; @@ -5876,19 +5863,21 @@ static int drm_parse_display_id(struct drm_connector *connector, return 0; } -static void drm_get_displayid(struct drm_connector *connector, - struct edid *edid) +void drm_update_tile_info(struct drm_connector *connector, + const struct edid *edid) { - void *displayid = NULL; + const void *displayid = NULL; + int length, idx; int ret; + connector->has_tile = false; - displayid = drm_find_displayid_extension(edid); + displayid = drm_find_displayid_extension(edid, &length, &idx); if (!displayid) { /* drop reference to any tile group we had */ goto out_drop_ref; } - ret = drm_parse_display_id(connector, displayid, EDID_LENGTH, true); + ret = drm_displayid_parse_tiled(connector, displayid, length, idx); if (ret < 0) goto out_drop_ref; if (!connector->has_tile) diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index 9801c0333eca..cb2349ad338d 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -2,7 +2,7 @@ /* * drm kms/fb cma (contiguous memory allocator) helper functions * - * Copyright (C) 2012 Analog Device Inc. + * Copyright (C) 2012 Analog Devices Inc. 
* Author: Lars-Peter Clausen <lars@metafoo.de> * * Based on udl_fbdev.c diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index a9771de4d17e..02fc24026872 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -514,6 +514,14 @@ struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper) if (ret) goto err_release; + /* + * TODO: We really should be smarter here and alloc an aperture + * for each IORESOURCE_MEM resource helper->dev->dev has and also + * init the ranges of the apertures based on the resources. + * Note some drivers currently count on there being only 1 empty + * aperture and fill this themselves, these will need to be dealt + * with somehow when fixing this. + */ info->apertures = alloc_apertures(1); if (!info->apertures) { ret = -ENOMEM; @@ -2162,6 +2170,8 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = { * * This function sets up generic fbdev emulation for drivers that support * dumb buffers with a virtual address and that can be mmap'ed. + * drm_fbdev_generic_setup() shall be called after the DRM driver has registered + * the new DRM device with drm_dev_register(). * * Restore, hotplug events and teardown are all taken care of. Drivers that do * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves. @@ -2178,29 +2188,30 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = { * Setup will be retried on the next hotplug event. * * The fbdev is destroyed by drm_dev_unregister(). - * - * Returns: - * Zero on success or negative error code on failure. */ -int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) +void drm_fbdev_generic_setup(struct drm_device *dev, + unsigned int preferred_bpp) { struct drm_fb_helper *fb_helper; int ret; - WARN(dev->fb_helper, "fb_helper is already set!\n"); + drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); + drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); if (!drm_fbdev_emulation) - return 0; + return; fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); - if (!fb_helper) - return -ENOMEM; + if (!fb_helper) { + drm_err(dev, "Failed to allocate fb_helper\n"); + return; + } ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs); if (ret) { kfree(fb_helper); drm_err(dev, "Failed to register client: %d\n", ret); - return ret; + return; } if (!preferred_bpp) @@ -2214,8 +2225,6 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) drm_dbg_kms(dev, "client hotplug ret=%d\n", ret); drm_client_register(&fb_helper->client); - - return 0; } EXPORT_SYMBOL(drm_fbdev_generic_setup); diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index c4c704e01961..eb009d3ab48f 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -48,6 +48,11 @@ #include "drm_internal.h" #include "drm_legacy.h" +#if defined(CONFIG_MMU) && defined(CONFIG_TRANSPARENT_HUGEPAGE) +#include <uapi/asm/mman.h> +#include <drm/drm_vma_manager.h> +#endif + /* from BKL pushdown */ DEFINE_MUTEX(drm_global_mutex); @@ -872,3 +877,139 @@ struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags) return file; } EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile); + +#ifdef CONFIG_MMU +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +/* + * drm_addr_inflate() attempts to construct an aligned area by inflating + * the area size and skipping the unaligned start of the area.
+ * adapted from shmem_get_unmapped_area() + */ +static unsigned long drm_addr_inflate(unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags, + unsigned long huge_size) +{ + unsigned long offset, inflated_len; + unsigned long inflated_addr; + unsigned long inflated_offset; + + offset = (pgoff << PAGE_SHIFT) & (huge_size - 1); + if (offset && offset + len < 2 * huge_size) + return addr; + if ((addr & (huge_size - 1)) == offset) + return addr; + + inflated_len = len + huge_size - PAGE_SIZE; + if (inflated_len > TASK_SIZE) + return addr; + if (inflated_len < len) + return addr; + + inflated_addr = current->mm->get_unmapped_area(NULL, 0, inflated_len, + 0, flags); + if (IS_ERR_VALUE(inflated_addr)) + return addr; + if (inflated_addr & ~PAGE_MASK) + return addr; + + inflated_offset = inflated_addr & (huge_size - 1); + inflated_addr += offset - inflated_offset; + if (inflated_offset > offset) + inflated_addr += huge_size; + + if (inflated_addr > TASK_SIZE - len) + return addr; + + return inflated_addr; +} + +/** + * drm_get_unmapped_area() - Get an unused user-space virtual memory area + * suitable for huge page table entries. + * @file: The struct file representing the address space being mmap()'d. + * @uaddr: Start address suggested by user-space. + * @len: Length of the area. + * @pgoff: The page offset into the address space. + * @flags: mmap flags + * @mgr: The address space manager used by the drm driver. This argument can + * probably be removed at some point when all drivers use the same + * address space manager. + * + * This function attempts to find an unused user-space virtual memory area + * that can accommodate the size we want to map, and that is properly + * aligned to facilitate huge page table entries matching actual + * huge pages or huge page aligned memory in buffer objects. Buffer objects + * are assumed to start at huge page boundary pfns (io memory) or be + * populated by huge pages aligned to the start of the buffer object + * (system- or coherent memory). Adapted from shmem_get_unmapped_area. + * + * Return: aligned user-space address. + */ +unsigned long drm_get_unmapped_area(struct file *file, + unsigned long uaddr, unsigned long len, + unsigned long pgoff, unsigned long flags, + struct drm_vma_offset_manager *mgr) +{ + unsigned long addr; + unsigned long inflated_addr; + struct drm_vma_offset_node *node; + + if (len > TASK_SIZE) + return -ENOMEM; + + /* + * @pgoff is the file page-offset the huge page boundaries of + * which typically aligns to physical address huge page boundaries. + * That's not true for DRM, however, where physical address huge + * page boundaries instead are aligned with the offset from + * buffer object start. So adjust @pgoff to be the offset from + * buffer object start. + */ + drm_vma_offset_lock_lookup(mgr); + node = drm_vma_offset_lookup_locked(mgr, pgoff, 1); + if (node) + pgoff -= node->vm_node.start; + drm_vma_offset_unlock_lookup(mgr); + + addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags); + if (IS_ERR_VALUE(addr)) + return addr; + if (addr & ~PAGE_MASK) + return addr; + if (addr > TASK_SIZE - len) + return addr; + + if (len < HPAGE_PMD_SIZE) + return addr; + if (flags & MAP_FIXED) + return addr; + /* + * Our priority is to support MAP_SHARED mapped hugely; + * and support MAP_PRIVATE mapped hugely too, until it is COWed. + * But if caller specified an address hint, respect that as before. 
+ */ + if (uaddr) + return addr; + + inflated_addr = drm_addr_inflate(addr, len, pgoff, flags, + HPAGE_PMD_SIZE); + + if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) && + len >= HPAGE_PUD_SIZE) + inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff, + flags, HPAGE_PUD_SIZE); + return inflated_addr; +} +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +unsigned long drm_get_unmapped_area(struct file *file, + unsigned long uaddr, unsigned long len, + unsigned long pgoff, unsigned long flags, + struct drm_vma_offset_manager *mgr) +{ + return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +EXPORT_SYMBOL_GPL(drm_get_unmapped_area); +#endif /* CONFIG_MMU */ diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 57ac94ce9b9e..0375b3d7f8d0 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -1207,10 +1207,10 @@ static const struct drm_info_list drm_framebuffer_debugfs_list[] = { { "framebuffer", drm_framebuffer_info, 0 }, }; -int drm_framebuffer_debugfs_init(struct drm_minor *minor) +void drm_framebuffer_debugfs_init(struct drm_minor *minor) { - return drm_debugfs_create_files(drm_framebuffer_debugfs_list, - ARRAY_SIZE(drm_framebuffer_debugfs_list), - minor->debugfs_root, minor); + drm_debugfs_create_files(drm_framebuffer_debugfs_list, + ARRAY_SIZE(drm_framebuffer_debugfs_list), + minor->debugfs_root, minor); } #endif diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 37627d06fb06..7bf628e13023 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -44,6 +44,7 @@ #include <drm/drm_drv.h> #include <drm/drm_file.h> #include <drm/drm_gem.h> +#include <drm/drm_managed.h> #include <drm/drm_print.h> #include <drm/drm_vma_manager.h> @@ -77,6 +78,12 @@ * up at a later date, and as our interface with shmfs for memory allocation. */ +static void +drm_gem_init_release(struct drm_device *dev, void *ptr) +{ + drm_vma_offset_manager_destroy(dev->vma_offset_manager); +} + /** * drm_gem_init - Initialize the GEM device fields * @dev: drm_devic structure to initialize @@ -89,7 +96,8 @@ drm_gem_init(struct drm_device *dev) mutex_init(&dev->object_name_lock); idr_init_base(&dev->object_name_idr, 1); - vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL); + vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager), + GFP_KERNEL); if (!vma_offset_manager) { DRM_ERROR("out of memory\n"); return -ENOMEM; @@ -100,16 +108,7 @@ drm_gem_init(struct drm_device *dev) DRM_FILE_PAGE_OFFSET_START, DRM_FILE_PAGE_OFFSET_SIZE); - return 0; -} - -void -drm_gem_destroy(struct drm_device *dev) -{ - - drm_vma_offset_manager_destroy(dev->vma_offset_manager); - kfree(dev->vma_offset_manager); - dev->vma_offset_manager = NULL; + return drmm_add_action(dev, drm_gem_init_release, NULL); } /** @@ -432,7 +431,7 @@ err_unref: * drm_gem_handle_create - create a gem handle for an object * @file_priv: drm file-private structure to register the handle for * @obj: object to register - * @handlep: pionter to return the created handle to the caller + * @handlep: pointer to return the created handle to the caller * * Create a handle for this object. This adds a handle reference to the object, * which includes a regular reference count. 
Callers will likely want to diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 3a7ace19a902..ccc2c71fa491 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -21,6 +21,13 @@ #include <drm/drm_modeset_helper.h> #include <drm/drm_simple_kms_helper.h> +#define AFBC_HEADER_SIZE 16 +#define AFBC_TH_LAYOUT_ALIGNMENT 8 +#define AFBC_HDR_ALIGN 64 +#define AFBC_SUPERBLOCK_PIXELS 256 +#define AFBC_SUPERBLOCK_ALIGNMENT 128 +#define AFBC_TH_BODY_START_ALIGNMENT 4096 + /** * DOC: overview * @@ -54,32 +61,25 @@ struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb, } EXPORT_SYMBOL_GPL(drm_gem_fb_get_obj); -static struct drm_framebuffer * -drm_gem_fb_alloc(struct drm_device *dev, +static int +drm_gem_fb_init(struct drm_device *dev, + struct drm_framebuffer *fb, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **obj, unsigned int num_planes, const struct drm_framebuffer_funcs *funcs) { - struct drm_framebuffer *fb; int ret, i; - fb = kzalloc(sizeof(*fb), GFP_KERNEL); - if (!fb) - return ERR_PTR(-ENOMEM); - drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); for (i = 0; i < num_planes; i++) fb->obj[i] = obj[i]; ret = drm_framebuffer_init(dev, fb, funcs); - if (ret) { + if (ret) drm_err(dev, "Failed to init framebuffer: %d\n", ret); - kfree(fb); - return ERR_PTR(ret); - } - return fb; + return ret; } /** @@ -123,10 +123,13 @@ int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file, EXPORT_SYMBOL(drm_gem_fb_create_handle); /** - * drm_gem_fb_create_with_funcs() - Helper function for the - * &drm_mode_config_funcs.fb_create - * callback + * drm_gem_fb_init_with_funcs() - Helper function for implementing + * &drm_mode_config_funcs.fb_create + * callback in cases when the driver + * allocates a subclass of + * struct drm_framebuffer * @dev: DRM device + * @fb: framebuffer object * @file: DRM file that holds the GEM handle(s) backing the framebuffer * @mode_cmd: Metadata from the userspace framebuffer creation request * @funcs: vtable to be used for the new framebuffer object @@ -134,23 +137,26 @@ EXPORT_SYMBOL(drm_gem_fb_create_handle); * This function can be used to set &drm_framebuffer_funcs for drivers that need * custom framebuffer callbacks. Use drm_gem_fb_create() if you don't need to * change &drm_framebuffer_funcs. The function does buffer size validation. + * The buffer size validation is for a general case, though, so users should + * pay attention to the checks being appropriate for them or, at least, + * non-conflicting. * * Returns: - * Pointer to a &drm_framebuffer on success or an error pointer on failure. + * Zero or a negative error code. */ -struct drm_framebuffer * -drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file, - const struct drm_mode_fb_cmd2 *mode_cmd, - const struct drm_framebuffer_funcs *funcs) +int drm_gem_fb_init_with_funcs(struct drm_device *dev, + struct drm_framebuffer *fb, + struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_framebuffer_funcs *funcs) { const struct drm_format_info *info; struct drm_gem_object *objs[4]; - struct drm_framebuffer *fb; int ret, i; info = drm_get_format_info(dev, mode_cmd); if (!info) - return ERR_PTR(-EINVAL); + return -EINVAL; for (i = 0; i < info->num_planes; i++) { unsigned int width = mode_cmd->width / (i ? 
info->hsub : 1); @@ -175,19 +181,55 @@ drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file, } } - fb = drm_gem_fb_alloc(dev, mode_cmd, objs, i, funcs); - if (IS_ERR(fb)) { - ret = PTR_ERR(fb); + ret = drm_gem_fb_init(dev, fb, mode_cmd, objs, i, funcs); + if (ret) goto err_gem_object_put; - } - return fb; + return 0; err_gem_object_put: for (i--; i >= 0; i--) drm_gem_object_put_unlocked(objs[i]); - return ERR_PTR(ret); + return ret; +} +EXPORT_SYMBOL_GPL(drm_gem_fb_init_with_funcs); + +/** + * drm_gem_fb_create_with_funcs() - Helper function for the + * &drm_mode_config_funcs.fb_create + * callback + * @dev: DRM device + * @file: DRM file that holds the GEM handle(s) backing the framebuffer + * @mode_cmd: Metadata from the userspace framebuffer creation request + * @funcs: vtable to be used for the new framebuffer object + * + * This function can be used to set &drm_framebuffer_funcs for drivers that need + * custom framebuffer callbacks. Use drm_gem_fb_create() if you don't need to + * change &drm_framebuffer_funcs. The function does buffer size validation. + * + * Returns: + * Pointer to a &drm_framebuffer on success or an error pointer on failure. + */ +struct drm_framebuffer * +drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_framebuffer_funcs *funcs) +{ + struct drm_framebuffer *fb; + int ret; + + fb = kzalloc(sizeof(*fb), GFP_KERNEL); + if (!fb) + return ERR_PTR(-ENOMEM); + + ret = drm_gem_fb_init_with_funcs(dev, fb, file, mode_cmd, funcs); + if (ret) { + kfree(fb); + return ERR_PTR(ret); + } + + return fb; } EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_funcs); @@ -265,6 +307,132 @@ drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file, } EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty); +static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + const struct drm_format_info *info; + + info = drm_get_format_info(dev, mode_cmd); + + /* use whatever a driver has set */ + if (info->cpp[0]) + return info->cpp[0] * 8; + + /* guess otherwise */ + switch (info->format) { + case DRM_FORMAT_YUV420_8BIT: + return 12; + case DRM_FORMAT_YUV420_10BIT: + return 15; + case DRM_FORMAT_VUY101010: + return 30; + default: + break; + } + + /* all attempts failed */ + return 0; +} + +static int drm_gem_afbc_min_size(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_afbc_framebuffer *afbc_fb) +{ + __u32 n_blocks, w_alignment, h_alignment, hdr_alignment; + /* remove bpp when all users properly encode cpp in drm_format_info */ + __u32 bpp; + + switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) { + case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16: + afbc_fb->block_width = 16; + afbc_fb->block_height = 16; + break; + case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8: + afbc_fb->block_width = 32; + afbc_fb->block_height = 8; + break; + /* no user exists yet - fall through */ + case AFBC_FORMAT_MOD_BLOCK_SIZE_64x4: + case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8_64x4: + default: + drm_dbg_kms(dev, "Invalid AFBC_FORMAT_MOD_BLOCK_SIZE: %lld.\n", + mode_cmd->modifier[0] + & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK); + return -EINVAL; + } + + /* tiled header afbc */ + w_alignment = afbc_fb->block_width; + h_alignment = afbc_fb->block_height; + hdr_alignment = AFBC_HDR_ALIGN; + if (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_TILED) { + w_alignment *= AFBC_TH_LAYOUT_ALIGNMENT; + h_alignment *= AFBC_TH_LAYOUT_ALIGNMENT; + hdr_alignment = 
AFBC_TH_BODY_START_ALIGNMENT; + } + + afbc_fb->aligned_width = ALIGN(mode_cmd->width, w_alignment); + afbc_fb->aligned_height = ALIGN(mode_cmd->height, h_alignment); + afbc_fb->offset = mode_cmd->offsets[0]; + + bpp = drm_gem_afbc_get_bpp(dev, mode_cmd); + if (!bpp) { + drm_dbg_kms(dev, "Invalid AFBC bpp value: %d\n", bpp); + return -EINVAL; + } + + n_blocks = (afbc_fb->aligned_width * afbc_fb->aligned_height) + / AFBC_SUPERBLOCK_PIXELS; + afbc_fb->afbc_size = ALIGN(n_blocks * AFBC_HEADER_SIZE, hdr_alignment); + afbc_fb->afbc_size += n_blocks * ALIGN(bpp * AFBC_SUPERBLOCK_PIXELS / 8, + AFBC_SUPERBLOCK_ALIGNMENT); + + return 0; +} + +/** + * drm_gem_fb_afbc_init() - Helper function for drivers using afbc to + * fill and validate all the afbc-specific + * struct drm_afbc_framebuffer members + * + * @dev: DRM device + * @afbc_fb: afbc-specific framebuffer + * @mode_cmd: Metadata from the userspace framebuffer creation request + * @afbc_fb: afbc framebuffer + * + * This function can be used by drivers which support afbc to complete + * the preparation of struct drm_afbc_framebuffer. It must be called after + * allocating the said struct and calling drm_gem_fb_init_with_funcs(). + * It is caller's responsibility to put afbc_fb->base.obj objects in case + * the call is unsuccessful. + * + * Returns: + * Zero on success or a negative error value on failure. + */ +int drm_gem_fb_afbc_init(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_afbc_framebuffer *afbc_fb) +{ + const struct drm_format_info *info; + struct drm_gem_object **objs; + int ret; + + objs = afbc_fb->base.obj; + info = drm_get_format_info(dev, mode_cmd); + if (!info) + return -EINVAL; + + ret = drm_gem_afbc_min_size(dev, mode_cmd, afbc_fb); + if (ret < 0) + return ret; + + if (objs[0]->size < afbc_fb->afbc_size) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL_GPL(drm_gem_fb_afbc_init); + /** * drm_gem_fb_prepare_fb() - Prepare a GEM backed framebuffer * @plane: Plane diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 92a11bb42365..8b2d5c945c95 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -1,10 +1,13 @@ // SPDX-License-Identifier: GPL-2.0-or-later +#include <linux/module.h> + #include <drm/drm_debugfs.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> #include <drm/drm_framebuffer.h> +#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_gem_ttm_helper.h> #include <drm/drm_gem_vram_helper.h> #include <drm/drm_mode.h> @@ -18,13 +21,93 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs; /** * DOC: overview * - * This library provides a GEM buffer object that is backed by video RAM - * (VRAM). It can be used for framebuffer devices with dedicated memory. + * This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM + * buffer object that is backed by video RAM (VRAM). It can be used for + * framebuffer devices with dedicated memory. * * The data structure &struct drm_vram_mm and its helpers implement a memory - * manager for simple framebuffer devices with dedicated video memory. Buffer - * objects are either placed in video RAM or evicted to system memory. The rsp. - * buffer object is provided by &struct drm_gem_vram_object. + * manager for simple framebuffer devices with dedicated video memory. GEM + * VRAM buffer objects are either placed in the video memory or remain evicted + * to system memory. 
+ * + * With the GEM interface userspace applications create, manage and destroy + * graphics buffers, such as an on-screen framebuffer. GEM does not provide + * an implementation of these interfaces. It's up to the DRM driver to + * provide an implementation that suits the hardware. If the hardware device + * contains dedicated video memory, the DRM driver can use the VRAM helper + * library. Each active buffer object is stored in video RAM. Active + * buffers are used for drawing the current frame, typically something like + * the frame's scanout buffer or the cursor image. If there's no more space + * left in VRAM, inactive GEM objects can be moved to system memory. + * + * The easiest way to use the VRAM helper library is to call + * drm_vram_helper_alloc_mm(). The function allocates and initializes an + * instance of &struct drm_vram_mm in &struct drm_device.vram_mm . Use + * &DRM_GEM_VRAM_DRIVER to initialize &struct drm_driver and + * &DRM_VRAM_MM_FILE_OPERATIONS to initialize &struct file_operations; + * as illustrated below. + * + * .. code-block:: c + * + * struct file_operations fops = { + * .owner = THIS_MODULE, + * DRM_VRAM_MM_FILE_OPERATION + * }; + * struct drm_driver drv = { + * .driver_feature = DRM_ ... , + * .fops = &fops, + * DRM_GEM_VRAM_DRIVER + * }; + * + * int init_drm_driver() + * { + * struct drm_device *dev; + * uint64_t vram_base; + * unsigned long vram_size; + * int ret; + * + * // setup device, vram base and size + * // ... + * + * ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size); + * if (ret) + * return ret; + * return 0; + * } + * + * This creates an instance of &struct drm_vram_mm, exports DRM userspace + * interfaces for GEM buffer management and initializes file operations to + * allow for accessing created GEM buffers. With this setup, the DRM driver + * manages an area of video RAM with VRAM MM and provides GEM VRAM objects + * to userspace. + * + * To clean up the VRAM memory management, call drm_vram_helper_release_mm() + * in the driver's clean-up code. + * + * .. code-block:: c + * + * void fini_drm_driver() + * { + * struct drm_device *dev = ...; + * + * drm_vram_helper_release_mm(dev); + * } + * + * For drawing or scanout operations, buffer objects have to be pinned in video + * RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or + * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system + * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards. + * + * A buffer object that is pinned in video RAM has a fixed address within that + * memory region. Call drm_gem_vram_offset() to retrieve this value. Typically + * it's used to program the hardware's scanout engine for framebuffers, set + * the cursor overlay's image for a mouse cursor, or use it as input to the + * hardware's drawing engine. + * + * To access a buffer object's memory from the DRM driver, call + * drm_gem_vram_kmap(). It (optionally) maps the buffer into kernel address + * space and returns the memory address. Use drm_gem_vram_kunmap() to + * release the mapping. */ /* @@ -670,9 +753,9 @@ EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset); * @plane: a DRM plane * @new_state: the plane's new state * - * During plane updates, this function pins the GEM VRAM - * objects of the plane's new framebuffer to VRAM. Call - * drm_gem_vram_plane_helper_cleanup_fb() to unpin them. + * During plane updates, this function sets the plane's fence and + * pins the GEM VRAM objects of the plane's new framebuffer to VRAM.
+ * Call drm_gem_vram_plane_helper_cleanup_fb() to unpin them. * * Returns: * 0 on success, or @@ -698,6 +781,10 @@ drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane, goto err_drm_gem_vram_unpin; } + ret = drm_gem_fb_prepare_fb(plane, new_state); + if (ret) + goto err_drm_gem_vram_unpin; + return 0; err_drm_gem_vram_unpin: @@ -1018,7 +1105,6 @@ static struct ttm_bo_driver bo_driver = { * struct drm_vram_mm */ -#if defined(CONFIG_DEBUG_FS) static int drm_vram_mm_debugfs(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -1035,27 +1121,18 @@ static int drm_vram_mm_debugfs(struct seq_file *m, void *data) static const struct drm_info_list drm_vram_mm_debugfs_list[] = { { "vram-mm", drm_vram_mm_debugfs, 0, NULL }, }; -#endif /** * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file. * * @minor: drm minor device. * - * Returns: - * 0 on success, or - * a negative error code otherwise. */ -int drm_vram_mm_debugfs_init(struct drm_minor *minor) +void drm_vram_mm_debugfs_init(struct drm_minor *minor) { - int ret = 0; - -#if defined(CONFIG_DEBUG_FS) - ret = drm_debugfs_create_files(drm_vram_mm_debugfs_list, - ARRAY_SIZE(drm_vram_mm_debugfs_list), - minor->debugfs_root, minor); -#endif - return ret; + drm_debugfs_create_files(drm_vram_mm_debugfs_list, + ARRAY_SIZE(drm_vram_mm_debugfs_list), + minor->debugfs_root, minor); } EXPORT_SYMBOL(drm_vram_mm_debugfs_init); @@ -1202,3 +1279,6 @@ drm_vram_helper_mode_valid(struct drm_device *dev, return drm_vram_helper_mode_valid_internal(dev, mode, max_bpp); } EXPORT_SYMBOL(drm_vram_helper_mode_valid); + +MODULE_DESCRIPTION("DRM VRAM memory-management helpers"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 5714a78365ac..2470a352730b 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -89,9 +89,11 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr struct drm_minor *drm_minor_acquire(unsigned int minor_id); void drm_minor_release(struct drm_minor *minor); +/* drm_managed.c */ +void drm_managed_release(struct drm_device *dev); + /* drm_vblank.c */ void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe); -void drm_vblank_cleanup(struct drm_device *dev); /* IOCTLS */ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, @@ -141,7 +143,6 @@ void drm_sysfs_lease_event(struct drm_device *dev); /* drm_gem.c */ struct drm_gem_object; int drm_gem_init(struct drm_device *dev); -void drm_gem_destroy(struct drm_device *dev); int drm_gem_handle_create_tail(struct drm_file *file_priv, struct drm_gem_object *obj, u32 *handlep); @@ -235,4 +236,4 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data, /* drm_framebuffer.c */ void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent, const struct drm_framebuffer *fb); -int drm_framebuffer_debugfs_init(struct drm_minor *minor); +void drm_framebuffer_debugfs_init(struct drm_minor *minor); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 9e41972c4bbc..73e31dd4e442 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -599,8 +599,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, 
DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, 0), DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY), DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c new file mode 100644 index 000000000000..9cebfe370a65 --- /dev/null +++ b/drivers/gpu/drm/drm_managed.c @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Intel + * + * Based on drivers/base/devres.c + */ + +#include <drm/drm_managed.h> + +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include <drm/drm_device.h> +#include <drm/drm_print.h> + +/** + * DOC: managed resources + * + * Inspired by struct &device managed resources, but tied to the lifetime of + * struct &drm_device, which can outlive the underlying physical device, usually + * when userspace has some open files and other handles to resources still open. + * + * Release actions can be added with drmm_add_action(), memory allocations can + * be done directly with drmm_kmalloc() and the related functions. Everything + * will be released on the final drm_dev_put() in reverse order of how the + * release actions have been added and memory has been allocated since driver + * loading started with drm_dev_init(). + * + * Note that release actions and managed memory can also be added and removed + * during the lifetime of the driver, all the functions are fully concurrent + * safe. But it is recommended to use managed resources only for resources that + * change rarely, if ever, during the lifetime of the &drm_device instance. + */ + +struct drmres_node { + struct list_head entry; + drmres_release_t release; + const char *name; + size_t size; +}; + +struct drmres { + struct drmres_node node; + /* + * Some archs want to perform DMA into kmalloc caches + * and need a guaranteed alignment larger than + * the alignment of a 64-bit integer. + * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same + * buffer alignment as if it was allocated by plain kmalloc(). + */ + u8 __aligned(ARCH_KMALLOC_MINALIGN) data[]; +}; + +static void free_dr(struct drmres *dr) +{ + kfree_const(dr->node.name); + kfree(dr); +} + +void drm_managed_release(struct drm_device *dev) +{ + struct drmres *dr, *tmp; + + drm_dbg_drmres(dev, "drmres release begin\n"); + list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) { + drm_dbg_drmres(dev, "REL %p %s (%zu bytes)\n", + dr, dr->node.name, dr->node.size); + + if (dr->node.release) + dr->node.release(dev, dr->node.size ? *(void **)&dr->data : NULL); + + list_del(&dr->node.entry); + free_dr(dr); + } + drm_dbg_drmres(dev, "drmres release end\n"); +} + +/* + * Always inline so that kmalloc_track_caller tracks the actual interesting + * caller outside of drm_managed.c. + */ +static __always_inline struct drmres * alloc_dr(drmres_release_t release, + size_t size, gfp_t gfp, int nid) +{ + size_t tot_size; + struct drmres *dr; + + /* We must catch any near-SIZE_MAX cases that could overflow. 
*/ + if (unlikely(check_add_overflow(sizeof(*dr), size, &tot_size))) + return NULL; + + dr = kmalloc_node_track_caller(tot_size, gfp, nid); + if (unlikely(!dr)) + return NULL; + + memset(dr, 0, offsetof(struct drmres, data)); + + INIT_LIST_HEAD(&dr->node.entry); + dr->node.release = release; + dr->node.size = size; + + return dr; +} + +static void del_dr(struct drm_device *dev, struct drmres *dr) +{ + list_del_init(&dr->node.entry); + + drm_dbg_drmres(dev, "DEL %p %s (%lu bytes)\n", + dr, dr->node.name, (unsigned long) dr->node.size); +} + +static void add_dr(struct drm_device *dev, struct drmres *dr) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->managed.lock, flags); + list_add(&dr->node.entry, &dev->managed.resources); + spin_unlock_irqrestore(&dev->managed.lock, flags); + + drm_dbg_drmres(dev, "ADD %p %s (%lu bytes)\n", + dr, dr->node.name, (unsigned long) dr->node.size); +} + +/** + * drmm_add_final_kfree - add release action for the final kfree() + * @dev: DRM device + * @container: pointer to the kmalloc allocation containing @dev + * + * Since the allocation containing the struct &drm_device must be allocated + * before it can be initialized with drm_dev_init() there's no way to allocate + * that memory with drmm_kmalloc(). To side-step this chicken-egg problem the + * pointer for this final kfree() must be specified by calling this function. It + * will be released in the final drm_dev_put() for @dev, after all other release + * actions installed through drmm_add_action() have been processed. + */ +void drmm_add_final_kfree(struct drm_device *dev, void *container) +{ + WARN_ON(dev->managed.final_kfree); + WARN_ON(dev < (struct drm_device *) container); + WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container))); + dev->managed.final_kfree = container; +} +EXPORT_SYMBOL(drmm_add_final_kfree); + +int __drmm_add_action(struct drm_device *dev, + drmres_release_t action, + void *data, const char *name) +{ + struct drmres *dr; + void **void_ptr; + + dr = alloc_dr(action, data ? sizeof(void*) : 0, + GFP_KERNEL | __GFP_ZERO, + dev_to_node(dev->dev)); + if (!dr) { + drm_dbg_drmres(dev, "failed to add action %s for %p\n", + name, data); + return -ENOMEM; + } + + dr->node.name = kstrdup_const(name, GFP_KERNEL); + if (data) { + void_ptr = (void **)&dr->data; + *void_ptr = data; + } + + add_dr(dev, dr); + + return 0; +} +EXPORT_SYMBOL(__drmm_add_action); + +int __drmm_add_action_or_reset(struct drm_device *dev, + drmres_release_t action, + void *data, const char *name) +{ + int ret; + + ret = __drmm_add_action(dev, action, data, name); + if (ret) + action(dev, data); + + return ret; +} +EXPORT_SYMBOL(__drmm_add_action_or_reset); + +/** + * drmm_kmalloc - &drm_device managed kmalloc() + * @dev: DRM device + * @size: size of the memory allocation + * @gfp: GFP allocation flags + * + * This is a &drm_device managed version of kmalloc(). The allocated memory is + * automatically freed on the final drm_dev_put(). Memory can also be freed + * before the final drm_dev_put() by calling drmm_kfree(). 
+ */ +void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp) +{ + struct drmres *dr; + + dr = alloc_dr(NULL, size, gfp, dev_to_node(dev->dev)); + if (!dr) { + drm_dbg_drmres(dev, "failed to allocate %zu bytes, %u flags\n", + size, gfp); + return NULL; + } + dr->node.name = kstrdup_const("kmalloc", GFP_KERNEL); + + add_dr(dev, dr); + + return dr->data; +} +EXPORT_SYMBOL(drmm_kmalloc); + +/** + * drmm_kstrdup - &drm_device managed kstrdup() + * @dev: DRM device + * @s: 0-terminated string to be duplicated + * @gfp: GFP allocation flags + * + * This is a &drm_device managed version of kstrdup(). The allocated memory is + * automatically freed on the final drm_dev_put() and works exactly like a + * memory allocation obtained by drmm_kmalloc(). + */ +char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp) +{ + size_t size; + char *buf; + + if (!s) + return NULL; + + size = strlen(s) + 1; + buf = drmm_kmalloc(dev, size, gfp); + if (buf) + memcpy(buf, s, size); + return buf; +} +EXPORT_SYMBOL_GPL(drmm_kstrdup); + +/** + * drmm_kfree - &drm_device managed kfree() + * @dev: DRM device + * @data: memory allocation to be freed + * + * This is a &drm_device managed version of kfree() which can be used to + * release memory allocated through drmm_kmalloc() or any of its related + * functions before the final drm_dev_put() of @dev. + */ +void drmm_kfree(struct drm_device *dev, void *data) +{ + struct drmres *dr_match = NULL, *dr; + unsigned long flags; + + if (!data) + return; + + spin_lock_irqsave(&dev->managed.lock, flags); + list_for_each_entry(dr, &dev->managed.resources, node.entry) { + if (dr->data == data) { + dr_match = dr; + del_dr(dev, dr_match); + break; + } + } + spin_unlock_irqrestore(&dev->managed.lock, flags); + + if (WARN_ON(!dr_match)) + return; + + free_dr(dr_match); +} +EXPORT_SYMBOL(drmm_kfree); diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index 558baf989f5a..bb27c82757f1 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -169,7 +169,8 @@ int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len) EXPORT_SYMBOL(mipi_dbi_command_buf); /* This should only be used by mipi_dbi_command() */ -int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len) +int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data, + size_t len) { u8 *buf; int ret; @@ -510,6 +511,10 @@ int mipi_dbi_dev_init_with_formats(struct mipi_dbi_dev *dbidev, if (!dbidev->dbi.command) return -EINVAL; + ret = drmm_mode_config_init(drm); + if (ret) + return ret; + dbidev->tx_buf = devm_kmalloc(drm->dev, tx_buf_size, GFP_KERNEL); if (!dbidev->tx_buf) return -ENOMEM; @@ -579,26 +584,6 @@ int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev, EXPORT_SYMBOL(mipi_dbi_dev_init); /** - * mipi_dbi_release - DRM driver release helper - * @drm: DRM device - * - * This function finalizes and frees &mipi_dbi. - * - * Drivers can use this as their &drm_driver->release callback. - */ -void mipi_dbi_release(struct drm_device *drm) -{ - struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(drm); - - DRM_DEBUG_DRIVER("\n"); - - drm_mode_config_cleanup(drm); - drm_dev_fini(drm); - kfree(dbidev); -} -EXPORT_SYMBOL(mipi_dbi_release); - -/** * mipi_dbi_hw_reset - Hardware reset of controller * @dbi: MIPI DBI structure * @@ -1308,10 +1293,8 @@ static const struct file_operations mipi_dbi_debugfs_command_fops = { * controller or getting the read command values. 
* Drivers can use this as their &drm_driver->debugfs_init callback. * - * Returns: - * Zero on success, negative error code on failure. */ -int mipi_dbi_debugfs_init(struct drm_minor *minor) +void mipi_dbi_debugfs_init(struct drm_minor *minor) { struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(minor->dev); umode_t mode = S_IFREG | S_IWUSR; @@ -1320,8 +1303,6 @@ int mipi_dbi_debugfs_init(struct drm_minor *minor) mode |= S_IRUGO; debugfs_create_file("command", mode, minor->debugfs_root, dbidev, &mipi_dbi_debugfs_command_fops); - - return 0; } EXPORT_SYMBOL(mipi_dbi_debugfs_init); diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index bc6e208949e8..8981abe8b7c9 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -45,7 +45,6 @@ #include <linux/export.h> #include <linux/interval_tree_generic.h> #include <linux/seq_file.h> -#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/stacktrace.h> @@ -367,11 +366,6 @@ next_hole(struct drm_mm *mm, struct drm_mm_node *node, enum drm_mm_insert_mode mode) { - /* Searching is slow; check if we ran out of time/patience */ - cond_resched(); - if (fatal_signal_pending(current)) - return NULL; - switch (mode) { default: case DRM_MM_INSERT_BEST: @@ -563,7 +557,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm, return 0; } - return signal_pending(current) ? -ERESTARTSYS : -ENOSPC; + return -ENOSPC; } EXPORT_SYMBOL(drm_mm_insert_node_in_range); diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c index 08e6eff6a179..5761f838a057 100644 --- a/drivers/gpu/drm/drm_mode_config.c +++ b/drivers/gpu/drm/drm_mode_config.c @@ -25,6 +25,7 @@ #include <drm/drm_drv.h> #include <drm/drm_encoder.h> #include <drm/drm_file.h> +#include <drm/drm_managed.h> #include <drm/drm_mode_config.h> #include <drm/drm_print.h> #include <linux/dma-resv.h> @@ -373,8 +374,14 @@ static int drm_mode_create_standard_properties(struct drm_device *dev) return 0; } +static void drm_mode_config_init_release(struct drm_device *dev, void *ptr) +{ + drm_mode_config_cleanup(dev); +} + /** - * drm_mode_config_init - initialize DRM mode_configuration structure + * drmm_mode_config_init - managed DRM mode_configuration structure + * initialization * @dev: DRM device * * Initialize @dev's mode_config structure, used for tracking the graphics @@ -384,8 +391,12 @@ static int drm_mode_create_standard_properties(struct drm_device *dev) * problem, since this should happen single threaded at init time. It is the * driver's problem to ensure this guarantee. * + * Cleanup is automatically handled through registering drm_mode_config_cleanup + * with drmm_add_action(). + * + * Returns: 0 on success, negative error value on failure. */ -void drm_mode_config_init(struct drm_device *dev) +int drmm_mode_config_init(struct drm_device *dev) { mutex_init(&dev->mode_config.mutex); drm_modeset_lock_init(&dev->mode_config.connection_mutex); @@ -443,8 +454,11 @@ void drm_mode_config_init(struct drm_device *dev) drm_modeset_acquire_fini(&modeset_ctx); dma_resv_fini(&resv); } + + return drmm_add_action_or_reset(dev, drm_mode_config_init_release, + NULL); } -EXPORT_SYMBOL(drm_mode_config_init); +EXPORT_SYMBOL(drmm_mode_config_init); /** * drm_mode_config_cleanup - free up DRM mode_config info @@ -456,6 +470,9 @@ EXPORT_SYMBOL(drm_mode_config_init); * Note that since this /should/ happen single-threaded at driver/device * teardown time, no locking is required. It's the driver's job to ensure that * this guarantee actually holds true. 
+ * + * FIXME: With the managed drmm_mode_config_init() it is no longer necessary for + * drivers to explicitly call this function. */ void drm_mode_config_cleanup(struct drm_device *dev) { @@ -532,3 +549,90 @@ void drm_mode_config_cleanup(struct drm_device *dev) drm_modeset_lock_fini(&dev->mode_config.connection_mutex); } EXPORT_SYMBOL(drm_mode_config_cleanup); + +static u32 full_encoder_mask(struct drm_device *dev) +{ + struct drm_encoder *encoder; + u32 encoder_mask = 0; + + drm_for_each_encoder(encoder, dev) + encoder_mask |= drm_encoder_mask(encoder); + + return encoder_mask; +} + +/* + * For some reason we want the encoder itself included in + * possible_clones. Make life easy for drivers by allowing them + * to leave possible_clones unset if no cloning is possible. + */ +static void fixup_encoder_possible_clones(struct drm_encoder *encoder) +{ + if (encoder->possible_clones == 0) + encoder->possible_clones = drm_encoder_mask(encoder); +} + +static void validate_encoder_possible_clones(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + u32 encoder_mask = full_encoder_mask(dev); + struct drm_encoder *other; + + drm_for_each_encoder(other, dev) { + WARN(!!(encoder->possible_clones & drm_encoder_mask(other)) != + !!(other->possible_clones & drm_encoder_mask(encoder)), + "possible_clones mismatch: " + "[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x vs. " + "[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x\n", + encoder->base.id, encoder->name, + drm_encoder_mask(encoder), encoder->possible_clones, + other->base.id, other->name, + drm_encoder_mask(other), other->possible_clones); + } + + WARN((encoder->possible_clones & drm_encoder_mask(encoder)) == 0 || + (encoder->possible_clones & ~encoder_mask) != 0, + "Bogus possible_clones: " + "[ENCODER:%d:%s] possible_clones=0x%x (full encoder mask=0x%x)\n", + encoder->base.id, encoder->name, + encoder->possible_clones, encoder_mask); +} + +static u32 full_crtc_mask(struct drm_device *dev) +{ + struct drm_crtc *crtc; + u32 crtc_mask = 0; + + drm_for_each_crtc(crtc, dev) + crtc_mask |= drm_crtc_mask(crtc); + + return crtc_mask; +} + +static void validate_encoder_possible_crtcs(struct drm_encoder *encoder) +{ + u32 crtc_mask = full_crtc_mask(encoder->dev); + + WARN((encoder->possible_crtcs & crtc_mask) == 0 || + (encoder->possible_crtcs & ~crtc_mask) != 0, + "Bogus possible_crtcs: " + "[ENCODER:%d:%s] possible_crtcs=0x%x (full crtc mask=0x%x)\n", + encoder->base.id, encoder->name, + encoder->possible_crtcs, crtc_mask); +} + +void drm_mode_config_validate(struct drm_device *dev) +{ + struct drm_encoder *encoder; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + drm_for_each_encoder(encoder, dev) + fixup_encoder_possible_clones(encoder); + + drm_for_each_encoder(encoder, dev) { + validate_encoder_possible_clones(encoder); + validate_encoder_possible_crtcs(encoder); + } +} diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 81aa21561982..75e2b7053f35 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -30,12 +30,13 @@ #include <drm/drm.h> #include <drm/drm_agpsupport.h> #include <drm/drm_drv.h> -#include <drm/drm_pci.h> #include <drm/drm_print.h> #include "drm_internal.h" #include "drm_legacy.h" +#ifdef CONFIG_DRM_LEGACY + /** * drm_pci_alloc - Allocate a PCI consistent memory block, for DMA. 
* @dev: DRM device @@ -93,6 +94,7 @@ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) } EXPORT_SYMBOL(drm_pci_free); +#endif static int drm_get_pci_domain(struct drm_device *dev) { diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 1de2cde2277c..282774e469ac 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -962,27 +962,40 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, unsigned count; struct scatterlist *sg; struct page *page; - u32 len, index; + u32 page_len, page_index; dma_addr_t addr; + u32 dma_len, dma_index; - index = 0; + /* + * Scatterlist elements contains both pages and DMA addresses, but + * one shoud not assume 1:1 relation between them. The sg->length is + * the size of the physical memory chunk described by the sg->page, + * while sg_dma_len(sg) is the size of the DMA (IO virtual) chunk + * described by the sg_dma_address(sg). + */ + page_index = 0; + dma_index = 0; for_each_sg(sgt->sgl, sg, sgt->nents, count) { - len = sg_dma_len(sg); + page_len = sg->length; page = sg_page(sg); + dma_len = sg_dma_len(sg); addr = sg_dma_address(sg); - while (len > 0) { - if (WARN_ON(index >= max_entries)) + while (pages && page_len > 0) { + if (WARN_ON(page_index >= max_entries)) return -1; - if (pages) - pages[index] = page; - if (addrs) - addrs[index] = addr; - + pages[page_index] = page; page++; + page_len -= PAGE_SIZE; + page_index++; + } + while (addrs && dma_len > 0) { + if (WARN_ON(dma_index >= max_entries)) + return -1; + addrs[dma_index] = addr; addr += PAGE_SIZE; - len -= PAGE_SIZE; - index++; + dma_len -= PAGE_SIZE; + dma_index++; } } return 0; diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index da7b0b0c1090..758bf74e1cab 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -30,6 +30,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_drv.h> #include <drm/drm_framebuffer.h> +#include <drm/drm_managed.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_print.h> #include <drm/drm_vblank.h> @@ -40,6 +41,69 @@ /** * DOC: vblank handling * + * From the computer's perspective, every time the monitor displays + * a new frame the scanout engine has "scanned out" the display image + * from top to bottom, one row of pixels at a time. The current row + * of pixels is referred to as the current scanline. + * + * In addition to the display's visible area, there's usually a couple of + * extra scanlines which aren't actually displayed on the screen. + * These extra scanlines don't contain image data and are occasionally used + * for features like audio and infoframes. The region made up of these + * scanlines is referred to as the vertical blanking region, or vblank for + * short. + * + * For historical reference, the vertical blanking period was designed to + * give the electron gun (on CRTs) enough time to move back to the top of + * the screen to start scanning out the next frame. Similar for horizontal + * blanking periods. They were designed to give the electron gun enough + * time to move back to the other side of the screen to start scanning the + * next scanline. 
+ * + * :: + * + * + * physical → ⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽ + * top of | | + * display | | + * | New frame | + * | | + * |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓| + * |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~| ← Scanline, + * |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓| updates the + * | | frame as it + * | | travels down + * | | ("scan out") + * | Old frame | + * | | + * | | + * | | + * | | physical + * | | bottom of + * vertical |⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽| ← display + * blanking ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆ + * region → ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆ + * ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆ + * start of → ⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽ + * new frame + * + * "Physical top of display" is the reference point for the high-precision/ + * corrected timestamp. + * + * On a lot of display hardware, programming needs to take effect during the + * vertical blanking period so that settings like gamma, the image + * buffer to be scanned out, etc. can safely be changed without showing + * any visual artifacts on the screen. On some unforgiving hardware, some of + * this programming has to both start and end in the same vblank. To help + * with the timing of the hardware programming, an interrupt is usually + * available to notify the driver when it can start updating the registers. + * The interrupt is in this context named the vblank interrupt. + * + * The vblank interrupt may be fired at different points depending on the + * hardware. Some hardware implementations will fire the interrupt when the + * new frame starts, while other implementations will fire the interrupt at different + * points in time. + * * Vertical blanking plays a major role in graphics rendering. To achieve * tear-free display, users must synchronize page flips and/or rendering to * vertical blanking. The DRM API offers ioctls to perform page flips @@ -425,14 +489,10 @@ static void vblank_disable_fn(struct timer_list *t) spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } -void drm_vblank_cleanup(struct drm_device *dev) +static void drm_vblank_init_release(struct drm_device *dev, void *ptr) { unsigned int pipe; - /* Bail if the driver didn't call drm_vblank_init() */ - if (dev->num_crtcs == 0) - return; - for (pipe = 0; pipe < dev->num_crtcs; pipe++) { struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; @@ -441,10 +501,6 @@ void drm_vblank_cleanup(struct drm_device *dev) del_timer_sync(&vblank->disable_timer); } - - kfree(dev->vblank); - - dev->num_crtcs = 0; } /** @@ -453,25 +509,29 @@ void drm_vblank_cleanup(struct drm_device *dev) * @num_crtcs: number of CRTCs supported by @dev * * This function initializes vblank support for @num_crtcs display pipelines. - * Cleanup is handled by the DRM core, or through calling drm_dev_fini() for - * drivers with a &drm_driver.release callback. + * Cleanup is handled automatically through a cleanup function added with + * drmm_add_action(). * * Returns: * Zero on success or a negative error code on failure.
*/ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs) { - int ret = -ENOMEM; + int ret; unsigned int i; spin_lock_init(&dev->vbl_lock); spin_lock_init(&dev->vblank_time_lock); + dev->vblank = drmm_kcalloc(dev, num_crtcs, sizeof(*dev->vblank), GFP_KERNEL); + if (!dev->vblank) + return -ENOMEM; + dev->num_crtcs = num_crtcs; - dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL); - if (!dev->vblank) - goto err; + ret = drmm_add_action(dev, drm_vblank_init_release, NULL); + if (ret) + return ret; for (i = 0; i < num_crtcs; i++) { struct drm_vblank_crtc *vblank = &dev->vblank[i]; @@ -486,10 +546,6 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs) DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n"); return 0; - -err: - dev->num_crtcs = 0; - return ret; } EXPORT_SYMBOL(drm_vblank_init); diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index aa88911bbc06..56197ae0b2f9 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -595,8 +595,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) vma->vm_ops = &drm_vm_ops; break; } + fallthrough; /* to _DRM_FRAME_BUFFER... */ #endif - /* fall through - to _DRM_FRAME_BUFFER... */ case _DRM_FRAME_BUFFER: case _DRM_REGISTERS: offset = drm_core_get_reg_ofs(dev); @@ -621,7 +621,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) vma->vm_end - vma->vm_start, vma->vm_page_prot)) return -EAGAIN; vma->vm_page_prot = drm_dma_prot(map->type, vma); - /* fall through - to _DRM_SHM */ + fallthrough; /* to _DRM_SHM */ case _DRM_SHM: vma->vm_ops = &drm_vm_shm_ops; vma->vm_private_data = (void *)map; diff --git a/drivers/gpu/drm/drm_vram_helper_common.c b/drivers/gpu/drm/drm_vram_helper_common.c deleted file mode 100644 index 2000d9b33fd5..000000000000 --- a/drivers/gpu/drm/drm_vram_helper_common.c +++ /dev/null @@ -1,94 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later - -#include <linux/module.h> - -/** - * DOC: overview - * - * This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM - * buffer object that is backed by video RAM. It can be used for - * framebuffer devices with dedicated memory. The video RAM is managed - * by &struct drm_vram_mm (VRAM MM). - * - * With the GEM interface userspace applications create, manage and destroy - * graphics buffers, such as an on-screen framebuffer. GEM does not provide - * an implementation of these interfaces. It's up to the DRM driver to - * provide an implementation that suits the hardware. If the hardware device - * contains dedicated video memory, the DRM driver can use the VRAM helper - * library. Each active buffer object is stored in video RAM. Active - * buffer are used for drawing the current frame, typically something like - * the frame's scanout buffer or the cursor image. If there's no more space - * left in VRAM, inactive GEM objects can be moved to system memory. - * - * The easiest way to use the VRAM helper library is to call - * drm_vram_helper_alloc_mm(). The function allocates and initializes an - * instance of &struct drm_vram_mm in &struct drm_device.vram_mm . Use - * &DRM_GEM_VRAM_DRIVER to initialize &struct drm_driver and - * &DRM_VRAM_MM_FILE_OPERATIONS to initialize &struct file_operations; - * as illustrated below. - * - * .. code-block:: c - * - * struct file_operations fops ={ - * .owner = THIS_MODULE, - * DRM_VRAM_MM_FILE_OPERATION - * }; - * struct drm_driver drv = { - * .driver_feature = DRM_ ... 
, - * .fops = &fops, - * DRM_GEM_VRAM_DRIVER - * }; - * - * int init_drm_driver() - * { - * struct drm_device *dev; - * uint64_t vram_base; - * unsigned long vram_size; - * int ret; - * - * // setup device, vram base and size - * // ... - * - * ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size); - * if (ret) - * return ret; - * return 0; - * } - * - * This creates an instance of &struct drm_vram_mm, exports DRM userspace - * interfaces for GEM buffer management and initializes file operations to - * allow for accessing created GEM buffers. With this setup, the DRM driver - * manages an area of video RAM with VRAM MM and provides GEM VRAM objects - * to userspace. - * - * To clean up the VRAM memory management, call drm_vram_helper_release_mm() - * in the driver's clean-up code. - * - * .. code-block:: c - * - * void fini_drm_driver() - * { - * struct drm_device *dev = ...; - * - * drm_vram_helper_release_mm(dev); - * } - * - * For drawing or scanout operations, buffer object have to be pinned in video - * RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or - * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system - * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards. - * - * A buffer object that is pinned in video RAM has a fixed address within that - * memory region. Call drm_gem_vram_offset() to retrieve this value. Typically - * it's used to program the hardware's scanout engine for framebuffers, set - * the cursor overlay's image for a mouse cursor, or use it as input to the - * hardware's draing engine. - * - * To access a buffer object's memory from the DRM driver, call - * drm_gem_vram_kmap(). It (optionally) maps the buffer into kernel address - * space and returns the memory address. Use drm_gem_vram_kunmap() to - * release the mapping. 
- */ - -MODULE_DESCRIPTION("DRM VRAM memory-management helpers"); -MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index a8685b2e1803..27c948f5dfeb 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -231,21 +231,11 @@ static struct drm_info_list etnaviv_debugfs_list[] = { {"ring", show_each_gpu, 0, etnaviv_ring_show}, }; -static int etnaviv_debugfs_init(struct drm_minor *minor) +static void etnaviv_debugfs_init(struct drm_minor *minor) { - struct drm_device *dev = minor->dev; - int ret; - - ret = drm_debugfs_create_files(etnaviv_debugfs_list, - ARRAY_SIZE(etnaviv_debugfs_list), - minor->debugfs_root, minor); - - if (ret) { - dev_err(dev->dev, "could not install etnaviv_debugfs_list\n"); - return ret; - } - - return ret; + drm_debugfs_create_files(etnaviv_debugfs_list, + ARRAY_SIZE(etnaviv_debugfs_list), + minor->debugfs_root, minor); } #endif diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c index d23d3502ca91..9ac51b6ab34b 100644 --- a/drivers/gpu/drm/exynos/exynos_dp.c +++ b/drivers/gpu/drm/exynos/exynos_dp.c @@ -25,6 +25,7 @@ #include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include <drm/exynos_drm.h> #include "exynos_drm_crtc.h" @@ -135,10 +136,6 @@ static const struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = { .disable = exynos_dp_nop, }; -static const struct drm_encoder_funcs exynos_dp_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp) { int ret; @@ -159,23 +156,15 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data) struct drm_device *drm_dev = data; int ret; - dp->dev = dev; dp->drm_dev = drm_dev; - dp->plat_data.dev_type = EXYNOS_DP; - dp->plat_data.power_on_start = exynos_dp_poweron; - dp->plat_data.power_off = exynos_dp_poweroff; - dp->plat_data.attach = exynos_dp_bridge_attach; - dp->plat_data.get_modes = exynos_dp_get_modes; - if (!dp->plat_data.panel && !dp->ptn_bridge) { ret = exynos_dp_dt_parse_panel(dp); if (ret) return ret; } - drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs); @@ -185,13 +174,11 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data) dp->plat_data.encoder = encoder; - dp->adp = analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data); - if (IS_ERR(dp->adp)) { + ret = analogix_dp_bind(dp->adp, dp->drm_dev); + if (ret) dp->encoder.funcs->destroy(&dp->encoder); - return PTR_ERR(dp->adp); - } - return 0; + return ret; } static void exynos_dp_unbind(struct device *dev, struct device *master, @@ -222,6 +209,7 @@ static int exynos_dp_probe(struct platform_device *pdev) if (!dp) return -ENOMEM; + dp->dev = dev; /* * We just use the drvdata until driver run into component * add function, and then we would set drvdata to null, so @@ -247,16 +235,29 @@ static int exynos_dp_probe(struct platform_device *pdev) /* The remote port can be either a panel or a bridge */ dp->plat_data.panel = panel; + dp->plat_data.dev_type = EXYNOS_DP; + dp->plat_data.power_on_start = exynos_dp_poweron; + dp->plat_data.power_off = exynos_dp_poweroff; + dp->plat_data.attach = exynos_dp_bridge_attach; + dp->plat_data.get_modes = exynos_dp_get_modes; 
dp->plat_data.skip_connector = !!bridge; + dp->ptn_bridge = bridge; out: + dp->adp = analogix_dp_probe(dev, &dp->plat_data); + if (IS_ERR(dp->adp)) + return PTR_ERR(dp->adp); + return component_add(&pdev->dev, &exynos_dp_ops); } static int exynos_dp_remove(struct platform_device *pdev) { + struct exynos_dp_device *dp = platform_get_drvdata(pdev); + component_del(&pdev->dev, &exynos_dp_ops); + analogix_dp_remove(dp->adp); return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c index 43fa0f26c052..7ba5354e7d94 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c @@ -14,6 +14,7 @@ #include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include <video/of_videomode.h> #include <video/videomode.h> @@ -149,10 +150,6 @@ static const struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = { .disable = exynos_dpi_disable, }; -static const struct drm_encoder_funcs exynos_dpi_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - enum { FIMD_PORT_IN0, FIMD_PORT_IN1, @@ -201,8 +198,7 @@ int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder) { int ret; - drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs); diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index e080aa92338c..902938d2568f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -30,6 +30,7 @@ #include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "exynos_drm_crtc.h" #include "exynos_drm_drv.h" @@ -1523,10 +1524,6 @@ static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = { .disable = exynos_dsi_disable, }; -static const struct drm_encoder_funcs exynos_dsi_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - MODULE_DEVICE_TABLE(of, exynos_dsi_of_match); static int exynos_dsi_host_attach(struct mipi_dsi_host *host, @@ -1704,8 +1701,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master, struct drm_bridge *in_bridge; int ret; - drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs); diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index b320b3a21ad4..282467121699 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -14,6 +14,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include <drm/drm_vblank.h> #include <drm/exynos_drm.h> @@ -369,10 +370,6 @@ static const struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs = .disable = exynos_vidi_disable, }; -static const struct drm_encoder_funcs exynos_vidi_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static int vidi_bind(struct device *dev, struct device *master, void *data) { struct vidi_context *ctx = dev_get_drvdata(dev); @@ -406,8 +403,7 @@ static int vidi_bind(struct device *dev, struct device *master, void *data) return 
PTR_ERR(ctx->crtc); } - drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs); diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 1a7c828fc41d..95dd399aa9cc 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -38,6 +38,7 @@ #include <drm/drm_edid.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "exynos_drm_crtc.h" #include "regs-hdmi.h" @@ -1559,10 +1560,6 @@ static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = .disable = hdmi_disable, }; -static const struct drm_encoder_funcs exynos_hdmi_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static void hdmi_audio_shutdown(struct device *dev, void *data) { struct hdmi_context *hdata = dev_get_drvdata(dev); @@ -1843,8 +1840,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) hdata->phy_clk.enable = hdmiphy_clk_enable; - drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs); diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c index cff344367f81..9b0c4736c21a 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c @@ -13,19 +13,11 @@ #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "fsl_dcu_drm_drv.h" #include "fsl_tcon.h" -static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -static const struct drm_encoder_funcs encoder_funcs = { - .destroy = fsl_dcu_drm_encoder_destroy, -}; - int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev, struct drm_crtc *crtc) { @@ -38,8 +30,8 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev, if (fsl_dev->tcon) fsl_tcon_bypass_enable(fsl_dev->tcon); - ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs, - DRM_MODE_ENCODER_LVDS, NULL); + ret = drm_simple_encoder_init(fsl_dev->drm, encoder, + DRM_MODE_ENCODER_LVDS); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c index 29c36d63b20e..88535f5aacc5 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_crt.c +++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c @@ -28,6 +28,8 @@ #include <linux/i2c.h> #include <linux/pm_runtime.h> +#include <drm/drm_simple_kms_helper.h> + #include "cdv_device.h" #include "intel_bios.h" #include "power.h" @@ -237,15 +239,6 @@ static const struct drm_connector_helper_funcs .best_encoder = gma_best_encoder, }; -static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -static const struct drm_encoder_funcs cdv_intel_crt_enc_funcs = { - .destroy = cdv_intel_crt_enc_destroy, -}; - void cdv_intel_crt_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev) { @@ -271,8 +264,7 @@ void cdv_intel_crt_init(struct drm_device *dev, &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); encoder = &gma_encoder->base; - drm_encoder_init(dev, encoder, - &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC, NULL); + 
drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC); gma_connector_attach_encoder(gma_connector, gma_encoder); diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index 5772b2dce0d6..f41cbb753bb4 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -32,6 +32,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_dp_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "gma_display.h" #include "psb_drv.h" @@ -1271,37 +1272,8 @@ cdv_intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZ return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; } - -#if 0 -static char *voltage_names[] = { - "0.4V", "0.6V", "0.8V", "1.2V" -}; -static char *pre_emph_names[] = { - "0dB", "3.5dB", "6dB", "9.5dB" -}; -static char *link_train_names[] = { - "pattern 1", "pattern 2", "idle", "off" -}; -#endif - #define CDV_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_LEVEL_3 -/* -static uint8_t -cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing) -{ - switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { - case DP_TRAIN_VOLTAGE_SWING_400: - return DP_TRAIN_PRE_EMPHASIS_6; - case DP_TRAIN_VOLTAGE_SWING_600: - return DP_TRAIN_PRE_EMPHASIS_6; - case DP_TRAIN_VOLTAGE_SWING_800: - return DP_TRAIN_PRE_EMPHASIS_3_5; - case DP_TRAIN_VOLTAGE_SWING_1200: - default: - return DP_TRAIN_PRE_EMPHASIS_0; - } -} -*/ + static void cdv_intel_get_adjust_train(struct gma_encoder *encoder) { @@ -1908,11 +1880,6 @@ cdv_intel_dp_destroy(struct drm_connector *connector) kfree(connector); } -static void cdv_intel_dp_encoder_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - static const struct drm_encoder_helper_funcs cdv_intel_dp_helper_funcs = { .dpms = cdv_intel_dp_dpms, .mode_fixup = cdv_intel_dp_mode_fixup, @@ -1935,11 +1902,6 @@ static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_fun .best_encoder = gma_best_encoder, }; -static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = { - .destroy = cdv_intel_dp_encoder_destroy, -}; - - static void cdv_intel_dp_add_properties(struct drm_connector *connector) { cdv_intel_attach_force_audio_property(connector); @@ -2016,8 +1978,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev encoder = &gma_encoder->base; drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type); - drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS); gma_connector_attach_encoder(gma_connector, gma_encoder); @@ -2120,7 +2081,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev if (ret == 0) { /* if this fails, presume the device is a ghost */ DRM_INFO("failed to retrieve link info, disabling eDP\n"); - cdv_intel_dp_encoder_destroy(encoder); + drm_encoder_cleanup(encoder); cdv_intel_dp_destroy(connector); goto err_priv; } else { diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index 1711a41acc16..0d12c6ffbc40 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c @@ -32,6 +32,7 @@ #include <drm/drm.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> +#include <drm/drm_simple_kms_helper.h> #include "cdv_device.h" #include "psb_drv.h" @@ -311,8 +312,7 @@ void cdv_hdmi_init(struct drm_device *dev, &cdv_hdmi_connector_funcs, DRM_MODE_CONNECTOR_DVID); - drm_encoder_init(dev, 
encoder, &psb_intel_lvds_enc_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS); gma_connector_attach_encoder(gma_connector, gma_encoder); gma_encoder->type = INTEL_OUTPUT_HDMI; diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index ea0a5d9a0acc..eaaf4efec217 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -12,6 +12,8 @@ #include <linux/i2c.h> #include <linux/pm_runtime.h> +#include <drm/drm_simple_kms_helper.h> + #include "cdv_device.h" #include "intel_bios.h" #include "power.h" @@ -72,89 +74,6 @@ static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev) return retval; } -#if 0 -/* - * Set LVDS backlight level by I2C command - */ -static int cdv_lvds_i2c_set_brightness(struct drm_device *dev, - unsigned int level) -{ - struct drm_psb_private *dev_priv = dev->dev_private; - struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus; - u8 out_buf[2]; - unsigned int blc_i2c_brightness; - - struct i2c_msg msgs[] = { - { - .addr = lvds_i2c_bus->slave_addr, - .flags = 0, - .len = 2, - .buf = out_buf, - } - }; - - blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level * - BRIGHTNESS_MASK / - BRIGHTNESS_MAX_LEVEL); - - if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE) - blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness; - - out_buf[0] = dev_priv->lvds_bl->brightnesscmd; - out_buf[1] = (u8)blc_i2c_brightness; - - if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) - return 0; - - DRM_ERROR("I2C transfer error\n"); - return -1; -} - - -static int cdv_lvds_pwm_set_brightness(struct drm_device *dev, int level) -{ - struct drm_psb_private *dev_priv = dev->dev_private; - - u32 max_pwm_blc; - u32 blc_pwm_duty_cycle; - - max_pwm_blc = cdv_intel_lvds_get_max_backlight(dev); - - /*BLC_PWM_CTL Should be initiated while backlight device init*/ - BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0); - - blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL; - - if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE) - blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle; - - blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR; - REG_WRITE(BLC_PWM_CTL, - (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) | - (blc_pwm_duty_cycle)); - - return 0; -} - -/* - * Set LVDS backlight level either by I2C or PWM - */ -void cdv_intel_lvds_set_brightness(struct drm_device *dev, int level) -{ - struct drm_psb_private *dev_priv = dev->dev_private; - - if (!dev_priv->lvds_bl) { - DRM_ERROR("NO LVDS Backlight Info\n"); - return; - } - - if (dev_priv->lvds_bl->type == BLC_I2C_TYPE) - cdv_lvds_i2c_set_brightness(dev, level); - else - cdv_lvds_pwm_set_brightness(dev, level); -} -#endif - /** * Sets the backlight level. * @@ -499,16 +418,6 @@ static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = { .destroy = cdv_intel_lvds_destroy, }; - -static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -static const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = { - .destroy = cdv_intel_lvds_enc_destroy, -}; - /* * Enumerate the child dev array parsed from VBT to check whether * the LVDS is present. 
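The change repeated across the gma500, exynos, hisilicon and i2c encoder drivers in this series is the same one each time: an encoder whose &drm_encoder_funcs table only supplied drm_encoder_cleanup() as its .destroy hook can drop that table and let the simple-KMS helper provide it. A minimal sketch of the before/after pattern, using hypothetical my_* names that are not taken from the patch:

#include <drm/drm_encoder.h>
#include <drm/drm_simple_kms_helper.h>

/* Before: a funcs table that exists only to call drm_encoder_cleanup(). */
static const struct drm_encoder_funcs my_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};

static int my_encoder_create_old(struct drm_device *dev,
				 struct drm_encoder *encoder)
{
	return drm_encoder_init(dev, encoder, &my_encoder_funcs,
				DRM_MODE_ENCODER_LVDS, NULL);
}

/* After: drm_simple_encoder_init() supplies the .destroy hook itself. */
static int my_encoder_create_new(struct drm_device *dev,
				 struct drm_encoder *encoder)
{
	return drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
}

The conversion only applies when drm_encoder_cleanup() is all the driver needs at teardown; encoders with extra destroy work keep their own funcs table.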
@@ -616,10 +525,7 @@ void cdv_intel_lvds_init(struct drm_device *dev, &cdv_intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS); - drm_encoder_init(dev, encoder, - &cdv_intel_lvds_enc_funcs, - DRM_MODE_ENCODER_LVDS, NULL); - + drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS); gma_connector_attach_encoder(gma_connector, gma_encoder); gma_encoder->type = INTEL_OUTPUT_LVDS; diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 1d8f67e4795a..23a78d755382 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -577,31 +577,31 @@ static void psb_setup_outputs(struct drm_device *dev) break; case INTEL_OUTPUT_SDVO: crtc_mask = dev_priv->ops->sdvo_mask; - clone_mask = (1 << INTEL_OUTPUT_SDVO); + clone_mask = 0; break; case INTEL_OUTPUT_LVDS: - crtc_mask = dev_priv->ops->lvds_mask; - clone_mask = (1 << INTEL_OUTPUT_LVDS); + crtc_mask = dev_priv->ops->lvds_mask; + clone_mask = 0; break; case INTEL_OUTPUT_MIPI: crtc_mask = (1 << 0); - clone_mask = (1 << INTEL_OUTPUT_MIPI); + clone_mask = 0; break; case INTEL_OUTPUT_MIPI2: crtc_mask = (1 << 2); - clone_mask = (1 << INTEL_OUTPUT_MIPI2); + clone_mask = 0; break; case INTEL_OUTPUT_HDMI: - crtc_mask = dev_priv->ops->hdmi_mask; + crtc_mask = dev_priv->ops->hdmi_mask; clone_mask = (1 << INTEL_OUTPUT_HDMI); break; case INTEL_OUTPUT_DISPLAYPORT: crtc_mask = (1 << 0) | (1 << 1); - clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT); + clone_mask = 0; break; case INTEL_OUTPUT_EDP: crtc_mask = (1 << 1); - clone_mask = (1 << INTEL_OUTPUT_EDP); + clone_mask = 0; } encoder->possible_crtcs = crtc_mask; encoder->possible_clones = diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c index d4c65f268922..c976a9dd9240 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c +++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c @@ -27,6 +27,8 @@ #include <linux/delay.h> +#include <drm/drm_simple_kms_helper.h> + #include "mdfld_dsi_dpi.h" #include "mdfld_dsi_pkg_sender.h" #include "mdfld_output.h" @@ -993,10 +995,7 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev, /*create drm encoder object*/ connector = &dsi_connector->base.base; encoder = &dpi_output->base.base.base; - drm_encoder_init(dev, - encoder, - p_funcs->encoder_funcs, - DRM_MODE_ENCODER_LVDS, NULL); + drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS); drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs); @@ -1006,10 +1005,10 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev, /*set possible crtcs and clones*/ if (dsi_connector->pipe) { encoder->possible_crtcs = (1 << 2); - encoder->possible_clones = (1 << 1); + encoder->possible_clones = 0; } else { encoder->possible_crtcs = (1 << 0); - encoder->possible_clones = (1 << 0); + encoder->possible_clones = 0; } dsi_connector->base.encoder = &dpi_output->base.base; diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c index 4fff110c4921..aae2d358364c 100644 --- a/drivers/gpu/drm/gma500/mdfld_intel_display.c +++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c @@ -658,16 +658,6 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc, dev_dbg(dev->dev, "pipe = 0x%x\n", pipe); -#if 0 - if (pipe == 1) { - if (!gma_power_begin(dev, true)) - return 0; - android_hdmi_crtc_mode_set(crtc, mode, adjusted_mode, - x, y, old_fb); - goto mrst_crtc_mode_set_exit; - } -#endif - ret = check_fb(crtc->primary->fb); if (ret) return ret; @@ -918,14 +908,6 @@ static int 
mdfld_crtc_mode_set(struct drm_crtc *crtc, } dpll = 0; -#if 0 /* FIXME revisit later */ - if (ksel == KSEL_CRYSTAL_19 || ksel == KSEL_BYPASS_19 || - ksel == KSEL_BYPASS_25) - dpll &= ~MDFLD_INPUT_REF_SEL; - else if (ksel == KSEL_BYPASS_83_100) - dpll |= MDFLD_INPUT_REF_SEL; -#endif /* FIXME revisit later */ - if (is_hdmi) dpll |= MDFLD_VCO_SEL; @@ -935,20 +917,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc, /* compute bitmask from p1 value */ dpll |= (1 << (clock.p1 - 2)) << 17; -#if 0 /* 1080p30 & 720p */ - dpll = 0x00050000; - fp = 0x000001be; -#endif -#if 0 /* 480p */ - dpll = 0x02010000; - fp = 0x000000d2; -#endif } else { -#if 0 /*DBI_TPO_480x864*/ - dpll = 0x00020000; - fp = 0x00000156; -#endif /* DBI_TPO_480x864 */ /* get from spec. */ - dpll = 0x00800000; fp = 0x000000c1; } diff --git a/drivers/gpu/drm/gma500/mdfld_output.h b/drivers/gpu/drm/gma500/mdfld_output.h index ab2b27c0f037..17a944d70add 100644 --- a/drivers/gpu/drm/gma500/mdfld_output.h +++ b/drivers/gpu/drm/gma500/mdfld_output.h @@ -51,7 +51,6 @@ struct panel_info { }; struct panel_funcs { - const struct drm_encoder_funcs *encoder_funcs; const struct drm_encoder_helper_funcs *encoder_helper_funcs; struct drm_display_mode * (*get_config_mode)(struct drm_device *); int (*get_panel_info)(struct drm_device *, int, struct panel_info *); diff --git a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c index 49c92debb7b2..25e897b98f86 100644 --- a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c +++ b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c @@ -188,13 +188,7 @@ static const struct drm_encoder_helper_funcs .commit = mdfld_dsi_dpi_commit, }; -/*TPO DPI encoder funcs*/ -static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - const struct panel_funcs mdfld_tmd_vid_funcs = { - .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs, .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs, .get_config_mode = &tmd_vid_get_config_mode, .get_panel_info = tmd_vid_get_panel_info, diff --git a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c index a9420bf9a419..11845978fb0a 100644 --- a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c +++ b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c @@ -76,13 +76,7 @@ static const struct drm_encoder_helper_funcs .commit = mdfld_dsi_dpi_commit, }; -/*TPO DPI encoder funcs*/ -static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - const struct panel_funcs mdfld_tpo_vid_funcs = { - .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs, .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs, .get_config_mode = &tpo_vid_get_config_mode, .get_panel_info = tpo_vid_get_panel_info, diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c index f4370232767d..a097a59a9eae 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -27,6 +27,7 @@ #include <linux/delay.h> #include <drm/drm.h> +#include <drm/drm_simple_kms_helper.h> #include "psb_drv.h" #include "psb_intel_drv.h" @@ -620,15 +621,6 @@ static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = { .destroy = oaktrail_hdmi_destroy, }; -static void oaktrail_hdmi_enc_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = { - .destroy = oaktrail_hdmi_enc_destroy, -}; - void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device 
*mode_dev) { @@ -651,9 +643,7 @@ void oaktrail_hdmi_init(struct drm_device *dev, &oaktrail_hdmi_connector_funcs, DRM_MODE_CONNECTOR_DVID); - drm_encoder_init(dev, encoder, - &oaktrail_hdmi_enc_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS); gma_connector_attach_encoder(gma_connector, gma_encoder); @@ -673,11 +663,6 @@ failed_connector: kfree(gma_encoder); } -static const struct pci_device_id hdmi_ids[] = { - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) }, - { 0 } -}; - void oaktrail_hdmi_setup(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c index 582e09597500..2828360153d1 100644 --- a/drivers/gpu/drm/gma500/oaktrail_lvds.c +++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c @@ -13,6 +13,8 @@ #include <asm/intel-mid.h> +#include <drm/drm_simple_kms_helper.h> + #include "intel_bios.h" #include "power.h" #include "psb_drv.h" @@ -311,8 +313,7 @@ void oaktrail_lvds_init(struct drm_device *dev, &psb_intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS); - drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, - DRM_MODE_ENCODER_LVDS, NULL); + drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS); gma_connector_attach_encoder(gma_connector, gma_encoder); gma_encoder->type = INTEL_OUTPUT_LVDS; diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index 16c6136f778b..fb601983cef0 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -252,7 +252,6 @@ extern int psb_intel_lvds_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t value); extern void psb_intel_lvds_destroy(struct drm_connector *connector); -extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs; /* intel_gmbus.c */ extern void gma_intel_i2c_reset(struct drm_device *dev); diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index afaebab7bc17..063c66bb946d 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -11,6 +11,8 @@ #include <linux/i2c.h> #include <linux/pm_runtime.h> +#include <drm/drm_simple_kms_helper.h> + #include "intel_bios.h" #include "power.h" #include "psb_drv.h" @@ -621,18 +623,6 @@ const struct drm_connector_funcs psb_intel_lvds_connector_funcs = { .destroy = psb_intel_lvds_destroy, }; - -static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = { - .destroy = psb_intel_lvds_enc_destroy, -}; - - - /** * psb_intel_lvds_init - setup LVDS connectors on this device * @dev: drm device @@ -683,9 +673,7 @@ void psb_intel_lvds_init(struct drm_device *dev, &psb_intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS); - drm_encoder_init(dev, encoder, - &psb_intel_lvds_enc_funcs, - DRM_MODE_ENCODER_LVDS, NULL); + drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS); gma_connector_attach_encoder(gma_connector, gma_encoder); gma_encoder->type = INTEL_OUTPUT_LVDS; diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index 264d7ad004b4..68fb3d7c172b 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -864,36 +864,6 @@ static bool psb_intel_sdvo_set_avi_infoframe(struct psb_intel_sdvo *psb_intel_sd DRM_INFO("HDMI is not supported yet"); return 
false; -#if 0 - struct dip_infoframe avi_if = { - .type = DIP_TYPE_AVI, - .ver = DIP_VERSION_AVI, - .len = DIP_LEN_AVI, - }; - uint8_t tx_rate = SDVO_HBUF_TX_VSYNC; - uint8_t set_buf_index[2] = { 1, 0 }; - uint64_t *data = (uint64_t *)&avi_if; - unsigned i; - - intel_dip_infoframe_csum(&avi_if); - - if (!psb_intel_sdvo_set_value(psb_intel_sdvo, - SDVO_CMD_SET_HBUF_INDEX, - set_buf_index, 2)) - return false; - - for (i = 0; i < sizeof(avi_if); i += 8) { - if (!psb_intel_sdvo_set_value(psb_intel_sdvo, - SDVO_CMD_SET_HBUF_DATA, - data, 8)) - return false; - data++; - } - - return psb_intel_sdvo_set_value(psb_intel_sdvo, - SDVO_CMD_SET_HBUF_TXRATE, - &tx_rate, 1); -#endif } static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo) @@ -1227,75 +1197,6 @@ static bool psb_intel_sdvo_get_capabilities(struct psb_intel_sdvo *psb_intel_sdv return true; } -/* No use! */ -#if 0 -struct drm_connector* psb_intel_sdvo_find(struct drm_device *dev, int sdvoB) -{ - struct drm_connector *connector = NULL; - struct psb_intel_sdvo *iout = NULL; - struct psb_intel_sdvo *sdvo; - - /* find the sdvo connector */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - iout = to_psb_intel_sdvo(connector); - - if (iout->type != INTEL_OUTPUT_SDVO) - continue; - - sdvo = iout->dev_priv; - - if (sdvo->sdvo_reg == SDVOB && sdvoB) - return connector; - - if (sdvo->sdvo_reg == SDVOC && !sdvoB) - return connector; - - } - - return NULL; -} - -int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector) -{ - u8 response[2]; - u8 status; - struct psb_intel_sdvo *psb_intel_sdvo; - DRM_DEBUG_KMS("\n"); - - if (!connector) - return 0; - - psb_intel_sdvo = to_psb_intel_sdvo(connector); - - return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, - &response, 2) && response[0]; -} - -void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on) -{ - u8 response[2]; - u8 status; - struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(connector); - - psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); - psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2); - - if (on) { - psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); - status = psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2); - - psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); - } else { - response[0] = 0; - response[1] = 0; - psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); - } - - psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); - psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2); -} -#endif - static bool psb_intel_sdvo_multifunc_encoder(struct psb_intel_sdvo *psb_intel_sdvo) { diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c index 9e8224456ea2..e5bdd99ad453 100644 --- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c +++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c @@ -747,11 +747,11 @@ static int cmi_lcd_hack_create_device(void) return -EINVAL; } - client = i2c_new_device(adapter, &info); - if (!client) { - pr_err("%s: i2c_new_device() failed\n", __func__); + client = i2c_new_client_device(adapter, &info); + if (IS_ERR(client)) { + pr_err("%s: creating I2C device failed\n", __func__); i2c_put_adapter(adapter); - return -EINVAL; + return PTR_ERR(client); } return 0; @@ -765,12 +765,7 @@ static const struct drm_encoder_helper_funcs 
tc35876x_encoder_helper_funcs = { .commit = mdfld_dsi_dpi_commit, }; -static const struct drm_encoder_funcs tc35876x_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - const struct panel_funcs mdfld_tc35876x_funcs = { - .encoder_funcs = &tc35876x_encoder_funcs, .encoder_helper_funcs = &tc35876x_encoder_helper_funcs, .get_config_mode = tc35876x_get_config_mode, .get_panel_info = tc35876x_get_panel_info, diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c index 55b46a7150a5..cc70e836522f 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c @@ -94,6 +94,10 @@ static int hibmc_plane_atomic_check(struct drm_plane *plane, return -EINVAL; } + if (state->fb->pitches[0] % 128 != 0) { + DRM_DEBUG_ATOMIC("wrong stride with 128-byte aligned\n"); + return -EINVAL; + } return 0; } @@ -119,11 +123,8 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane, writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS); reg = state->fb->width * (state->fb->format->cpp[0]); - /* now line_pad is 16 */ - reg = PADDING(16, reg); - line_l = state->fb->width * state->fb->format->cpp[0]; - line_l = PADDING(16, line_l); + line_l = state->fb->pitches[0]; writel(HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_WIDTH, reg) | HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_OFFS, line_l), priv->mmio + HIBMC_CRT_FB_WIDTH); diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index 222356a4f9a8..a6fd0c29e5b8 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -94,7 +94,7 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv) priv->dev->mode_config.max_height = 1200; priv->dev->mode_config.fb_base = priv->fb_base; - priv->dev->mode_config.preferred_depth = 24; + priv->dev->mode_config.preferred_depth = 32; priv->dev->mode_config.prefer_shadow = 1; priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs; @@ -307,11 +307,7 @@ static int hibmc_load(struct drm_device *dev) /* reset all the states of crtc/plane/encoder/connector */ drm_mode_config_reset(dev); - ret = drm_fbdev_generic_setup(dev, 16); - if (ret) { - DRM_ERROR("failed to initialize fbdev: %d\n", ret); - goto err; - } + drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth); return 0; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c index 99397ac3b363..322bd542e89d 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c @@ -50,7 +50,7 @@ void hibmc_mm_fini(struct hibmc_drm_private *hibmc) int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { - return drm_gem_vram_fill_create_dumb(file, dev, 0, 16, args); + return drm_gem_vram_fill_create_dumb(file, dev, 0, 128, args); } const struct drm_mode_config_funcs hibmc_mode_funcs = { diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c index f31068d74b18..00e87c290796 100644 --- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c +++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c @@ -20,11 +20,11 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_device.h> -#include <drm/drm_encoder_slave.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "dw_dsi_reg.h" 
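The hibmc hunks above tie the dumb-buffer pitch and the plane's stride check to the same 128-byte unit: hibmc_dumb_create() now asks drm_gem_vram_fill_create_dumb() for a 128-byte-aligned pitch, and hibmc_plane_atomic_check() rejects framebuffers whose pitches[0] is not a multiple of 128, so the plane code can program fb->pitches[0] directly as the line length. A rough sketch of the alignment arithmetic (an illustrative helper, not part of the patch):

#include <linux/kernel.h>

/* Bytes per scanline, rounded up to the display engine's 128-byte unit. */
static unsigned int example_hibmc_pitch(unsigned int width_px,
					unsigned int cpp)
{
	return ALIGN(width_px * cpp, 128);
}

/*
 * e.g. 1024 px * 4 bytes = 4096, already a multiple of 128;
 *      1366 px * 4 bytes = 5464, rounded up to 5504.
 */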
@@ -696,10 +696,6 @@ static const struct drm_encoder_helper_funcs dw_encoder_helper_funcs = { .disable = dsi_encoder_disable }; -static const struct drm_encoder_funcs dw_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static int dw_drm_encoder_init(struct device *dev, struct drm_device *drm_dev, struct drm_encoder *encoder) @@ -713,8 +709,7 @@ static int dw_drm_encoder_init(struct device *dev, } encoder->possible_crtcs = crtc_mask; - ret = drm_encoder_init(drm_dev, encoder, &dw_encoder_funcs, - DRM_MODE_ENCODER_DSI, NULL); + ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_DSI); if (ret) { DRM_ERROR("failed to init dsi encoder\n"); return ret; diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index 86000127d4ee..c339e632522a 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -940,7 +940,6 @@ static struct drm_driver ade_driver = { }; struct kirin_drm_data ade_driver_data = { - .register_connects = false, .num_planes = ADE_CH_NUM, .prim_plane = ADE_CH1, .channel_formats = channel_formats, diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index d3145ae877d7..4349da3e2379 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -219,40 +219,6 @@ static int kirin_drm_kms_cleanup(struct drm_device *dev) return 0; } -static int kirin_drm_connectors_register(struct drm_device *dev) -{ - struct drm_connector *connector; - struct drm_connector *failed_connector; - struct drm_connector_list_iter conn_iter; - int ret; - - mutex_lock(&dev->mode_config.mutex); - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - ret = drm_connector_register(connector); - if (ret) { - failed_connector = connector; - goto err; - } - } - drm_connector_list_iter_end(&conn_iter); - mutex_unlock(&dev->mode_config.mutex); - - return 0; - -err: - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - if (failed_connector == connector) - break; - drm_connector_unregister(connector); - } - drm_connector_list_iter_end(&conn_iter); - mutex_unlock(&dev->mode_config.mutex); - - return ret; -} - static int kirin_drm_bind(struct device *dev) { struct kirin_drm_data *driver_data; @@ -279,17 +245,8 @@ static int kirin_drm_bind(struct device *dev) drm_fbdev_generic_setup(drm_dev, 32); - /* connectors should be registered after drm device register */ - if (driver_data->register_connects) { - ret = kirin_drm_connectors_register(drm_dev); - if (ret) - goto err_drm_dev_unregister; - } - return 0; -err_drm_dev_unregister: - drm_dev_unregister(drm_dev); err_kms_cleanup: kirin_drm_kms_cleanup(drm_dev); err_drm_dev_put: diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h index 4d5c05a24065..dee8ec2f7f2e 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h @@ -37,7 +37,6 @@ struct kirin_drm_data { u32 channel_formats_cnt; int config_max_width; int config_max_height; - bool register_connects; u32 num_planes; u32 prim_plane; diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c index a839f78a4c8a..741886b54419 100644 --- a/drivers/gpu/drm/i2c/sil164_drv.c +++ b/drivers/gpu/drm/i2c/sil164_drv.c @@ -393,7 +393,7 @@ 
sil164_detect_slave(struct i2c_client *client) return NULL; } - return i2c_new_device(adap, &info); + return i2c_new_client_device(adap, &info); } static int @@ -402,6 +402,7 @@ sil164_encoder_init(struct i2c_client *client, struct drm_encoder_slave *encoder) { struct sil164_priv *priv; + struct i2c_client *slave_client; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) @@ -410,7 +411,9 @@ sil164_encoder_init(struct i2c_client *client, encoder->slave_priv = priv; encoder->slave_funcs = &sil164_encoder_funcs; - priv->duallink_slave = sil164_detect_slave(client); + slave_client = sil164_detect_slave(client); + if (!IS_ERR(slave_client)) + priv->duallink_slave = slave_client; return 0; } diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index c3332209f27a..3c90d7ae09d6 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -19,6 +19,7 @@ #include <drm/drm_of.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include <drm/i2c/tda998x.h> #include <media/cec-notifier.h> @@ -1949,9 +1950,9 @@ static int tda998x_create(struct device *dev) cec_info.platform_data = &priv->cec_glue; cec_info.irq = client->irq; - priv->cec = i2c_new_device(client->adapter, &cec_info); - if (!priv->cec) { - ret = -ENODEV; + priv->cec = i2c_new_client_device(client->adapter, &cec_info); + if (IS_ERR(priv->cec)) { + ret = PTR_ERR(priv->cec); goto fail; } @@ -1997,15 +1998,6 @@ err_irq: /* DRM encoder functions */ -static void tda998x_encoder_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -static const struct drm_encoder_funcs tda998x_encoder_funcs = { - .destroy = tda998x_encoder_destroy, -}; - static int tda998x_encoder_init(struct device *dev, struct drm_device *drm) { struct tda998x_priv *priv = dev_get_drvdata(dev); @@ -2023,8 +2015,8 @@ static int tda998x_encoder_init(struct device *dev, struct drm_device *drm) priv->encoder.possible_crtcs = crtcs; - ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + ret = drm_simple_encoder_init(drm, &priv->encoder, + DRM_MODE_ENCODER_TMDS); if (ret) goto err_encoder; diff --git a/drivers/gpu/drm/i915/.gitignore b/drivers/gpu/drm/i915/.gitignore index d9a77f3b59b2..81972dce1aff 100644 --- a/drivers/gpu/drm/i915/.gitignore +++ b/drivers/gpu/drm/i915/.gitignore @@ -1 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only *.hdrtest diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 9f887a86e555..44c506b7e117 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -28,9 +28,6 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init) CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init) -subdir-ccflags-y += \ - $(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA) - subdir-ccflags-y += -I$(srctree)/$(src) # Please keep these build lists sorted! 
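The sil164, tda998x and tc35876x hunks above all follow the same API migration: i2c_new_device() reported failure by returning NULL, while its replacement i2c_new_client_device() returns an ERR_PTR, so callers switch from NULL checks to IS_ERR()/PTR_ERR(). A minimal sketch of the pattern, with a hypothetical my_attach() wrapper that is not from the patch:

#include <linux/err.h>
#include <linux/i2c.h>

static int my_attach(struct i2c_adapter *adap,
		     const struct i2c_board_info *info,
		     struct i2c_client **out)
{
	struct i2c_client *client;

	client = i2c_new_client_device(adap, info);
	if (IS_ERR(client))		/* was: if (!client) return -ENODEV; */
		return PTR_ERR(client);

	*out = client;
	return 0;
}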
@@ -92,6 +89,7 @@ gt-y += \ gt/intel_engine_pool.o \ gt/intel_engine_user.o \ gt/intel_ggtt.o \ + gt/intel_ggtt_fencing.o \ gt/intel_gt.o \ gt/intel_gt_irq.o \ gt/intel_gt_pm.o \ @@ -153,7 +151,6 @@ i915-y += \ i915_buddy.o \ i915_cmd_parser.o \ i915_gem_evict.o \ - i915_gem_fence_reg.o \ i915_gem_gtt.o \ i915_gem.o \ i915_globals.o \ @@ -167,14 +164,18 @@ i915-y += \ # general-purpose microcontroller (GuC) support i915-y += gt/uc/intel_uc.o \ + gt/uc/intel_uc_debugfs.o \ gt/uc/intel_uc_fw.o \ gt/uc/intel_guc.o \ gt/uc/intel_guc_ads.o \ gt/uc/intel_guc_ct.o \ + gt/uc/intel_guc_debugfs.o \ gt/uc/intel_guc_fw.o \ gt/uc/intel_guc_log.o \ + gt/uc/intel_guc_log_debugfs.o \ gt/uc/intel_guc_submission.o \ gt/uc/intel_huc.o \ + gt/uc/intel_huc_debugfs.o \ gt/uc/intel_huc_fw.o # modesetting core code @@ -243,23 +244,6 @@ i915-y += \ display/vlv_dsi.o \ display/vlv_dsi_pll.o -# perf code -i915-y += \ - oa/i915_oa_hsw.o \ - oa/i915_oa_bdw.o \ - oa/i915_oa_chv.o \ - oa/i915_oa_sklgt2.o \ - oa/i915_oa_sklgt3.o \ - oa/i915_oa_sklgt4.o \ - oa/i915_oa_bxt.o \ - oa/i915_oa_kblgt2.o \ - oa/i915_oa_kblgt3.o \ - oa/i915_oa_glk.o \ - oa/i915_oa_cflgt2.o \ - oa/i915_oa_cflgt3.o \ - oa/i915_oa_cnl.o \ - oa/i915_oa_icl.o \ - oa/i915_oa_tgl.o i915-y += i915_perf.o # Post-mortem debug and GPU hang state capture diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 17cee6f80d8b..99a25c0bb08f 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -186,16 +186,19 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host, static int dsi_send_pkt_payld(struct intel_dsi_host *host, struct mipi_dsi_packet pkt) { + struct intel_dsi *intel_dsi = host->intel_dsi; + struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); + /* payload queue can accept *256 bytes*, check limit */ if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) { - DRM_ERROR("payload size exceeds max queue limit\n"); + drm_err(&i915->drm, "payload size exceeds max queue limit\n"); return -1; } /* load data into command payload queue */ if (!add_payld_to_queue(host, pkt.payload, pkt.payload_length)) { - DRM_ERROR("adding payload to queue failed\n"); + drm_err(&i915->drm, "adding payload to queue failed\n"); return -1; } @@ -744,6 +747,18 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, tmp |= VIDEO_MODE_SYNC_PULSE; break; } + } else { + /* + * FIXME: Retrieve this info from VBT. + * As per the spec when dsi transcoder is operating + * in TE GATE mode, TE comes from GPIO + * which is UTIL PIN for DSI 0. + * Also this GPIO would not be used for other + * purposes is an assumption. 
+ */ + tmp &= ~OP_MODE_MASK; + tmp |= CMD_MODE_TE_GATE; + tmp |= TE_SOURCE_GPIO; } intel_de_write(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans), tmp); @@ -837,14 +852,33 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, } hactive = adjusted_mode->crtc_hdisplay; - htotal = DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div); + + if (is_vid_mode(intel_dsi)) + htotal = DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div); + else + htotal = DIV_ROUND_UP((hactive + 160) * mul, div); + hsync_start = DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div); hsync_end = DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div); hsync_size = hsync_end - hsync_start; hback_porch = (adjusted_mode->crtc_htotal - adjusted_mode->crtc_hsync_end); vactive = adjusted_mode->crtc_vdisplay; - vtotal = adjusted_mode->crtc_vtotal; + + if (is_vid_mode(intel_dsi)) { + vtotal = adjusted_mode->crtc_vtotal; + } else { + int bpp, line_time_us, byte_clk_period_ns; + + if (crtc_state->dsc.compression_enable) + bpp = crtc_state->dsc.compressed_bpp; + else + bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); + + byte_clk_period_ns = 1000000 / afe_clk(encoder, crtc_state); + line_time_us = (htotal * (bpp / 8) * byte_clk_period_ns) / (1000 * intel_dsi->lane_count); + vtotal = vactive + DIV_ROUND_UP(400, line_time_us); + } vsync_start = adjusted_mode->crtc_vsync_start; vsync_end = adjusted_mode->crtc_vsync_end; vsync_shift = hsync_start - htotal / 2; @@ -873,7 +907,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, } /* TRANS_HSYNC register to be programmed only for video mode */ - if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) { + if (is_vid_mode(intel_dsi)) { if (intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) { /* BSPEC: hsync size should be atleast 16 pixels */ @@ -916,22 +950,27 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, if (vsync_start < vactive) drm_err(&dev_priv->drm, "vsync_start less than vactive\n"); - /* program TRANS_VSYNC register */ - for_each_dsi_port(port, intel_dsi->ports) { - dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, VSYNC(dsi_trans), - (vsync_start - 1) | ((vsync_end - 1) << 16)); + /* program TRANS_VSYNC register for video mode only */ + if (is_vid_mode(intel_dsi)) { + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + intel_de_write(dev_priv, VSYNC(dsi_trans), + (vsync_start - 1) | ((vsync_end - 1) << 16)); + } } /* - * FIXME: It has to be programmed only for interlaced + * FIXME: It has to be programmed only for video modes and interlaced * modes. Put the check condition here once interlaced * info available as described above. 
* program TRANS_VSYNCSHIFT register */ - for_each_dsi_port(port, intel_dsi->ports) { - dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans), vsync_shift); + if (is_vid_mode(intel_dsi)) { + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans), + vsync_shift); + } } /* program TRANS_VBLANK register, should be same as vtotal programmed */ @@ -1016,6 +1055,32 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder, } } +static void gen11_dsi_config_util_pin(struct intel_encoder *encoder, + bool enable) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + u32 tmp; + + /* + * used as TE i/p for DSI0, + * for dual link/DSI1 TE is from slave DSI1 + * through GPIO. + */ + if (is_vid_mode(intel_dsi) || (intel_dsi->ports & BIT(PORT_B))) + return; + + tmp = intel_de_read(dev_priv, UTIL_PIN_CTL); + + if (enable) { + tmp |= UTIL_PIN_DIRECTION_INPUT; + tmp |= UTIL_PIN_ENABLE; + } else { + tmp &= ~UTIL_PIN_ENABLE; + } + intel_de_write(dev_priv, UTIL_PIN_CTL, tmp); +} + static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) @@ -1037,6 +1102,9 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, /* setup D-PHY timings */ gen11_dsi_setup_dphy_timings(encoder, crtc_state); + /* Since transcoder is configured to take events from GPIO */ + gen11_dsi_config_util_pin(encoder, true); + /* step 4h: setup DSI protocol timeouts */ gen11_dsi_setup_timeouts(encoder, crtc_state); @@ -1088,7 +1156,8 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) wait_for_cmds_dispatched_to_panel(encoder); } -static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder, +static void gen11_dsi_pre_pll_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { @@ -1099,7 +1168,8 @@ static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder, gen11_dsi_program_esc_clk_div(encoder, crtc_state); } -static void gen11_dsi_pre_enable(struct intel_encoder *encoder, +static void gen11_dsi_pre_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -1118,7 +1188,8 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder, gen11_dsi_set_transcoder_timings(encoder, pipe_config); } -static void gen11_dsi_enable(struct intel_encoder *encoder, +static void gen11_dsi_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { @@ -1180,6 +1251,15 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) enum transcoder dsi_trans; u32 tmp; + /* disable periodic update mode */ + if (is_cmd_mode(intel_dsi)) { + for_each_dsi_port(port, intel_dsi->ports) { + tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port)); + tmp &= ~DSI_PERIODIC_FRAME_UPDATE_ENABLE; + intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp); + } + } + /* put dsi link in ULPS */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); @@ -1264,7 +1344,8 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) } } -static void gen11_dsi_disable(struct intel_encoder *encoder, +static void 
gen11_dsi_disable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -1286,11 +1367,14 @@ static void gen11_dsi_disable(struct intel_encoder *encoder, /* step3: disable port */ gen11_dsi_disable_port(encoder); + gen11_dsi_config_util_pin(encoder, false); + /* step4: disable IO power */ gen11_dsi_disable_io_power(encoder); } -static void gen11_dsi_post_disable(struct intel_encoder *encoder, +static void gen11_dsi_post_disable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -1347,6 +1431,22 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder, adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal; } +static bool gen11_dsi_is_periodic_cmd_mode(struct intel_dsi *intel_dsi) +{ + struct drm_device *dev = intel_dsi->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + enum transcoder dsi_trans; + u32 val; + + if (intel_dsi->ports == BIT(PORT_B)) + dsi_trans = TRANSCODER_DSI_1; + else + dsi_trans = TRANSCODER_DSI_0; + + val = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)); + return (val & DSI_PERIODIC_FRAME_UPDATE_ENABLE); +} + static void gen11_dsi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { @@ -1367,6 +1467,10 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder, gen11_dsi_get_timings(encoder, pipe_config); pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc); + + if (gen11_dsi_is_periodic_cmd_mode(intel_dsi)) + pipe_config->hw.adjusted_mode.private_flags |= + I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE; } static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, @@ -1417,6 +1521,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { + struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, base); struct intel_connector *intel_connector = intel_dsi->attached_connector; @@ -1446,10 +1551,32 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, pipe_config->clock_set = true; if (gen11_dsi_dsc_compute_config(encoder, pipe_config)) - DRM_DEBUG_KMS("Attempting to use DSC failed\n"); + drm_dbg_kms(&i915->drm, "Attempting to use DSC failed\n"); pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5; + /* We would not operate in periodic command mode */ + pipe_config->hw.adjusted_mode.private_flags &= + ~I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE; + + /* + * In case of TE GATE cmd mode, we + * receive TE from the slave if + * dual link is enabled + */ + if (is_cmd_mode(intel_dsi)) { + if (intel_dsi->ports == (BIT(PORT_B) | BIT(PORT_A))) + pipe_config->hw.adjusted_mode.private_flags |= + I915_MODE_FLAG_DSI_USE_TE1 | + I915_MODE_FLAG_DSI_USE_TE0; + else if (intel_dsi->ports == BIT(PORT_B)) + pipe_config->hw.adjusted_mode.private_flags |= + I915_MODE_FLAG_DSI_USE_TE1; + else + pipe_config->hw.adjusted_mode.private_flags |= + I915_MODE_FLAG_DSI_USE_TE0; + } + return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 457b258683d3..25dfeb3197aa 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ 
-264,6 +264,20 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state, plane_state->hw.color_range = from_plane_state->uapi.color_range; } +void intel_plane_set_invisible(struct intel_crtc_state *crtc_state, + struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + + crtc_state->active_planes &= ~BIT(plane->id); + crtc_state->nv12_planes &= ~BIT(plane->id); + crtc_state->c8_planes &= ~BIT(plane->id); + crtc_state->data_rate[plane->id] = 0; + crtc_state->min_cdclk[plane->id] = 0; + + plane_state->uapi.visible = false; +} + int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state, const struct intel_plane_state *old_plane_state, @@ -273,12 +287,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ const struct drm_framebuffer *fb = new_plane_state->hw.fb; int ret; - new_crtc_state->active_planes &= ~BIT(plane->id); - new_crtc_state->nv12_planes &= ~BIT(plane->id); - new_crtc_state->c8_planes &= ~BIT(plane->id); - new_crtc_state->data_rate[plane->id] = 0; - new_crtc_state->min_cdclk[plane->id] = 0; - new_plane_state->uapi.visible = false; + intel_plane_set_invisible(new_crtc_state, new_plane_state); if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc) return 0; diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h index a6bbf42bae1f..59dd1fbb02ea 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h @@ -52,5 +52,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat int intel_plane_calc_min_cdclk(struct intel_atomic_state *state, struct intel_plane *plane, bool *need_cdclk_calc); +void intel_plane_set_invisible(struct intel_crtc_state *crtc_state, + struct intel_plane_state *plane_state); #endif /* __INTEL_ATOMIC_PLANE_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 62f234f641de..57b80971ae78 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -252,14 +252,16 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta i = ARRAY_SIZE(hdmi_audio_clock); if (i == ARRAY_SIZE(hdmi_audio_clock)) { - DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", - adjusted_mode->crtc_clock); + drm_dbg_kms(&dev_priv->drm, + "HDMI audio pixel clock setting for %d not found, falling back to defaults\n", + adjusted_mode->crtc_clock); i = 1; } - DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n", - hdmi_audio_clock[i].clock, - hdmi_audio_clock[i].config); + drm_dbg_kms(&dev_priv->drm, + "Configuring HDMI audio for pixel clock %d (0x%08x)\n", + hdmi_audio_clock[i].clock, + hdmi_audio_clock[i].config); return hdmi_audio_clock[i].config; } @@ -891,7 +893,7 @@ static unsigned long i915_audio_component_get_power(struct device *kdev) ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); if (dev_priv->audio_power_refcount++ == 0) { - if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 9) { intel_de_write(dev_priv, AUD_FREQ_CNTRL, dev_priv->audio_freq_cntrl); drm_dbg_kms(&dev_priv->drm, @@ -931,7 +933,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev, unsigned long cookie; u32 tmp; - if (!IS_GEN(dev_priv, 9)) + if 
(INTEL_GEN(dev_priv) < 9) return; cookie = i915_audio_component_get_power(kdev); @@ -1173,7 +1175,7 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv) return; } - if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 9) { dev_priv->audio_freq_cntrl = intel_de_read(dev_priv, AUD_FREQ_CNTRL); drm_dbg_kms(&dev_priv->drm, diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 58b264bc318d..88f367eb28ea 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -338,16 +338,17 @@ void intel_bw_crtc_update(struct intel_bw_state *bw_state, const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); bw_state->data_rate[crtc->pipe] = intel_bw_crtc_data_rate(crtc_state); bw_state->num_active_planes[crtc->pipe] = intel_bw_crtc_num_active_planes(crtc_state); - DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n", - pipe_name(crtc->pipe), - bw_state->data_rate[crtc->pipe], - bw_state->num_active_planes[crtc->pipe]); + drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n", + pipe_name(crtc->pipe), + bw_state->data_rate[crtc->pipe], + bw_state->num_active_planes[crtc->pipe]); } static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index c1cce93a1c25..98ece9cd7cdd 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -460,6 +460,16 @@ static void ilk_lut_10_pack(struct drm_color_lut *entry, u32 val) entry->blue = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_BLUE_MASK, val), 10); } +static void icl_lut_multi_seg_pack(struct drm_color_lut *entry, u32 ldw, u32 udw) +{ + entry->red = REG_FIELD_GET(PAL_PREC_MULTI_SEG_RED_UDW_MASK, udw) << 6 | + REG_FIELD_GET(PAL_PREC_MULTI_SEG_RED_LDW_MASK, ldw); + entry->green = REG_FIELD_GET(PAL_PREC_MULTI_SEG_GREEN_UDW_MASK, udw) << 6 | + REG_FIELD_GET(PAL_PREC_MULTI_SEG_GREEN_LDW_MASK, ldw); + entry->blue = REG_FIELD_GET(PAL_PREC_MULTI_SEG_BLUE_UDW_MASK, udw) << 6 | + REG_FIELD_GET(PAL_PREC_MULTI_SEG_BLUE_LDW_MASK, ldw); +} + static void i9xx_color_commit(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -893,7 +903,7 @@ icl_load_gcmax(const struct intel_crtc_state *crtc_state, struct intel_dsb *dsb = intel_dsb_get(crtc); enum pipe pipe = crtc->pipe; - /* Fixme: LUT entries are 16 bit only, so we can prog 0xFFFF max */ + /* FIXME LUT entries are 16 bit only, so we can prog 0xFFFF max */ intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 0), color->red); intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 1), color->green); intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 2), color->blue); @@ -1630,6 +1640,24 @@ static int glk_gamma_precision(const struct intel_crtc_state *crtc_state) } } +static int icl_gamma_precision(const struct intel_crtc_state *crtc_state) +{ + if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0) + return 0; + + switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) { + case GAMMA_MODE_MODE_8BIT: + return 8; + case GAMMA_MODE_MODE_10BIT: + return 10; + case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED: + return 16; + default: + MISSING_CASE(crtc_state->gamma_mode); + return 0; + } +} + int intel_color_get_gamma_bit_precision(const struct 
intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -1641,7 +1669,9 @@ int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_stat else return i9xx_gamma_precision(crtc_state); } else { - if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) + return icl_gamma_precision(crtc_state); + else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) return glk_gamma_precision(crtc_state); else if (IS_IRONLAKE(dev_priv)) return ilk_gamma_precision(crtc_state); @@ -1658,9 +1688,9 @@ static bool err_check(struct drm_color_lut *lut1, ((abs((long)lut2->green - lut1->green)) <= err); } -static bool intel_color_lut_entry_equal(struct drm_color_lut *lut1, - struct drm_color_lut *lut2, - int lut_size, u32 err) +static bool intel_color_lut_entries_equal(struct drm_color_lut *lut1, + struct drm_color_lut *lut2, + int lut_size, u32 err) { int i; @@ -1690,16 +1720,8 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1, lut_size2 = drm_color_lut_size(blob2); /* check sw and hw lut size */ - switch (gamma_mode) { - case GAMMA_MODE_MODE_8BIT: - case GAMMA_MODE_MODE_10BIT: - if (lut_size1 != lut_size2) - return false; - break; - default: - MISSING_CASE(gamma_mode); - return false; - } + if (lut_size1 != lut_size2) + return false; lut1 = blob1->data; lut2 = blob2->data; @@ -1707,11 +1729,16 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1, err = 0xffff >> bit_precision; /* check sw and hw lut entry to be equal */ - switch (gamma_mode) { + switch (gamma_mode & GAMMA_MODE_MODE_MASK) { case GAMMA_MODE_MODE_8BIT: case GAMMA_MODE_MODE_10BIT: - if (!intel_color_lut_entry_equal(lut1, lut2, - lut_size2, err)) + if (!intel_color_lut_entries_equal(lut1, lut2, + lut_size2, err)) + return false; + break; + case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED: + if (!intel_color_lut_entries_equal(lut1, lut2, + 9, err)) return false; break; default: @@ -1946,6 +1973,63 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state) crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0)); } +static struct drm_property_blob * +icl_read_lut_multi_segment(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; + enum pipe pipe = crtc->pipe; + struct drm_property_blob *blob; + struct drm_color_lut *lut; + + blob = drm_property_create_blob(&dev_priv->drm, + sizeof(struct drm_color_lut) * lut_size, + NULL); + if (IS_ERR(blob)) + return NULL; + + lut = blob->data; + + intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), + PAL_PREC_AUTO_INCREMENT); + + for (i = 0; i < 9; i++) { + u32 ldw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe)); + u32 udw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe)); + + icl_lut_multi_seg_pack(&lut[i], ldw, udw); + } + + intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 0); + + /* + * FIXME readouts from PAL_PREC_DATA register aren't giving + * correct values in the case of fine and coarse segments. + * Restricting readouts only for super fine segment as of now. 
+ */ + + return blob; +} + +static void icl_read_luts(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0) + return; + + switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) { + case GAMMA_MODE_MODE_8BIT: + crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc); + break; + case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED: + crtc_state->hw.gamma_lut = icl_read_lut_multi_segment(crtc); + break; + default: + crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0)); + } +} + void intel_color_init(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -1989,6 +2073,7 @@ void intel_color_init(struct intel_crtc *crtc) if (INTEL_GEN(dev_priv) >= 11) { dev_priv->display.load_luts = icl_load_luts; + dev_priv->display.read_luts = icl_read_luts; } else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) { dev_priv->display.load_luts = glk_load_luts; dev_priv->display.read_luts = glk_read_luts; diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c index 903e49659f56..98ec2ea86c7c 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.c +++ b/drivers/gpu/drm/i915/display/intel_connector.c @@ -290,7 +290,7 @@ intel_attach_colorspace_property(struct drm_connector *connector) return; break; default: - DRM_DEBUG_KMS("Colorspace property not supported\n"); + MISSING_CASE(connector->connector_type); return; } diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 78f9b6cde810..a59ecbed0004 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -203,27 +203,31 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, intel_de_write(dev_priv, crt->adpa_reg, adpa); } -static void intel_disable_crt(struct intel_encoder *encoder, +static void intel_disable_crt(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { intel_crt_set_dpms(encoder, old_crtc_state, DRM_MODE_DPMS_OFF); } -static void pch_disable_crt(struct intel_encoder *encoder, +static void pch_disable_crt(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { } -static void pch_post_disable_crt(struct intel_encoder *encoder, +static void pch_post_disable_crt(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - intel_disable_crt(encoder, old_crtc_state, old_conn_state); + intel_disable_crt(state, encoder, old_crtc_state, old_conn_state); } -static void hsw_disable_crt(struct intel_encoder *encoder, +static void hsw_disable_crt(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -234,7 +238,8 @@ static void hsw_disable_crt(struct intel_encoder *encoder, intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); } -static void hsw_post_disable_crt(struct intel_encoder *encoder, +static void hsw_post_disable_crt(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -250,19 
+255,20 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder, intel_ddi_disable_pipe_clock(old_crtc_state); - pch_post_disable_crt(encoder, old_crtc_state, old_conn_state); + pch_post_disable_crt(state, encoder, old_crtc_state, old_conn_state); lpt_disable_pch_transcoder(dev_priv); lpt_disable_iclkip(dev_priv); - intel_ddi_fdi_post_disable(encoder, old_crtc_state, old_conn_state); + intel_ddi_fdi_post_disable(state, encoder, old_crtc_state, old_conn_state); drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } -static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder, +static void hsw_pre_pll_enable_crt(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { @@ -273,7 +279,8 @@ static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder, intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); } -static void hsw_pre_enable_crt(struct intel_encoder *encoder, +static void hsw_pre_enable_crt(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { @@ -290,7 +297,8 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder, intel_ddi_enable_pipe_clock(crtc_state); } -static void hsw_enable_crt(struct intel_encoder *encoder, +static void hsw_enable_crt(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { @@ -314,7 +322,8 @@ static void hsw_enable_crt(struct intel_encoder *encoder, intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } -static void intel_enable_crt(struct intel_encoder *encoder, +static void intel_enable_crt(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { @@ -594,7 +603,8 @@ static struct edid *intel_crt_get_edid(struct drm_connector *connector, edid = drm_get_edid(connector, i2c); if (!edid && !intel_gmbus_is_forced_bit(i2c)) { - DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n"); + drm_dbg_kms(connector->dev, + "CRT GMBUS EDID read failed, retry using GPIO bit-banging\n"); intel_gmbus_force_bit(i2c, true); edid = drm_get_edid(connector, i2c); intel_gmbus_force_bit(i2c, false); diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 73d0f4648c06..be6c61bcbc9c 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -568,7 +568,7 @@ static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = { { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ }; -static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_hbr2_hbr3[] = { +static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_dp[] = { /* NT mV Trans mV db */ { 0xA, 0x33, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ { 0xA, 0x47, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */ @@ -583,23 +583,51 @@ static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_hbr2_hbr3[] }; struct icl_mg_phy_ddi_buf_trans { - u32 cri_txdeemph_override_5_0; u32 cri_txdeemph_override_11_6; + u32 cri_txdeemph_override_5_0; u32 cri_txdeemph_override_17_12; }; -static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations[] = { +static const struct 
icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_rbr_hbr[] = { + /* Voltage swing pre-emphasis */ + { 0x18, 0x00, 0x00 }, /* 0 0 */ + { 0x1D, 0x00, 0x05 }, /* 0 1 */ + { 0x24, 0x00, 0x0C }, /* 0 2 */ + { 0x2B, 0x00, 0x14 }, /* 0 3 */ + { 0x21, 0x00, 0x00 }, /* 1 0 */ + { 0x2B, 0x00, 0x08 }, /* 1 1 */ + { 0x30, 0x00, 0x0F }, /* 1 2 */ + { 0x31, 0x00, 0x03 }, /* 2 0 */ + { 0x34, 0x00, 0x0B }, /* 2 1 */ + { 0x3F, 0x00, 0x00 }, /* 3 0 */ +}; + +static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hbr2_hbr3[] = { /* Voltage swing pre-emphasis */ - { 0x0, 0x1B, 0x00 }, /* 0 0 */ - { 0x0, 0x23, 0x08 }, /* 0 1 */ - { 0x0, 0x2D, 0x12 }, /* 0 2 */ - { 0x0, 0x00, 0x00 }, /* 0 3 */ - { 0x0, 0x23, 0x00 }, /* 1 0 */ - { 0x0, 0x2B, 0x09 }, /* 1 1 */ - { 0x0, 0x2E, 0x11 }, /* 1 2 */ - { 0x0, 0x2F, 0x00 }, /* 2 0 */ - { 0x0, 0x33, 0x0C }, /* 2 1 */ - { 0x0, 0x00, 0x00 }, /* 3 0 */ + { 0x18, 0x00, 0x00 }, /* 0 0 */ + { 0x1D, 0x00, 0x05 }, /* 0 1 */ + { 0x24, 0x00, 0x0C }, /* 0 2 */ + { 0x2B, 0x00, 0x14 }, /* 0 3 */ + { 0x26, 0x00, 0x00 }, /* 1 0 */ + { 0x2C, 0x00, 0x07 }, /* 1 1 */ + { 0x33, 0x00, 0x0C }, /* 1 2 */ + { 0x2E, 0x00, 0x00 }, /* 2 0 */ + { 0x36, 0x00, 0x09 }, /* 2 1 */ + { 0x3F, 0x00, 0x00 }, /* 3 0 */ +}; + +static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hdmi[] = { + /* HDMI Preset VS Pre-emph */ + { 0x1A, 0x0, 0x0 }, /* 1 400mV 0dB */ + { 0x20, 0x0, 0x0 }, /* 2 500mV 0dB */ + { 0x29, 0x0, 0x0 }, /* 3 650mV 0dB */ + { 0x32, 0x0, 0x0 }, /* 4 800mV 0dB */ + { 0x3F, 0x0, 0x0 }, /* 5 1000mV 0dB */ + { 0x3A, 0x0, 0x5 }, /* 6 Full -1.5 dB */ + { 0x39, 0x0, 0x6 }, /* 7 Full -1.8 dB */ + { 0x38, 0x0, 0x7 }, /* 8 Full -2 dB */ + { 0x37, 0x0, 0x8 }, /* 9 Full -2.5 dB */ + { 0x36, 0x0, 0x9 }, /* 10 Full -3 dB */ }; struct tgl_dkl_phy_ddi_buf_trans { @@ -943,13 +971,29 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, return icl_combo_phy_ddi_translations_dp_hbr2; } +static const struct icl_mg_phy_ddi_buf_trans * +icl_get_mg_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, + int *n_entries) +{ + if (type == INTEL_OUTPUT_HDMI) { + *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hdmi); + return icl_mg_phy_ddi_translations_hdmi; + } else if (rate > 270000) { + *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hbr2_hbr3); + return icl_mg_phy_ddi_translations_hbr2_hbr3; + } + + *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_rbr_hbr); + return icl_mg_phy_ddi_translations_rbr_hbr; +} + static const struct cnl_ddi_buf_trans * ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, int *n_entries) { - if (type == INTEL_OUTPUT_DP && rate > 270000) { - *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_hbr2_hbr3); - return ehl_combo_phy_ddi_translations_hbr2_hbr3; + if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP) { + *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_dp); + return ehl_combo_phy_ddi_translations_dp; } return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries); @@ -959,7 +1003,7 @@ static const struct cnl_ddi_buf_trans * tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, int *n_entries) { - if (type != INTEL_OUTPUT_DP) { + if (type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_EDP) { return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries); } else if (rate > 270000) { *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2); @@ -988,7 +1032,8 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder) 
icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0, &n_entries); else - n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); + icl_get_mg_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0, + &n_entries); default_entry = n_entries - 1; } else if (IS_CANNONLAKE(dev_priv)) { cnl_get_buf_trans_hdmi(dev_priv, &n_entries); @@ -1102,7 +1147,8 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, if (intel_de_read(dev_priv, reg) & DDI_BUF_IS_IDLE) return; } - DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port)); + drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c idle bit\n", + port_name(port)); } static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll) @@ -1249,7 +1295,8 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E)); if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) { - DRM_DEBUG_KMS("FDI link training done on step %d\n", i); + drm_dbg_kms(&dev_priv->drm, + "FDI link training done on step %d\n", i); break; } @@ -1258,7 +1305,7 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, * Results in less fireworks from the state checker. */ if (i == ARRAY_SIZE(hsw_ddi_translations_fdi) * 2 - 1) { - DRM_ERROR("FDI link training failed!\n"); + drm_err(&dev_priv->drm, "FDI link training failed!\n"); break; } @@ -1450,6 +1497,14 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, intel_de_write(dev_priv, TRANS_MSA_MISC(cpu_transcoder), temp); } +static u32 bdw_trans_port_sync_master_select(enum transcoder master_transcoder) +{ + if (master_transcoder == TRANSCODER_EDP) + return 0; + else + return master_transcoder + 1; +} + /* * Returns the TRANS_DDI_FUNC_CTL value based on CRTC state. * @@ -1550,6 +1605,15 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state) temp |= DDI_PORT_WIDTH(crtc_state->lane_count); } + if (IS_GEN_RANGE(dev_priv, 8, 10) && + crtc_state->master_transcoder != INVALID_TRANSCODER) { + u8 master_select = + bdw_trans_port_sync_master_select(crtc_state->master_transcoder); + + temp |= TRANS_DDI_PORT_SYNC_ENABLE | + TRANS_DDI_PORT_SYNC_MASTER_SELECT(master_select); + } + return temp; } @@ -1558,12 +1622,28 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - u32 temp; + u32 ctl; + + if (INTEL_GEN(dev_priv) >= 11) { + enum transcoder master_transcoder = crtc_state->master_transcoder; + u32 ctl2 = 0; + + if (master_transcoder != INVALID_TRANSCODER) { + u8 master_select = + bdw_trans_port_sync_master_select(master_transcoder); - temp = intel_ddi_transcoder_func_reg_val_get(crtc_state); + ctl2 |= PORT_SYNC_MODE_ENABLE | + PORT_SYNC_MODE_MASTER_SELECT(master_select); + } + + intel_de_write(dev_priv, + TRANS_DDI_FUNC_CTL2(cpu_transcoder), ctl2); + } + + ctl = intel_ddi_transcoder_func_reg_val_get(crtc_state); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) - temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC; - intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); + ctl |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC; + intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl); } /* @@ -1576,11 +1656,11 @@ intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum 
transcoder cpu_transcoder = crtc_state->cpu_transcoder; - u32 temp; + u32 ctl; - temp = intel_ddi_transcoder_func_reg_val_get(crtc_state); - temp &= ~TRANS_DDI_FUNC_ENABLE; - intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); + ctl = intel_ddi_transcoder_func_reg_val_get(crtc_state); + ctl &= ~TRANS_DDI_FUNC_ENABLE; + intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl); } void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state) @@ -1588,24 +1668,35 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - u32 val; + u32 ctl; + + if (INTEL_GEN(dev_priv) >= 11) + intel_de_write(dev_priv, + TRANS_DDI_FUNC_CTL2(cpu_transcoder), 0); + + ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); - val = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); - val &= ~TRANS_DDI_FUNC_ENABLE; + ctl &= ~TRANS_DDI_FUNC_ENABLE; + + if (IS_GEN_RANGE(dev_priv, 8, 10)) + ctl &= ~(TRANS_DDI_PORT_SYNC_ENABLE | + TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK); if (INTEL_GEN(dev_priv) >= 12) { if (!intel_dp_mst_is_master_trans(crtc_state)) { - val &= ~(TGL_TRANS_DDI_PORT_MASK | + ctl &= ~(TGL_TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK); } } else { - val &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK); + ctl &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK); } - intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), val); + + intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl); if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { - DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n"); + drm_dbg_kms(&dev_priv->drm, + "Quirk Increase DDI disabled time\n"); /* Quirk time at 100ms for reliable operation */ msleep(100); } @@ -1666,7 +1757,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) goto out; } - if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) + if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A) cpu_transcoder = TRANSCODER_EDP; else cpu_transcoder = (enum transcoder) pipe; @@ -1728,7 +1819,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, if (!(tmp & DDI_BUF_CTL_ENABLE)) goto out; - if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) { + if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A) { tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); @@ -1786,20 +1877,23 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, } if (!*pipe_mask) - DRM_DEBUG_KMS("No pipe for [ENCODER:%d:%s] found\n", - encoder->base.base.id, encoder->base.name); + drm_dbg_kms(&dev_priv->drm, + "No pipe for [ENCODER:%d:%s] found\n", + encoder->base.base.id, encoder->base.name); if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) { - DRM_DEBUG_KMS("Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n", - encoder->base.base.id, encoder->base.name, - *pipe_mask); + drm_dbg_kms(&dev_priv->drm, + "Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n", + encoder->base.base.id, encoder->base.name, + *pipe_mask); *pipe_mask = BIT(ffs(*pipe_mask) - 1); } if (mst_pipe_mask && mst_pipe_mask != *pipe_mask) - DRM_DEBUG_KMS("Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n", - encoder->base.base.id, encoder->base.name, - 
*pipe_mask, mst_pipe_mask); + drm_dbg_kms(&dev_priv->drm, + "Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n", + encoder->base.base.id, encoder->base.name, + *pipe_mask, mst_pipe_mask); else *is_dp_mst = mst_pipe_mask; @@ -1809,9 +1903,9 @@ out: if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK | BXT_PHY_LANE_POWERDOWN_ACK | BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED) - DRM_ERROR("[ENCODER:%d:%s] enabled but PHY powered down? " - "(PHY_CTL %08x)\n", encoder->base.base.id, - encoder->base.name, tmp); + drm_err(&dev_priv->drm, + "[ENCODER:%d:%s] enabled but PHY powered down? (PHY_CTL %08x)\n", + encoder->base.base.id, encoder->base.name, tmp); } intel_display_power_put(dev_priv, encoder->power_domain, wakeref); @@ -1869,7 +1963,11 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder, return; dig_port = enc_to_dig_port(encoder); - intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); + + if (!intel_phy_is_tc(dev_priv, phy) || + dig_port->tc_mode != TC_PORT_TBT_ALT) + intel_display_power_get(dev_priv, + dig_port->ddi_io_power_domain); /* * AUX power is only needed for (e)DP mode, and for HDMI mode on TC @@ -1973,7 +2071,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, /* Make sure that the requested I_boost is valid */ if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) { - DRM_ERROR("Invalid I_boost value %u\n", iboost); + drm_err(&dev_priv->drm, "Invalid I_boost value %u\n", iboost); return; } @@ -2032,7 +2130,8 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) icl_get_combo_buf_trans(dev_priv, encoder->type, intel_dp->link_rate, &n_entries); else - n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); + icl_get_mg_buf_trans(dev_priv, encoder->type, + intel_dp->link_rate, &n_entries); } else if (IS_CANNONLAKE(dev_priv)) { if (encoder->type == INTEL_OUTPUT_EDP) cnl_get_buf_trans_edp(dev_priv, &n_entries); @@ -2232,7 +2331,9 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, return; if (level >= n_entries) { - DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.", level, n_entries - 1); + drm_dbg_kms(&dev_priv->drm, + "DDI translation not found for level %d. Using %d instead.", + level, n_entries - 1); level = n_entries - 1; } @@ -2345,21 +2446,28 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, } static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, - int link_clock, - u32 level) + int link_clock, u32 level, + enum intel_output_type type) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); const struct icl_mg_phy_ddi_buf_trans *ddi_translations; u32 n_entries, val; - int ln; + int ln, rate = 0; + + if (type != INTEL_OUTPUT_HDMI) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); - ddi_translations = icl_mg_phy_ddi_translations; + rate = intel_dp->link_rate; + } + + ddi_translations = icl_get_mg_buf_trans(dev_priv, type, rate, + &n_entries); /* The table does not have values for level 3 and level 9. */ if (level >= n_entries || level == 3 || level == 9) { - DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.", - level, n_entries - 2); + drm_dbg_kms(&dev_priv->drm, + "DDI translation not found for level %d. 
Using %d instead.", + level, n_entries - 2); level = n_entries - 2; } @@ -2478,7 +2586,8 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, if (intel_phy_is_combo(dev_priv, phy)) icl_combo_phy_ddi_vswing_sequence(encoder, level, type); else - icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level); + icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level, + type); } static void @@ -2693,8 +2802,9 @@ static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv, if (drm_WARN_ON(&dev_priv->drm, ddi_clk_needed)) continue; - DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n", - phy_name(phy)); + drm_notice(&dev_priv->drm, + "PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n", + phy_name(phy)); val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy); intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); } @@ -2931,11 +3041,14 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port, static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + if (!crtc_state->fec_enable) return; if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0) - DRM_DEBUG_KMS("Failed to set FEC_READY in the sink\n"); + drm_dbg_kms(&i915->drm, + "Failed to set FEC_READY in the sink\n"); } static void intel_ddi_enable_fec(struct intel_encoder *encoder, @@ -2955,7 +3068,8 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder, if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, DP_TP_STATUS_FEC_ENABLE_LIVE, 1)) - DRM_ERROR("Timed out waiting for FEC Enable Status\n"); + drm_err(&dev_priv->drm, + "Timed out waiting for FEC Enable Status\n"); } static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, @@ -2975,7 +3089,8 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl); } -static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, +static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { @@ -3115,7 +3230,8 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_dsc_enable(encoder, crtc_state); } -static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder, +static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { @@ -3188,16 +3304,17 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_dsc_enable(encoder, crtc_state); } -static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, +static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (INTEL_GEN(dev_priv) >= 12) - tgl_ddi_pre_enable_dp(encoder, crtc_state, conn_state); + tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); else - hsw_ddi_pre_enable_dp(encoder, crtc_state, conn_state); + hsw_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); /* MST will call a setting of MSA after an allocating of Virtual Channel * from MST encoder pre_enable callback. 
@@ -3209,7 +3326,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, } } -static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, +static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { @@ -3249,7 +3367,8 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, crtc_state, conn_state); } -static void intel_ddi_pre_enable(struct intel_encoder *encoder, +static void intel_ddi_pre_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { @@ -3278,12 +3397,14 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder, intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { - intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state); + intel_ddi_pre_enable_hdmi(state, encoder, crtc_state, + conn_state); } else { struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); - intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state); + intel_ddi_pre_enable_dp(state, encoder, crtc_state, + conn_state); if (lspcon->active) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); @@ -3326,7 +3447,8 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder, intel_wait_ddi_buf_idle(dev_priv, port); } -static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, +static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -3382,7 +3504,8 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, intel_ddi_clk_disable(encoder); } -static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, +static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -3405,22 +3528,8 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); } -static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - - if (old_crtc_state->master_transcoder == INVALID_TRANSCODER) - return; - - DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n", - transcoder_name(old_crtc_state->cpu_transcoder)); - - intel_de_write(dev_priv, - TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0); -} - -static void intel_ddi_post_disable(struct intel_encoder *encoder, +static void intel_ddi_post_disable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -3434,9 +3543,6 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder, intel_disable_pipe(old_crtc_state); - if (INTEL_GEN(dev_priv) >= 11) - icl_disable_transcoder_port_sync(old_crtc_state); - intel_ddi_disable_transcoder_func(old_crtc_state); intel_dsc_disable(old_crtc_state); @@ -3461,11 +3567,11 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder, */ if (intel_crtc_has_type(old_crtc_state, 
INTEL_OUTPUT_HDMI)) - intel_ddi_post_disable_hdmi(encoder, - old_crtc_state, old_conn_state); + intel_ddi_post_disable_hdmi(state, encoder, old_crtc_state, + old_conn_state); else - intel_ddi_post_disable_dp(encoder, - old_crtc_state, old_conn_state); + intel_ddi_post_disable_dp(state, encoder, old_crtc_state, + old_conn_state); if (INTEL_GEN(dev_priv) >= 11) icl_unmap_plls_to_ports(encoder); @@ -3478,7 +3584,8 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder, intel_tc_port_put_link(dig_port); } -void intel_ddi_fdi_post_disable(struct intel_encoder *encoder, +void intel_ddi_fdi_post_disable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -3512,7 +3619,43 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder, intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); } -static void intel_enable_ddi_dp(struct intel_encoder *encoder, +static void trans_port_sync_stop_link_train(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + const struct drm_connector_state *conn_state; + struct drm_connector *conn; + int i; + + if (!crtc_state->sync_mode_slaves_mask) + return; + + for_each_new_connector_in_state(&state->base, conn, conn_state, i) { + struct intel_encoder *slave_encoder = + to_intel_encoder(conn_state->best_encoder); + struct intel_crtc *slave_crtc = to_intel_crtc(conn_state->crtc); + const struct intel_crtc_state *slave_crtc_state; + + if (!slave_crtc) + continue; + + slave_crtc_state = + intel_atomic_get_new_crtc_state(state, slave_crtc); + + if (slave_crtc_state->master_transcoder != + crtc_state->cpu_transcoder) + continue; + + intel_dp_stop_link_train(enc_to_intel_dp(slave_encoder)); + } + + usleep_range(200, 400); + + intel_dp_stop_link_train(enc_to_intel_dp(encoder)); +} + +static void intel_enable_ddi_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { @@ -3531,6 +3674,8 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder, if (crtc_state->has_audio) intel_audio_codec_enable(encoder, crtc_state, conn_state); + + trans_port_sync_stop_link_train(state, encoder, crtc_state); } static i915_reg_t @@ -3553,7 +3698,8 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv, return CHICKEN_TRANS(trans[port]); } -static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, +static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { @@ -3565,9 +3711,9 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, if (!intel_hdmi_handle_sink_scrambling(encoder, connector, crtc_state->hdmi_high_tmds_clock_ratio, crtc_state->hdmi_scrambling)) - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to configure sink " - "scrambling/TMDS bit clock ratio\n", - connector->base.id, connector->name); + drm_dbg_kms(&dev_priv->drm, + "[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n", + connector->base.id, connector->name); /* Display WA #1143: skl,kbl,cfl */ if (IS_GEN9_BC(dev_priv)) { @@ -3615,7 +3761,8 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, intel_audio_codec_enable(encoder, crtc_state, conn_state); } -static void intel_enable_ddi(struct intel_encoder *encoder, +static void 
intel_enable_ddi(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { @@ -3626,9 +3773,9 @@ static void intel_enable_ddi(struct intel_encoder *encoder, intel_crtc_vblank_on(crtc_state); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - intel_enable_ddi_hdmi(encoder, crtc_state, conn_state); + intel_enable_ddi_hdmi(state, encoder, crtc_state, conn_state); else - intel_enable_ddi_dp(encoder, crtc_state, conn_state); + intel_enable_ddi_dp(state, encoder, crtc_state, conn_state); /* Enable hdcp if it's desired */ if (conn_state->content_protection == @@ -3638,7 +3785,8 @@ static void intel_enable_ddi(struct intel_encoder *encoder, (u8)conn_state->hdcp_content_type); } -static void intel_disable_ddi_dp(struct intel_encoder *encoder, +static void intel_disable_ddi_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -3658,10 +3806,12 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder, false); } -static void intel_disable_ddi_hdmi(struct intel_encoder *encoder, +static void intel_disable_ddi_hdmi(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { + struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct drm_connector *connector = old_conn_state->connector; if (old_crtc_state->has_audio) @@ -3670,23 +3820,28 @@ static void intel_disable_ddi_hdmi(struct intel_encoder *encoder, if (!intel_hdmi_handle_sink_scrambling(encoder, connector, false, false)) - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n", - connector->base.id, connector->name); + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n", + connector->base.id, connector->name); } -static void intel_disable_ddi(struct intel_encoder *encoder, +static void intel_disable_ddi(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { intel_hdcp_disable(to_intel_connector(old_conn_state->connector)); if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI)) - intel_disable_ddi_hdmi(encoder, old_crtc_state, old_conn_state); + intel_disable_ddi_hdmi(state, encoder, old_crtc_state, + old_conn_state); else - intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state); + intel_disable_ddi_dp(state, encoder, old_crtc_state, + old_conn_state); } -static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder, +static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { @@ -3697,18 +3852,20 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder, intel_psr_update(intel_dp, crtc_state); intel_edp_drrs_enable(intel_dp, crtc_state); - intel_panel_update_backlight(encoder, crtc_state, conn_state); + intel_panel_update_backlight(state, encoder, crtc_state, conn_state); } -static void intel_ddi_update_pipe(struct intel_encoder *encoder, +static void intel_ddi_update_pipe(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { if 
(!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state); + intel_ddi_update_pipe_dp(state, encoder, crtc_state, + conn_state); - intel_hdcp_update_pipe(encoder, crtc_state, conn_state); + intel_hdcp_update_pipe(state, encoder, crtc_state, conn_state); } static void @@ -3737,7 +3894,8 @@ intel_ddi_update_complete(struct intel_atomic_state *state, } static void -intel_ddi_pre_pll_enable(struct intel_encoder *encoder, +intel_ddi_pre_pll_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { @@ -3837,6 +3995,66 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, crtc_state->min_voltage_level = 2; } +static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder) +{ + u32 master_select; + + if (INTEL_GEN(dev_priv) >= 11) { + u32 ctl2 = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL2(cpu_transcoder)); + + if ((ctl2 & PORT_SYNC_MODE_ENABLE) == 0) + return INVALID_TRANSCODER; + + master_select = REG_FIELD_GET(PORT_SYNC_MODE_MASTER_SELECT_MASK, ctl2); + } else { + u32 ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); + + if ((ctl & TRANS_DDI_PORT_SYNC_ENABLE) == 0) + return INVALID_TRANSCODER; + + master_select = REG_FIELD_GET(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, ctl); + } + + if (master_select == 0) + return TRANSCODER_EDP; + else + return master_select - 1; +} + +static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + u32 transcoders = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + BIT(TRANSCODER_C) | BIT(TRANSCODER_D); + enum transcoder cpu_transcoder; + + crtc_state->master_transcoder = + bdw_transcoder_master_readout(dev_priv, crtc_state->cpu_transcoder); + + for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) { + enum intel_display_power_domain power_domain; + intel_wakeref_t trans_wakeref; + + power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); + trans_wakeref = intel_display_power_get_if_enabled(dev_priv, + power_domain); + + if (!trans_wakeref) + continue; + + if (bdw_transcoder_master_readout(dev_priv, cpu_transcoder) == + crtc_state->cpu_transcoder) + crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder); + + intel_display_power_put(dev_priv, power_domain, trans_wakeref); + } + + drm_WARN_ON(&dev_priv->drm, + crtc_state->master_transcoder != INVALID_TRANSCODER && + crtc_state->sync_mode_slaves_mask); +} + void intel_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { @@ -3922,9 +4140,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder, pipe_config->fec_enable = intel_de_read(dev_priv, dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE; - DRM_DEBUG_KMS("[ENCODER:%d:%s] Fec status: %u\n", - encoder->base.base.id, encoder->base.name, - pipe_config->fec_enable); + drm_dbg_kms(&dev_priv->drm, + "[ENCODER:%d:%s] Fec status: %u\n", + encoder->base.base.id, encoder->base.name, + pipe_config->fec_enable); } break; @@ -3961,8 +4180,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder, * up by the BIOS, and thus we can't get the mode at module * load. 
*/ - DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", - pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp); + drm_dbg_kms(&dev_priv->drm, + "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", + pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp); dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; } @@ -3988,6 +4208,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder, intel_read_infoframe(encoder, pipe_config, HDMI_INFOFRAME_TYPE_DRM, &pipe_config->infoframes.drm); + + if (INTEL_GEN(dev_priv) >= 8) + bdw_get_trans_port_sync_config(pipe_config); } static enum intel_output_type @@ -4017,7 +4240,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder, enum port port = encoder->port; int ret; - if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) + if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A) pipe_config->cpu_transcoder = TRANSCODER_EDP; if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) { @@ -4089,7 +4312,11 @@ intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state, u8 transcoders = 0; int i; - if (INTEL_GEN(dev_priv) < 11) + /* + * We don't enable port sync on BDW due to missing w/as and + * due to not having adjusted the modeset sequence appropriately. + */ + if (INTEL_GEN(dev_priv) < 9) return 0; if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP)) @@ -4121,12 +4348,13 @@ static int intel_ddi_compute_config_late(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { + struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct drm_connector *connector = conn_state->connector; u8 port_sync_transcoders = 0; - DRM_DEBUG_KMS("[ENCODER:%d:%s] [CRTC:%d:%s]", - encoder->base.base.id, encoder->base.name, - crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name); + drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]", + encoder->base.base.id, encoder->base.name, + crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name); if (connector->has_tile) port_sync_transcoders = intel_ddi_port_sync_transcoders(crtc_state, @@ -4265,7 +4493,8 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder, ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config); if (ret < 0) { - DRM_ERROR("Failed to read TMDS config: %d\n", ret); + drm_err(&dev_priv->drm, "Failed to read TMDS config: %d\n", + ret); return 0; } @@ -4289,15 +4518,17 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder, static enum intel_hotplug_state intel_ddi_hotplug(struct intel_encoder *encoder, - struct intel_connector *connector, - bool irq_received) + struct intel_connector *connector) { + struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + enum phy phy = intel_port_to_phy(i915, encoder->port); + bool is_tc = intel_phy_is_tc(i915, phy); struct drm_modeset_acquire_ctx ctx; enum intel_hotplug_state state; int ret; - state = intel_encoder_hotplug(encoder, connector, irq_received); + state = intel_encoder_hotplug(encoder, connector); drm_modeset_acquire_init(&ctx, 0); @@ -4335,8 +4566,15 @@ intel_ddi_hotplug(struct intel_encoder *encoder, * valid EDID. To solve this schedule another detection cycle if this * time around we didn't detect any change in the sink's connection * status. 
+ * + * Type-c connectors which get their HPD signal deasserted then + * reasserted, without unplugging/replugging the sink from the + * connector, introduce a delay until the AUX channel communication + * becomes functional. Retry the detection for 5 seconds on type-c + * connectors to account for this delay. */ - if (state == INTEL_HOTPLUG_UNCHANGED && irq_received && + if (state == INTEL_HOTPLUG_UNCHANGED && + connector->hotplug_retries < (is_tc ? 5 : 1) && !dig_port->dp.is_mst) state = INTEL_HOTPLUG_RETRY; @@ -4411,7 +4649,8 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport) * so we use the proper lane count for our calculations. */ if (intel_ddi_a_force_4_lanes(intel_dport)) { - DRM_DEBUG_KMS("Forcing DDI_A_4_LANES for port A\n"); + drm_dbg_kms(&dev_priv->drm, + "Forcing DDI_A_4_LANES for port A\n"); intel_dport->saved_port_bits |= DDI_A_4_LANES; max_lanes = 4; } @@ -4439,12 +4678,14 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) init_dp = true; init_lspcon = true; init_hdmi = false; - DRM_DEBUG_KMS("VBT says port %c has lspcon\n", port_name(port)); + drm_dbg_kms(&dev_priv->drm, "VBT says port %c has lspcon\n", + port_name(port)); } if (!init_dp && !init_hdmi) { - DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respect it\n", - port_name(port)); + drm_dbg_kms(&dev_priv->drm, + "VBT says port %c is not DVI/HDMI/DP compatible, respect it\n", + port_name(port)); return; } @@ -4523,14 +4764,16 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) if (init_lspcon) { if (lspcon_init(intel_dig_port)) /* TODO: handle hdmi info frame part */ - DRM_DEBUG_KMS("LSPCON init success on port %c\n", - port_name(port)); + drm_dbg_kms(&dev_priv->drm, + "LSPCON init success on port %c\n", + port_name(port)); else /* * LSPCON init faied, but DP init was success, so * lets try to drive as DP++ port. 
*/ - DRM_ERROR("LSPCON init failed on port %c\n", + drm_err(&dev_priv->drm, + "LSPCON init failed on port %c\n", port_name(port)); } diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index 55fd72b901fe..de4cd877c002 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -17,7 +17,8 @@ struct intel_dp; struct intel_dpll_hw_state; struct intel_encoder; -void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder, +void intel_ddi_fdi_post_disable(struct intel_atomic_state *state, + struct intel_encoder *intel_encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state); void hsw_fdi_link_train(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 346846609f45..af5b4055b38a 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -525,7 +525,7 @@ skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS)); } -/* Wa_2006604312:icl */ +/* Wa_2006604312:icl,ehl */ static void icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) @@ -544,17 +544,23 @@ needs_modeset(const struct intel_crtc_state *state) return drm_atomic_crtc_needs_modeset(&state->uapi); } -bool -is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state) +static bool +is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state) { - return (crtc_state->master_transcoder != INVALID_TRANSCODER || - crtc_state->sync_mode_slaves_mask); + return crtc_state->master_transcoder != INVALID_TRANSCODER; } static bool -is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state) +is_trans_port_sync_master(const struct intel_crtc_state *crtc_state) { - return crtc_state->master_transcoder != INVALID_TRANSCODER; + return crtc_state->sync_mode_slaves_mask != 0; +} + +bool +is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state) +{ + return is_trans_port_sync_master(crtc_state) || + is_trans_port_sync_slave(crtc_state); } /* @@ -620,45 +626,43 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock) return clock->dot / 5; } -#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) - /* * Returns whether the given set of divisors are valid for a given refclk with * the given connectors. 
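The reshuffled helpers at the end of the intel_display.c hunk above make the transcoder port-sync relationship explicit: a CRTC is a slave when it points at a master transcoder, a master when its slave mask is non-empty, and in port-sync mode when either holds. Restated as a standalone sketch, with a stub crtc_state_model instead of the real intel_crtc_state:

#include <stdbool.h>
#include <stdint.h>

#define INVALID_TRANSCODER (-1)

/* Stub for the two fields the predicates look at. */
struct crtc_state_model {
	int master_transcoder;          /* INVALID_TRANSCODER when unset */
	uint8_t sync_mode_slaves_mask;  /* bitmask of slave transcoders */
};

static bool is_sync_slave(const struct crtc_state_model *s)
{
	return s->master_transcoder != INVALID_TRANSCODER;
}

static bool is_sync_master(const struct crtc_state_model *s)
{
	return s->sync_mode_slaves_mask != 0;
}

static bool is_sync_mode(const struct crtc_state_model *s)
{
	return is_sync_master(s) || is_sync_slave(s);
}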
*/ -static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv, +static bool intel_pll_is_valid(struct drm_i915_private *dev_priv, const struct intel_limit *limit, const struct dpll *clock) { - if (clock->n < limit->n.min || limit->n.max < clock->n) - INTELPllInvalid("n out of range\n"); - if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) - INTELPllInvalid("p1 out of range\n"); - if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) - INTELPllInvalid("m2 out of range\n"); - if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) - INTELPllInvalid("m1 out of range\n"); + if (clock->n < limit->n.min || limit->n.max < clock->n) + return false; + if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) + return false; + if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) + return false; + if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) + return false; if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv)) if (clock->m1 <= clock->m2) - INTELPllInvalid("m1 <= m2\n"); + return false; if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv)) { if (clock->p < limit->p.min || limit->p.max < clock->p) - INTELPllInvalid("p out of range\n"); + return false; if (clock->m < limit->m.min || limit->m.max < clock->m) - INTELPllInvalid("m out of range\n"); + return false; } if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) - INTELPllInvalid("vco out of range\n"); + return false; /* XXX: We may need to be checking "Dot clock" depending on the multiplier, * connector, etc., rather than just a single range. */ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) - INTELPllInvalid("dot out of range\n"); + return false; return true; } @@ -725,7 +729,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit, int this_err; i9xx_calc_dpll_params(refclk, &clock); - if (!intel_PLL_is_valid(to_i915(dev), + if (!intel_pll_is_valid(to_i915(dev), limit, &clock)) continue; @@ -781,7 +785,7 @@ pnv_find_best_dpll(const struct intel_limit *limit, int this_err; pnv_calc_dpll_params(refclk, &clock); - if (!intel_PLL_is_valid(to_i915(dev), + if (!intel_pll_is_valid(to_i915(dev), limit, &clock)) continue; @@ -842,7 +846,7 @@ g4x_find_best_dpll(const struct intel_limit *limit, int this_err; i9xx_calc_dpll_params(refclk, &clock); - if (!intel_PLL_is_valid(to_i915(dev), + if (!intel_pll_is_valid(to_i915(dev), limit, &clock)) continue; @@ -939,7 +943,7 @@ vlv_find_best_dpll(const struct intel_limit *limit, vlv_calc_dpll_params(refclk, &clock); - if (!intel_PLL_is_valid(to_i915(dev), + if (!intel_pll_is_valid(to_i915(dev), limit, &clock)) continue; @@ -1008,7 +1012,7 @@ chv_find_best_dpll(const struct intel_limit *limit, chv_calc_dpll_params(refclk, &clock); - if (!intel_PLL_is_valid(to_i915(dev), limit, &clock)) + if (!intel_pll_is_valid(to_i915(dev), limit, &clock)) continue; if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock, @@ -2910,6 +2914,7 @@ intel_fb_plane_get_subsampling(int *hsub, int *vsub, static int intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y) { + struct drm_i915_private *i915 = to_i915(fb->dev); struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); int main_plane; int hsub, vsub; @@ -2938,7 +2943,8 @@ intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y) * x/y offsets must match between CCS and the main surface. 
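The intel_pll_is_valid() rewrite above replaces the INTELPllInvalid() macro, which hid a return statement inside a do/while, with plain range checks that return false directly. A compact sketch of that validation pattern, using made-up limit and divisor structs in place of the i915 ones:

#include <stdbool.h>

struct range { int min, max; };

struct pll_limits { struct range n, m1, m2, p1, vco, dot; };
struct pll_divisors { int n, m1, m2, p1, vco, dot; };

static bool in_range(int val, struct range r)
{
	return val >= r.min && val <= r.max;
}

/* Every out-of-range divisor simply yields false, no macro involved. */
static bool pll_is_valid(const struct pll_limits *lim,
			 const struct pll_divisors *clk)
{
	return in_range(clk->n, lim->n) &&
	       in_range(clk->m1, lim->m1) &&
	       in_range(clk->m2, lim->m2) &&
	       in_range(clk->p1, lim->p1) &&
	       in_range(clk->vco, lim->vco) &&
	       in_range(clk->dot, lim->dot);
}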
*/ if (main_x != ccs_x || main_y != ccs_y) { - DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n", + drm_dbg_kms(&i915->drm, + "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n", main_x, main_y, ccs_x, ccs_y, intel_fb->normal[main_plane].x, @@ -3336,6 +3342,8 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) return DRM_FORMAT_RGB565; case PLANE_CTL_FORMAT_NV12: return DRM_FORMAT_NV12; + case PLANE_CTL_FORMAT_XYUV: + return DRM_FORMAT_XYUV8888; case PLANE_CTL_FORMAT_P010: return DRM_FORMAT_P010; case PLANE_CTL_FORMAT_P012: @@ -4580,6 +4588,8 @@ static u32 skl_plane_ctl_format(u32 pixel_format) case DRM_FORMAT_XRGB16161616F: case DRM_FORMAT_ARGB16161616F: return PLANE_CTL_FORMAT_XRGB_16161616F; + case DRM_FORMAT_XYUV8888: + return PLANE_CTL_FORMAT_XYUV; case DRM_FORMAT_YUYV: return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; case DRM_FORMAT_YVYU: @@ -4998,37 +5008,6 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc) intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp); } -static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 trans_ddi_func_ctl2_val; - u8 master_select; - - /* - * Configure the master select and enable Transcoder Port Sync for - * Slave CRTCs transcoder. - */ - if (crtc_state->master_transcoder == INVALID_TRANSCODER) - return; - - if (crtc_state->master_transcoder == TRANSCODER_EDP) - master_select = 0; - else - master_select = crtc_state->master_transcoder + 1; - - /* Set the master select bits for Tranascoder Port Sync */ - trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) & - PORT_SYNC_MODE_MASTER_SELECT_MASK) << - PORT_SYNC_MODE_MASTER_SELECT_SHIFT; - /* Enable Transcoder Port Sync */ - trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE; - - intel_de_write(dev_priv, - TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder), - trans_ddi_func_ctl2_val); -} - static void intel_fdi_normal_train(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; @@ -6200,6 +6179,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, case DRM_FORMAT_UYVY: case DRM_FORMAT_VYUY: case DRM_FORMAT_NV12: + case DRM_FORMAT_XYUV8888: case DRM_FORMAT_P010: case DRM_FORMAT_P012: case DRM_FORMAT_P016: @@ -6463,8 +6443,8 @@ static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - /* Wa_2006604312:icl */ - if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv)) + /* Wa_2006604312:icl,ehl */ + if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11)) return true; return false; @@ -6534,7 +6514,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, needs_nv12_wa(new_crtc_state)) skl_wa_827(dev_priv, pipe, true); - /* Wa_2006604312:icl */ + /* Wa_2006604312:icl,ehl */ if (!needs_scalerclk_wa(old_crtc_state) && needs_scalerclk_wa(new_crtc_state)) icl_wa_scalerclkgating(dev_priv, pipe, true); @@ -6720,7 +6700,8 @@ static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state, continue; if (encoder->pre_pll_enable) - encoder->pre_pll_enable(encoder, crtc_state, conn_state); + encoder->pre_pll_enable(state, encoder, + crtc_state, conn_state); } } @@ -6741,7 +6722,8 @@ static void intel_encoders_pre_enable(struct intel_atomic_state *state, continue; if (encoder->pre_enable) - 
encoder->pre_enable(encoder, crtc_state, conn_state); + encoder->pre_enable(state, encoder, + crtc_state, conn_state); } } @@ -6762,7 +6744,8 @@ static void intel_encoders_enable(struct intel_atomic_state *state, continue; if (encoder->enable) - encoder->enable(encoder, crtc_state, conn_state); + encoder->enable(state, encoder, + crtc_state, conn_state); intel_opregion_notify_encoder(encoder, true); } } @@ -6785,7 +6768,8 @@ static void intel_encoders_disable(struct intel_atomic_state *state, intel_opregion_notify_encoder(encoder, false); if (encoder->disable) - encoder->disable(encoder, old_crtc_state, old_conn_state); + encoder->disable(state, encoder, + old_crtc_state, old_conn_state); } } @@ -6806,7 +6790,8 @@ static void intel_encoders_post_disable(struct intel_atomic_state *state, continue; if (encoder->post_disable) - encoder->post_disable(encoder, old_crtc_state, old_conn_state); + encoder->post_disable(state, encoder, + old_crtc_state, old_conn_state); } } @@ -6827,7 +6812,8 @@ static void intel_encoders_post_pll_disable(struct intel_atomic_state *state, continue; if (encoder->post_pll_disable) - encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state); + encoder->post_pll_disable(state, encoder, + old_crtc_state, old_conn_state); } } @@ -6848,7 +6834,8 @@ static void intel_encoders_update_pipe(struct intel_atomic_state *state, continue; if (encoder->update_pipe) - encoder->update_pipe(encoder, crtc_state, conn_state); + encoder->update_pipe(state, encoder, + crtc_state, conn_state); } } @@ -7037,9 +7024,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, if (!transcoder_is_dsi(cpu_transcoder)) intel_set_pipe_timings(new_crtc_state); - if (INTEL_GEN(dev_priv) >= 11) - icl_enable_trans_port_sync(new_crtc_state); - intel_set_pipe_src_size(new_crtc_state); if (cpu_transcoder != TRANSCODER_EDP && @@ -9398,7 +9382,6 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = NULL; - pipe_config->master_transcoder = INVALID_TRANSCODER; ret = false; @@ -10622,7 +10605,6 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc, pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = NULL; - pipe_config->master_transcoder = INVALID_TRANSCODER; ret = false; tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe)); @@ -10891,7 +10873,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); - if (HAS_TRANSCODER_EDP(dev_priv)) + if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP)) panel_transcoder_mask |= BIT(TRANSCODER_EDP); /* @@ -11085,61 +11067,6 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc, } } -static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv, - enum transcoder cpu_transcoder) -{ - u32 trans_port_sync, master_select; - - trans_port_sync = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL2(cpu_transcoder)); - - if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0) - return INVALID_TRANSCODER; - - master_select = trans_port_sync & - PORT_SYNC_MODE_MASTER_SELECT_MASK; - if (master_select == 0) - return TRANSCODER_EDP; - else - return master_select - 1; -} - -static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - u32 transcoders; - enum transcoder cpu_transcoder; - - 
crtc_state->master_transcoder = transcoder_master_readout(dev_priv, - crtc_state->cpu_transcoder); - - transcoders = BIT(TRANSCODER_A) | - BIT(TRANSCODER_B) | - BIT(TRANSCODER_C) | - BIT(TRANSCODER_D); - for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) { - enum intel_display_power_domain power_domain; - intel_wakeref_t trans_wakeref; - - power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); - trans_wakeref = intel_display_power_get_if_enabled(dev_priv, - power_domain); - - if (!trans_wakeref) - continue; - - if (transcoder_master_readout(dev_priv, cpu_transcoder) == - crtc_state->cpu_transcoder) - crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder); - - intel_display_power_put(dev_priv, power_domain, trans_wakeref); - } - - drm_WARN_ON(&dev_priv->drm, - crtc_state->master_transcoder != INVALID_TRANSCODER && - crtc_state->sync_mode_slaves_mask); -} - static bool hsw_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { @@ -11271,10 +11198,6 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, pipe_config->pixel_multiplier = 1; } - if (INTEL_GEN(dev_priv) >= 11 && - !transcoder_is_dsi(pipe_config->cpu_transcoder)) - icl_get_trans_port_sync_config(pipe_config); - out: for_each_power_domain(power_domain, power_domain_mask) intel_display_power_put(dev_priv, @@ -12377,10 +12300,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat * only combine the results from all planes in the current place? */ if (!is_crtc_enabled) { - plane_state->uapi.visible = visible = false; - crtc_state->active_planes &= ~BIT(plane->id); - crtc_state->data_rate[plane->id] = 0; - crtc_state->min_cdclk[plane->id] = 0; + intel_plane_set_invisible(crtc_state, plane_state); + visible = false; } if (!was_visible && !visible) @@ -12886,16 +12807,17 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc, return 0; } -static void intel_dump_crtc_timings(const struct drm_display_mode *mode) +static void intel_dump_crtc_timings(struct drm_i915_private *i915, + const struct drm_display_mode *mode) { - DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, " - "type: 0x%x flags: 0x%x\n", - mode->crtc_clock, - mode->crtc_hdisplay, mode->crtc_hsync_start, - mode->crtc_hsync_end, mode->crtc_htotal, - mode->crtc_vdisplay, mode->crtc_vsync_start, - mode->crtc_vsync_end, mode->crtc_vtotal, - mode->type, mode->flags); + drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, " + "type: 0x%x flags: 0x%x\n", + mode->crtc_clock, + mode->crtc_hdisplay, mode->crtc_hsync_start, + mode->crtc_hsync_end, mode->crtc_htotal, + mode->crtc_vdisplay, mode->crtc_vsync_start, + mode->crtc_vsync_end, mode->crtc_vtotal, + mode->type, mode->flags); } static inline void @@ -13042,6 +12964,11 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, transcoder_name(pipe_config->cpu_transcoder), pipe_config->pipe_bpp, pipe_config->dither); + drm_dbg_kms(&dev_priv->drm, + "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n", + transcoder_name(pipe_config->master_transcoder), + pipe_config->sync_mode_slaves_mask); + if (pipe_config->has_pch_encoder) intel_dump_m_n_config(pipe_config, "fdi", pipe_config->fdi_lanes, @@ -13079,7 +13006,7 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, drm_mode_debug_printmodeline(&pipe_config->hw.mode); drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n"); drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode); - 
intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode); + intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode); drm_dbg_kms(&dev_priv->drm, "port clock: %d, pipe src size: %dx%d, pixel rate %d\n", pipe_config->port_clock, @@ -14999,11 +14926,13 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, } static void commit_pipe_config(struct intel_atomic_state *state, - struct intel_crtc_state *old_crtc_state, - struct intel_crtc_state *new_crtc_state) + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(state->base.dev); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); bool modeset = needs_modeset(new_crtc_state); /* @@ -15029,22 +14958,35 @@ static void commit_pipe_config(struct intel_atomic_state *state, dev_priv->display.atomic_update_watermarks(state, crtc); } -static void intel_update_crtc(struct intel_crtc *crtc, - struct intel_atomic_state *state, - struct intel_crtc_state *old_crtc_state, - struct intel_crtc_state *new_crtc_state) +static void intel_enable_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - bool modeset = needs_modeset(new_crtc_state); + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); - if (modeset) { - intel_crtc_update_active_timings(new_crtc_state); + if (!needs_modeset(new_crtc_state)) + return; - dev_priv->display.crtc_enable(state, crtc); + intel_crtc_update_active_timings(new_crtc_state); - /* vblanks work again, re-enable pipe CRC. */ - intel_crtc_enable_pipe_crc(crtc); - } else { + dev_priv->display.crtc_enable(state, crtc); + + /* vblanks work again, re-enable pipe CRC. 
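commit_pipe_config(), intel_enable_crtc() and intel_update_crtc() in the hunks above now take (state, crtc) and fetch the old and new CRTC states themselves via intel_atomic_get_old_crtc_state()/intel_atomic_get_new_crtc_state(). A toy model of that lookup pattern, assuming a fixed-size state table rather than the real atomic state objects:

#define MAX_CRTCS 4

struct crtc_state_model { int active; };

/* Toy stand-in for the atomic state: old and new state per CRTC. */
struct atomic_state_model {
	struct crtc_state_model old_state[MAX_CRTCS];
	struct crtc_state_model new_state[MAX_CRTCS];
};

static const struct crtc_state_model *
get_old_crtc_state(const struct atomic_state_model *s, int crtc)
{
	return &s->old_state[crtc];
}

static const struct crtc_state_model *
get_new_crtc_state(const struct atomic_state_model *s, int crtc)
{
	return &s->new_state[crtc];
}

/* Callers only pass (state, crtc); both states are looked up here. */
static int crtc_needs_modeset(const struct atomic_state_model *s, int crtc)
{
	return get_old_crtc_state(s, crtc)->active !=
	       get_new_crtc_state(s, crtc)->active;
}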
*/ + intel_crtc_enable_pipe_crc(crtc); +} + +static void intel_update_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + bool modeset = needs_modeset(new_crtc_state); + + if (!modeset) { if (new_crtc_state->preload_luts && (new_crtc_state->uapi.color_mgmt_changed || new_crtc_state->update_pipe)) @@ -15064,7 +15006,7 @@ static void intel_update_crtc(struct intel_crtc *crtc, /* Perform vblank evasion around commit operation */ intel_pipe_update_start(new_crtc_state); - commit_pipe_config(state, old_crtc_state, new_crtc_state); + commit_pipe_config(state, crtc); if (INTEL_GEN(dev_priv) >= 9) skl_update_planes_on_crtc(state, crtc); @@ -15084,18 +15026,6 @@ static void intel_update_crtc(struct intel_crtc *crtc, intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); } -static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev); - enum transcoder slave_transcoder; - - drm_WARN_ON(&dev_priv->drm, - !is_power_of_2(new_crtc_state->sync_mode_slaves_mask)); - - slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1; - return intel_get_crtc_for_pipe(dev_priv, - (enum pipe)slave_transcoder); -} static void intel_old_crtc_state_disables(struct intel_atomic_state *state, struct intel_crtc_state *old_crtc_state, @@ -15171,129 +15101,19 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) static void intel_commit_modeset_enables(struct intel_atomic_state *state) { + struct intel_crtc_state *new_crtc_state; struct intel_crtc *crtc; - struct intel_crtc_state *old_crtc_state, *new_crtc_state; int i; - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { if (!new_crtc_state->hw.active) continue; - intel_update_crtc(crtc, state, old_crtc_state, - new_crtc_state); + intel_enable_crtc(state, crtc); + intel_update_crtc(state, crtc); } } -static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc, - struct intel_atomic_state *state, - struct intel_crtc_state *new_crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - - intel_crtc_update_active_timings(new_crtc_state); - dev_priv->display.crtc_enable(state, crtc); - intel_crtc_enable_pipe_crc(crtc); -} - -static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc, - struct intel_atomic_state *state) -{ - struct drm_connector *uninitialized_var(conn); - struct drm_connector_state *conn_state; - struct intel_dp *intel_dp; - int i; - - for_each_new_connector_in_state(&state->base, conn, conn_state, i) { - if (conn_state->crtc == &crtc->base) - break; - } - intel_dp = intel_attached_dp(to_intel_connector(conn)); - intel_dp_stop_link_train(intel_dp); -} - -/* - * TODO: This is only called from port sync and it is identical to what will be - * executed again in intel_update_crtc() over port sync pipes - */ -static void intel_post_crtc_enable_updates(struct intel_crtc *crtc, - struct intel_atomic_state *state) -{ - struct intel_crtc_state *new_crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - struct intel_crtc_state *old_crtc_state = - intel_atomic_get_old_crtc_state(state, crtc); - bool modeset = 
needs_modeset(new_crtc_state); - - if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc) - intel_fbc_disable(crtc); - else - intel_fbc_enable(state, crtc); - - /* Perform vblank evasion around commit operation */ - intel_pipe_update_start(new_crtc_state); - commit_pipe_config(state, old_crtc_state, new_crtc_state); - skl_update_planes_on_crtc(state, crtc); - intel_pipe_update_end(new_crtc_state); - - /* - * We usually enable FIFO underrun interrupts as part of the - * CRTC enable sequence during modesets. But when we inherit a - * valid pipe configuration from the BIOS we need to take care - * of enabling them on the CRTC's first fastset. - */ - if (new_crtc_state->update_pipe && !modeset && - old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) - intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); -} - -static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc, - struct intel_atomic_state *state, - struct intel_crtc_state *old_crtc_state, - struct intel_crtc_state *new_crtc_state) -{ - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state); - struct intel_crtc_state *new_slave_crtc_state = - intel_atomic_get_new_crtc_state(state, slave_crtc); - struct intel_crtc_state *old_slave_crtc_state = - intel_atomic_get_old_crtc_state(state, slave_crtc); - - drm_WARN_ON(&i915->drm, !slave_crtc || !new_slave_crtc_state || - !old_slave_crtc_state); - - drm_dbg_kms(&i915->drm, - "Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n", - crtc->base.base.id, crtc->base.name, - slave_crtc->base.base.id, slave_crtc->base.name); - - /* Enable seq for slave with with DP_TP_CTL left Idle until the - * master is ready - */ - intel_crtc_enable_trans_port_sync(slave_crtc, - state, - new_slave_crtc_state); - - /* Enable seq for master with with DP_TP_CTL left Idle */ - intel_crtc_enable_trans_port_sync(crtc, - state, - new_crtc_state); - - /* Set Slave's DP_TP_CTL to Normal */ - intel_set_dp_tp_ctl_normal(slave_crtc, - state); - - /* Set Master's DP_TP_CTL To Normal */ - usleep_range(200, 400); - intel_set_dp_tp_ctl_normal(crtc, - state); - - /* Now do the post crtc enable for all master and slaves */ - intel_post_crtc_enable_updates(slave_crtc, - state); - intel_post_crtc_enable_updates(crtc, - state); -} - static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); @@ -15365,8 +15185,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) entries[pipe] = new_crtc_state->wm.skl.ddb; update_pipes &= ~BIT(pipe); - intel_update_crtc(crtc, state, old_crtc_state, - new_crtc_state); + intel_update_crtc(state, crtc); /* * If this is an already active pipe, it's DDB changed, @@ -15381,67 +15200,62 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) } } + update_pipes = modeset_pipes; + /* * Enable all pipes that needs a modeset and do not depends on other * pipes */ - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { enum pipe pipe = crtc->pipe; if ((modeset_pipes & BIT(pipe)) == 0) continue; if (intel_dp_mst_is_slave_trans(new_crtc_state) || - is_trans_port_sync_slave(new_crtc_state)) + is_trans_port_sync_master(new_crtc_state)) continue; - drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, - entries, I915_MAX_PIPES, pipe)); - - 
entries[pipe] = new_crtc_state->wm.skl.ddb; modeset_pipes &= ~BIT(pipe); - if (is_trans_port_sync_mode(new_crtc_state)) { - struct intel_crtc *slave_crtc; + intel_enable_crtc(state, crtc); + } - intel_update_trans_port_sync_crtcs(crtc, state, - old_crtc_state, - new_crtc_state); + /* + * Then we enable all remaining pipes that depend on other + * pipes: MST slaves and port sync masters. + */ + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + enum pipe pipe = crtc->pipe; - slave_crtc = intel_get_slave_crtc(new_crtc_state); - /* TODO: update entries[] of slave */ - modeset_pipes &= ~BIT(slave_crtc->pipe); + if ((modeset_pipes & BIT(pipe)) == 0) + continue; - } else { - intel_update_crtc(crtc, state, old_crtc_state, - new_crtc_state); - } + modeset_pipes &= ~BIT(pipe); + + intel_enable_crtc(state, crtc); } /* - * Finally enable all pipes that needs a modeset and depends on - * other pipes, right now it is only MST slaves as both port sync slave - * and master are enabled together + * Finally we do the plane updates/etc. for all pipes that got enabled. */ - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { enum pipe pipe = crtc->pipe; - if ((modeset_pipes & BIT(pipe)) == 0) + if ((update_pipes & BIT(pipe)) == 0) continue; drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, entries, I915_MAX_PIPES, pipe)); entries[pipe] = new_crtc_state->wm.skl.ddb; - modeset_pipes &= ~BIT(pipe); + update_pipes &= ~BIT(pipe); - intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state); + intel_update_crtc(state, crtc); } drm_WARN_ON(&dev_priv->drm, modeset_pipes); - + drm_WARN_ON(&dev_priv->drm, update_pipes); } static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) @@ -18261,11 +18075,12 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) best_encoder = connector->base.state->best_encoder; connector->base.state->best_encoder = &encoder->base; + /* FIXME NULL atomic state passed! 
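The reworked skl_commit_modeset_enables() above splits the enable sequence into passes over two bitmasks: independent pipes are enabled first, pipes that depend on another pipe (MST slaves, port-sync masters) second, and the plane updates run last over everything that was enabled. A self-contained sketch of that multi-pass structure; the pipe count and the depends_on_other_pipe() predicate are invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PIPES 4

/* Invented predicate: pretend pipe 2 depends on another pipe. */
static bool depends_on_other_pipe(int pipe)
{
	return pipe == 2;
}

static void enable_pipe(int pipe)   { printf("enable pipe %d\n", pipe); }
static void update_planes(int pipe) { printf("update planes on pipe %d\n", pipe); }

static void commit_enables(uint8_t modeset_pipes)
{
	uint8_t update_pipes = modeset_pipes;
	int pipe;

	/* Pass 1: pipes that do not depend on any other pipe. */
	for (pipe = 0; pipe < MAX_PIPES; pipe++) {
		if (!(modeset_pipes & (1u << pipe)) ||
		    depends_on_other_pipe(pipe))
			continue;
		modeset_pipes &= ~(1u << pipe);
		enable_pipe(pipe);
	}

	/* Pass 2: the remaining, dependent pipes. */
	for (pipe = 0; pipe < MAX_PIPES; pipe++) {
		if (!(modeset_pipes & (1u << pipe)))
			continue;
		modeset_pipes &= ~(1u << pipe);
		enable_pipe(pipe);
	}

	/* Pass 3: plane updates and the rest, for everything enabled above. */
	for (pipe = 0; pipe < MAX_PIPES; pipe++) {
		if (!(update_pipes & (1u << pipe)))
			continue;
		update_pipes &= ~(1u << pipe);
		update_planes(pipe);
	}
}

int main(void)
{
	commit_enables(0x0f);	/* pipes 0,1,3 first, then pipe 2, then planes */
	return 0;
}

Clearing each bit as a pass consumes its pipe is what lets the trailing drm_WARN_ON(modeset_pipes) and drm_WARN_ON(update_pipes) checks in the real code assert that nothing was left behind.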
*/ if (encoder->disable) - encoder->disable(encoder, crtc_state, + encoder->disable(NULL, encoder, crtc_state, connector->base.state); if (encoder->post_disable) - encoder->post_disable(encoder, crtc_state, + encoder->post_disable(NULL, encoder, crtc_state, connector->base.state); connector->base.state->best_encoder = best_encoder; @@ -18802,15 +18617,6 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915) #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) -static bool -has_transcoder(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) -{ - if (cpu_transcoder == TRANSCODER_EDP) - return HAS_TRANSCODER_EDP(dev_priv); - else - return INTEL_INFO(dev_priv)->pipe_mask & BIT(cpu_transcoder); -} - struct intel_display_error_state { u32 power_well_driver; @@ -18919,7 +18725,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv) for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) { enum transcoder cpu_transcoder = transcoders[i]; - if (!has_transcoder(dev_priv, cpu_transcoder)) + if (!HAS_TRANSCODER(dev_priv, cpu_transcoder)) continue; error->transcoder[i].available = true; diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index adb1225a3480..cc7f287804d7 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -320,9 +320,13 @@ enum phy_fia { for_each_pipe(__dev_priv, __p) \ for_each_if((__mask) & BIT(__p)) -#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \ +#define for_each_cpu_transcoder(__dev_priv, __t) \ for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++) \ - for_each_if ((__mask) & (1 << (__t))) + for_each_if (INTEL_INFO(__dev_priv)->cpu_transcoder_mask & BIT(__t)) + +#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \ + for_each_cpu_transcoder(__dev_priv, __t) \ + for_each_if ((__mask) & BIT(__t)) #define for_each_universal_plane(__dev_priv, __pipe, __p) \ for ((__p) = 0; \ diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 1e6eb7f2f72d..bdeea2e02642 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -631,15 +631,9 @@ static void intel_dp_info(struct seq_file *m, } static void intel_dp_mst_info(struct seq_file *m, - struct intel_connector *intel_connector) + struct intel_connector *intel_connector) { - struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector); - struct intel_dp_mst_encoder *intel_mst = - enc_to_mst(intel_encoder); - struct intel_digital_port *intel_dig_port = intel_mst->primary; - struct intel_dp *intel_dp = &intel_dig_port->dp; - bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, - intel_connector->port); + bool has_audio = intel_connector->port->has_audio; seq_printf(m, "\taudio support: %s\n", yesno(has_audio)); } @@ -1326,6 +1320,16 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data) intel_dp->compliance.test_data.vdisplay); seq_printf(m, "bpc: %u\n", intel_dp->compliance.test_data.bpc); + } else if (intel_dp->compliance.test_type == + DP_TEST_LINK_PHY_TEST_PATTERN) { + seq_printf(m, "pattern: %d\n", + intel_dp->compliance.test_data.phytest.phy_pattern); + seq_printf(m, "Number of lanes: %d\n", + intel_dp->compliance.test_data.phytest.num_lanes); + seq_printf(m, "Link Rate: %d\n", + intel_dp->compliance.test_data.phytest.link_rate); + seq_printf(m, "level: %02x\n", + 
intel_dp->train_set[0]); } } else seq_puts(m, "0"); @@ -1358,7 +1362,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data) if (encoder && connector->status == connector_status_connected) { intel_dp = enc_to_intel_dp(encoder); - seq_printf(m, "%02lx", intel_dp->compliance.test_type); + seq_printf(m, "%02lx\n", intel_dp->compliance.test_type); } else seq_puts(m, "0"); } @@ -1927,7 +1931,7 @@ static const struct { {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}, }; -int intel_display_debugfs_register(struct drm_i915_private *i915) +void intel_display_debugfs_register(struct drm_i915_private *i915) { struct drm_minor *minor = i915->drm.primary; int i; @@ -1940,9 +1944,9 @@ int intel_display_debugfs_register(struct drm_i915_private *i915) intel_display_debugfs_files[i].fops); } - return drm_debugfs_create_files(intel_display_debugfs_list, - ARRAY_SIZE(intel_display_debugfs_list), - minor->debugfs_root, minor); + drm_debugfs_create_files(intel_display_debugfs_list, + ARRAY_SIZE(intel_display_debugfs_list), + minor->debugfs_root, minor); } static int i915_panel_show(struct seq_file *m, void *data) diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h index a3bea1ce04c2..c922c1745bfe 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.h +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h @@ -10,10 +10,10 @@ struct drm_connector; struct drm_i915_private; #ifdef CONFIG_DEBUG_FS -int intel_display_debugfs_register(struct drm_i915_private *i915); +void intel_display_debugfs_register(struct drm_i915_private *i915); int intel_connector_debugfs_add(struct drm_connector *connector); #else -static inline int intel_display_debugfs_register(struct drm_i915_private *i915) { return 0; } +static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {} static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; } #endif diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 246e406bb385..03bdde19c8c9 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -1873,20 +1873,27 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains) static void print_power_domains(struct i915_power_domains *power_domains, const char *prefix, u64 mask) { + struct drm_i915_private *i915 = container_of(power_domains, + struct drm_i915_private, + power_domains); enum intel_display_power_domain domain; - DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask)); + drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask)); for_each_power_domain(domain, mask) - DRM_DEBUG_DRIVER("%s use_count %d\n", - intel_display_power_domain_str(domain), - power_domains->domain_use_count[domain]); + drm_dbg(&i915->drm, "%s use_count %d\n", + intel_display_power_domain_str(domain), + power_domains->domain_use_count[domain]); } static void print_async_put_domains_state(struct i915_power_domains *power_domains) { - DRM_DEBUG_DRIVER("async_put_wakeref %u\n", - power_domains->async_put_wakeref); + struct drm_i915_private *i915 = container_of(power_domains, + struct drm_i915_private, + power_domains); + + drm_dbg(&i915->drm, "async_put_wakeref %u\n", + power_domains->async_put_wakeref); print_power_domains(power_domains, "async_put_domains[0]", power_domains->async_put_domains[0]); @@ -4140,7 +4147,7 @@ static const struct i915_power_well_desc 
tgl_power_wells[] = { { .name = "AUX D TBT1", .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4151,7 +4158,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX E TBT2", .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4162,7 +4169,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX F TBT3", .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4173,7 +4180,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX G TBT4", .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4184,7 +4191,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX H TBT5", .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4195,7 +4202,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX I TBT6", .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4480,7 +4487,8 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices, "Invalid number of dbuf slices requested\n"); - DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices); + drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n", + req_slices); /* * Might be running this in parallel to gen9_dc_off_power_well_enable @@ -5016,7 +5024,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) const struct buddy_page_mask *table; int i; - if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0)) + if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0)) /* Wa_1409767108: tgl */ table = wa_1409767108_buddy_page_masks; else diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 5e00e611f077..ba8c08145c88 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -132,8 +132,7 @@ struct intel_encoder { u16 cloneable; u8 pipe_mask; enum intel_hotplug_state (*hotplug)(struct intel_encoder *encoder, - struct intel_connector *connector, - bool irq_received); + struct intel_connector *connector); enum intel_output_type (*compute_output_type)(struct intel_encoder *, struct intel_crtc_state *, struct drm_connector_state *); @@ -146,28 +145,35 @@ struct intel_encoder { void (*update_prepare)(struct intel_atomic_state *, struct intel_encoder *, struct intel_crtc *); - void (*pre_pll_enable)(struct intel_encoder *, + void (*pre_pll_enable)(struct intel_atomic_state *, + struct intel_encoder *, const struct intel_crtc_state *, const struct drm_connector_state *); - void (*pre_enable)(struct intel_encoder *, + void (*pre_enable)(struct intel_atomic_state *, + struct intel_encoder *, const struct intel_crtc_state *, const struct drm_connector_state *); - 
void (*enable)(struct intel_encoder *, + void (*enable)(struct intel_atomic_state *, + struct intel_encoder *, const struct intel_crtc_state *, const struct drm_connector_state *); void (*update_complete)(struct intel_atomic_state *, struct intel_encoder *, struct intel_crtc *); - void (*disable)(struct intel_encoder *, + void (*disable)(struct intel_atomic_state *, + struct intel_encoder *, const struct intel_crtc_state *, const struct drm_connector_state *); - void (*post_disable)(struct intel_encoder *, + void (*post_disable)(struct intel_atomic_state *, + struct intel_encoder *, const struct intel_crtc_state *, const struct drm_connector_state *); - void (*post_pll_disable)(struct intel_encoder *, + void (*post_pll_disable)(struct intel_atomic_state *, + struct intel_encoder *, const struct intel_crtc_state *, const struct drm_connector_state *); - void (*update_pipe)(struct intel_encoder *, + void (*update_pipe)(struct intel_atomic_state *, + struct intel_encoder *, const struct intel_crtc_state *, const struct drm_connector_state *); /* Read out the current hw state of this connector, returning true if @@ -425,11 +431,14 @@ struct intel_connector { struct edid *edid; struct edid *detect_edid; + /* Number of times hotplug detection was tried after an HPD interrupt */ + int hotplug_retries; + /* since POLL and HPD connectors may use the same HPD line keep the native state of connector->polled in case hotplug storm detection changes it */ u8 polled; - void *port; /* store this opaque as its illegal to dereference it */ + struct drm_dp_mst_port *port; struct intel_dp *mst_port; @@ -640,6 +649,16 @@ struct intel_crtc_scaler_state { #define I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP (1<<1) /* Flag to use the scanline counter instead of the pixel counter */ #define I915_MODE_FLAG_USE_SCANLINE_COUNTER (1<<2) +/* + * TE0 or TE1 flag is set if the crtc has a DSI encoder which + * is operating in command mode. 
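The intel_display_types.h hunk above threads the atomic state through every encoder hook as a new first argument, which is what lets the intel_encoders_*() dispatchers earlier in this diff simply pass the state along. A bare-bones sketch of that vtable shape; the struct names are placeholders and the encoder argument of the real hooks is omitted for brevity.

struct atomic_state;
struct crtc_state;
struct connector_state;

struct encoder_hooks {
	void (*pre_enable)(struct atomic_state *state,
			   const struct crtc_state *crtc_state,
			   const struct connector_state *conn_state);
	void (*enable)(struct atomic_state *state,
		       const struct crtc_state *crtc_state,
		       const struct connector_state *conn_state);
	void (*disable)(struct atomic_state *state,
			const struct crtc_state *old_crtc_state,
			const struct connector_state *old_conn_state);
};

static void encoders_enable(struct atomic_state *state,
			    const struct encoder_hooks *hooks,
			    const struct crtc_state *crtc_state,
			    const struct connector_state *conn_state)
{
	if (hooks->pre_enable)
		hooks->pre_enable(state, crtc_state, conn_state);
	if (hooks->enable)
		hooks->enable(state, crtc_state, conn_state);
}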
+ * Flag to use TE from DSI0 instead of VBI in command mode + */ +#define I915_MODE_FLAG_DSI_USE_TE0 (1<<3) +/* Flag to use TE from DSI1 instead of VBI in command mode */ +#define I915_MODE_FLAG_DSI_USE_TE1 (1<<4) +/* Flag to indicate mipi dsi periodic command mode where we do not get TE */ +#define I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE (1<<5) struct intel_wm_level { bool enable; @@ -1015,6 +1034,7 @@ struct intel_crtc_state { union hdmi_infoframe spd; union hdmi_infoframe hdmi; union hdmi_infoframe drm; + struct drm_dp_vsc_sdp vsc; } infoframes; /* HDMI scrambling status */ @@ -1238,6 +1258,7 @@ struct intel_dp_compliance_data { u8 video_pattern; u16 hdisplay, vdisplay; u8 bpc; + struct drm_dp_phy_test_params phytest; }; struct intel_dp_compliance { diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 0a417cd2af2b..d4fcc9583869 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -164,6 +164,17 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) }; int i, max_rate; + if (drm_dp_has_quirk(&intel_dp->desc, 0, + DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) { + /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */ + static const int quirk_rates[] = { 162000, 270000, 324000 }; + + memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates)); + intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates); + + return; + } + max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]); for (i = 0; i < ARRAY_SIZE(dp_rates); i++) { @@ -452,6 +463,7 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, int link_rate, u8 lane_count) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); int index; index = intel_dp_rate_index(intel_dp->common_rates, @@ -462,7 +474,8 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, !intel_dp_can_link_train_fallback_for_edp(intel_dp, intel_dp->common_rates[index - 1], lane_count)) { - DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n"); + drm_dbg_kms(&i915->drm, + "Retrying Link training for eDP with same parameters\n"); return 0; } intel_dp->max_link_rate = intel_dp->common_rates[index - 1]; @@ -472,13 +485,14 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, !intel_dp_can_link_train_fallback_for_edp(intel_dp, intel_dp_max_common_rate(intel_dp), lane_count >> 1)) { - DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n"); + drm_dbg_kms(&i915->drm, + "Retrying Link training for eDP with same parameters\n"); return 0; } intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); intel_dp->max_link_lane_count = lane_count >> 1; } else { - DRM_ERROR("Link Training Unsuccessful\n"); + drm_err(&i915->drm, "Link Training Unsuccessful\n"); return -1; } @@ -553,6 +567,7 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915, static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock, int mode_hdisplay) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 min_slice_count, i; int max_slice_width; @@ -565,8 +580,9 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd); if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) { - DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n", - max_slice_width); + drm_dbg_kms(&i915->drm, + 
"Unsupported slice width %d by DP DSC Sink device\n", + max_slice_width); return 0; } /* Also take into account max slice width */ @@ -584,7 +600,8 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, return valid_dsc_slicecount[i]; } - DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count); + drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n", + min_slice_count); return 0; } @@ -1374,7 +1391,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, * lowest possible wakeup latency and so prevent the cpu from going into * deep sleep states. */ - pm_qos_update_request(&i915->pm_qos, 0); + cpu_latency_qos_update_request(&i915->pm_qos, 0); intel_dp_check_edp(intel_dp); @@ -1507,7 +1524,7 @@ done: ret = recv_bytes; out: - pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE); + cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE); if (vdd) edp_panel_vdd_off(intel_dp, false); @@ -1832,6 +1849,7 @@ static void snprintf_int_array(char *str, size_t len, static void intel_dp_print_rates(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); char str[128]; /* FIXME: too big for stack? */ if (!drm_debug_enabled(DRM_UT_KMS)) @@ -1839,15 +1857,15 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp) snprintf_int_array(str, sizeof(str), intel_dp->source_rates, intel_dp->num_source_rates); - DRM_DEBUG_KMS("source rates: %s\n", str); + drm_dbg_kms(&i915->drm, "source rates: %s\n", str); snprintf_int_array(str, sizeof(str), intel_dp->sink_rates, intel_dp->num_sink_rates); - DRM_DEBUG_KMS("sink rates: %s\n", str); + drm_dbg_kms(&i915->drm, "sink rates: %s\n", str); snprintf_int_array(str, sizeof(str), intel_dp->common_rates, intel_dp->num_common_rates); - DRM_DEBUG_KMS("common rates: %s\n", str); + drm_dbg_kms(&i915->drm, "common rates: %s\n", str); } int @@ -1954,6 +1972,8 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, struct intel_crtc_state *pipe_config, struct link_config_limits *limits) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + /* For DP Compliance we override the computed bpp for the pipe */ if (intel_dp->compliance.test_data.bpc != 0) { int bpp = 3 * intel_dp->compliance.test_data.bpc; @@ -1961,7 +1981,7 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, limits->min_bpp = limits->max_bpp = bpp; pipe_config->dither_force_disable = bpp == 6 * 3; - DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp); + drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp); } /* Use values requested by Compliance Test Request */ @@ -2055,6 +2075,7 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { + struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; u8 line_buf_depth; @@ -2089,7 +2110,8 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); if (!line_buf_depth) { - DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n"); + drm_dbg_kms(&i915->drm, + "DSC Sink Line Buffer Depth invalid\n"); return -EINVAL; } @@ -2114,7 +2136,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - struct drm_display_mode 
*adjusted_mode = &pipe_config->hw.adjusted_mode; + const struct drm_display_mode *adjusted_mode = + &pipe_config->hw.adjusted_mode; u8 dsc_max_bpc; int pipe_bpp; int ret; @@ -2229,7 +2252,9 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { - struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct drm_display_mode *adjusted_mode = + &pipe_config->hw.adjusted_mode; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct link_config_limits limits; int common_len; @@ -2264,11 +2289,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); - DRM_DEBUG_KMS("DP link computation with max lane count %i " - "max rate %d max bpp %d pixel clock %iKHz\n", - limits.max_lane_count, - intel_dp->common_rates[limits.max_clock], - limits.max_bpp, adjusted_mode->crtc_clock); + drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i " + "max rate %d max bpp %d pixel clock %iKHz\n", + limits.max_lane_count, + intel_dp->common_rates[limits.max_clock], + limits.max_bpp, adjusted_mode->crtc_clock); /* * Optimize for slow and wide. This is the place to add alternative @@ -2277,7 +2302,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); /* enable compression if the mode doesn't fit available BW */ - DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en); + drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en); if (ret || intel_dp->force_dsc_en) { ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, conn_state, &limits); @@ -2286,26 +2311,29 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, } if (pipe_config->dsc.compression_enable) { - DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", - pipe_config->lane_count, pipe_config->port_clock, - pipe_config->pipe_bpp, - pipe_config->dsc.compressed_bpp); - - DRM_DEBUG_KMS("DP link rate required %i available %i\n", - intel_dp_link_required(adjusted_mode->crtc_clock, - pipe_config->dsc.compressed_bpp), - intel_dp_max_data_rate(pipe_config->port_clock, - pipe_config->lane_count)); + drm_dbg_kms(&i915->drm, + "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", + pipe_config->lane_count, pipe_config->port_clock, + pipe_config->pipe_bpp, + pipe_config->dsc.compressed_bpp); + + drm_dbg_kms(&i915->drm, + "DP link rate required %i available %i\n", + intel_dp_link_required(adjusted_mode->crtc_clock, + pipe_config->dsc.compressed_bpp), + intel_dp_max_data_rate(pipe_config->port_clock, + pipe_config->lane_count)); } else { - DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n", - pipe_config->lane_count, pipe_config->port_clock, - pipe_config->pipe_bpp); + drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n", + pipe_config->lane_count, pipe_config->port_clock, + pipe_config->pipe_bpp); - DRM_DEBUG_KMS("DP link rate required %i available %i\n", - intel_dp_link_required(adjusted_mode->crtc_clock, - pipe_config->pipe_bpp), - intel_dp_max_data_rate(pipe_config->port_clock, - pipe_config->lane_count)); + drm_dbg_kms(&i915->drm, + "DP link rate required %i available %i\n", + intel_dp_link_required(adjusted_mode->crtc_clock, + pipe_config->pipe_bpp), + intel_dp_max_data_rate(pipe_config->port_clock, + pipe_config->lane_count)); } return 0; } @@ -2315,6 
+2343,7 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp, struct drm_connector *connector, struct intel_crtc_state *crtc_state) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); const struct drm_display_info *info = &connector->display_info; const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; @@ -2331,7 +2360,8 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp, /* YCBCR 420 output conversion needs a scaler */ ret = skl_update_scaler_crtc(crtc_state); if (ret) { - DRM_DEBUG_KMS("Scaler allocation for output failed\n"); + drm_dbg_kms(&i915->drm, + "Scaler allocation for output failed\n"); return ret; } @@ -2384,6 +2414,128 @@ static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv, return true; } +static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state, + struct drm_dp_vsc_sdp *vsc) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + /* + * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 + * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ + * Colorimetry Format indication. + */ + vsc->revision = 0x5; + vsc->length = 0x13; + + /* DP 1.4a spec, Table 2-120 */ + switch (crtc_state->output_format) { + case INTEL_OUTPUT_FORMAT_YCBCR444: + vsc->pixelformat = DP_PIXELFORMAT_YUV444; + break; + case INTEL_OUTPUT_FORMAT_YCBCR420: + vsc->pixelformat = DP_PIXELFORMAT_YUV420; + break; + case INTEL_OUTPUT_FORMAT_RGB: + default: + vsc->pixelformat = DP_PIXELFORMAT_RGB; + } + + switch (conn_state->colorspace) { + case DRM_MODE_COLORIMETRY_BT709_YCC: + vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; + break; + case DRM_MODE_COLORIMETRY_XVYCC_601: + vsc->colorimetry = DP_COLORIMETRY_XVYCC_601; + break; + case DRM_MODE_COLORIMETRY_XVYCC_709: + vsc->colorimetry = DP_COLORIMETRY_XVYCC_709; + break; + case DRM_MODE_COLORIMETRY_SYCC_601: + vsc->colorimetry = DP_COLORIMETRY_SYCC_601; + break; + case DRM_MODE_COLORIMETRY_OPYCC_601: + vsc->colorimetry = DP_COLORIMETRY_OPYCC_601; + break; + case DRM_MODE_COLORIMETRY_BT2020_CYCC: + vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC; + break; + case DRM_MODE_COLORIMETRY_BT2020_RGB: + vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB; + break; + case DRM_MODE_COLORIMETRY_BT2020_YCC: + vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC; + break; + case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: + case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: + vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB; + break; + default: + /* + * RGB->YCBCR color conversion uses the BT.709 + * color space. 
+ */ + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) + vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; + else + vsc->colorimetry = DP_COLORIMETRY_DEFAULT; + break; + } + + vsc->bpc = crtc_state->pipe_bpp / 3; + + /* only RGB pixelformat supports 6 bpc */ + drm_WARN_ON(&dev_priv->drm, + vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB); + + /* all YCbCr are always limited range */ + vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA; + vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED; +} + +static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp, + struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc; + + /* When PSR is enabled, VSC SDP is handled by PSR routine */ + if (intel_psr_enabled(intel_dp)) + return; + + if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state)) + return; + + crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); + vsc->sdp_type = DP_SDP_VSC; + intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, + &crtc_state->infoframes.vsc); +} + +static void +intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, + struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + int ret; + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm; + + if (!conn_state->hdr_output_metadata) + return; + + ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state); + + if (ret) { + drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n"); + return; + } + + crtc_state->infoframes.enable |= + intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); +} + int intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, @@ -2489,6 +2641,8 @@ intel_dp_compute_config(struct intel_encoder *encoder, intel_dp_set_clock(encoder, pipe_config); intel_psr_compute_config(intel_dp, pipe_config); + intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); + intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); return 0; } @@ -2633,22 +2787,27 @@ static void wait_panel_status(struct intel_dp *intel_dp, static void wait_panel_on(struct intel_dp *intel_dp) { - DRM_DEBUG_KMS("Wait for panel power on\n"); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + drm_dbg_kms(&i915->drm, "Wait for panel power on\n"); wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); } static void wait_panel_off(struct intel_dp *intel_dp) { - DRM_DEBUG_KMS("Wait for panel power off time\n"); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + drm_dbg_kms(&i915->drm, "Wait for panel power off time\n"); wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); } static void wait_panel_power_cycle(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); ktime_t panel_power_on_time; s64 panel_power_off_duration; - DRM_DEBUG_KMS("Wait for panel power cycle\n"); + drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n"); /* take the difference of currrent time and panel power off time * and then make panel wait for t11_t12 if needed. 
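intel_dp_compute_vsc_colorimetry() above maps the connector's colorspace property to a DP VSC colorimetry code, falling back to BT.709 YCC only when the pipe outputs YCbCr 4:2:0, since the RGB to YCbCr conversion uses BT.709. A reduced sketch of that selection follows; the enum values are placeholders, not the DP 1.4a encodings.

enum output_format   { OUT_RGB, OUT_YCBCR444, OUT_YCBCR420 };
enum colorspace_prop { CS_DEFAULT, CS_BT709_YCC, CS_BT2020_RGB };
enum vsc_colorimetry { COL_DEFAULT, COL_BT709_YCC, COL_BT2020_RGB };

static enum vsc_colorimetry
pick_colorimetry(enum colorspace_prop cs, enum output_format fmt)
{
	switch (cs) {
	case CS_BT709_YCC:
		return COL_BT709_YCC;
	case CS_BT2020_RGB:
		return COL_BT2020_RGB;
	default:
		/* RGB->YCbCr conversion uses BT.709, so 4:2:0 output
		 * defaults to BT.709 YCC; everything else stays default. */
		return fmt == OUT_YCBCR420 ? COL_BT709_YCC : COL_DEFAULT;
	}
}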
*/ @@ -3012,11 +3171,12 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder)); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); if (!intel_dp_is_edp(intel_dp)) return; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&i915->drm, "\n"); intel_panel_enable_backlight(crtc_state, conn_state); _intel_edp_backlight_on(intel_dp); @@ -3050,11 +3210,12 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp) void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); if (!intel_dp_is_edp(intel_dp)) return; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&i915->drm, "\n"); _intel_edp_backlight_off(intel_dp); intel_panel_disable_backlight(old_conn_state); @@ -3067,6 +3228,7 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) static void intel_edp_backlight_power(struct intel_connector *connector, bool enable) { + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_dp *intel_dp = intel_attached_dp(connector); intel_wakeref_t wakeref; bool is_enabled; @@ -3077,8 +3239,8 @@ static void intel_edp_backlight_power(struct intel_connector *connector, if (is_enabled == enable) return; - DRM_DEBUG_KMS("panel power control backlight %s\n", - enable ? "enable" : "disable"); + drm_dbg_kms(&i915->drm, "panel power control backlight %s\n", + enable ? "enable" : "disable"); if (enable) _intel_edp_backlight_on(intel_dp); @@ -3188,6 +3350,7 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, bool enable) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); int ret; if (!crtc_state->dsc.compression_enable) @@ -3196,13 +3359,15 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, enable ? DP_DECOMPRESSION_EN : 0); if (ret < 0) - DRM_DEBUG_KMS("Failed to %s sink decompression state\n", - enable ? "enable" : "disable"); + drm_dbg_kms(&i915->drm, + "Failed to %s sink decompression state\n", + enable ? "enable" : "disable"); } /* If the sink supports it, try to set the power state appropriately */ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); int ret, i; /* Should have a valid DPCD by this point */ @@ -3235,8 +3400,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) } if (ret != 1) - DRM_DEBUG_KMS("failed to %s sink power state\n", - mode == DRM_MODE_DPMS_ON ? "enable" : "disable"); + drm_dbg_kms(&i915->drm, "failed to %s sink power state\n", + mode == DRM_MODE_DPMS_ON ? 
"enable" : "disable"); } static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv, @@ -3393,7 +3558,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder, } } -static void intel_disable_dp(struct intel_encoder *encoder, +static void intel_disable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -3413,21 +3579,24 @@ static void intel_disable_dp(struct intel_encoder *encoder, intel_edp_panel_off(intel_dp); } -static void g4x_disable_dp(struct intel_encoder *encoder, +static void g4x_disable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - intel_disable_dp(encoder, old_crtc_state, old_conn_state); + intel_disable_dp(state, encoder, old_crtc_state, old_conn_state); } -static void vlv_disable_dp(struct intel_encoder *encoder, +static void vlv_disable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - intel_disable_dp(encoder, old_crtc_state, old_conn_state); + intel_disable_dp(state, encoder, old_crtc_state, old_conn_state); } -static void g4x_post_disable_dp(struct intel_encoder *encoder, +static void g4x_post_disable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -3447,14 +3616,16 @@ static void g4x_post_disable_dp(struct intel_encoder *encoder, ilk_edp_pll_off(intel_dp, old_crtc_state); } -static void vlv_post_disable_dp(struct intel_encoder *encoder, +static void vlv_post_disable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { intel_dp_link_down(encoder, old_crtc_state); } -static void chv_post_disable_dp(struct intel_encoder *encoder, +static void chv_post_disable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -3580,7 +3751,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp, intel_de_posting_read(dev_priv, intel_dp->output_reg); } -static void intel_enable_dp(struct intel_encoder *encoder, +static void intel_enable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -3626,22 +3798,25 @@ static void intel_enable_dp(struct intel_encoder *encoder, } } -static void g4x_enable_dp(struct intel_encoder *encoder, +static void g4x_enable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - intel_enable_dp(encoder, pipe_config, conn_state); + intel_enable_dp(state, encoder, pipe_config, conn_state); intel_edp_backlight_on(pipe_config, conn_state); } -static void vlv_enable_dp(struct intel_encoder *encoder, +static void vlv_enable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { intel_edp_backlight_on(pipe_config, conn_state); } -static void g4x_pre_enable_dp(struct intel_encoder *encoder, +static void 
g4x_pre_enable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -3761,16 +3936,18 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder, intel_dp_init_panel_power_sequencer_registers(intel_dp, true); } -static void vlv_pre_enable_dp(struct intel_encoder *encoder, +static void vlv_pre_enable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { vlv_phy_pre_encoder_enable(encoder, pipe_config); - intel_enable_dp(encoder, pipe_config, conn_state); + intel_enable_dp(state, encoder, pipe_config, conn_state); } -static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder, +static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -3779,19 +3956,21 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder, vlv_phy_pre_pll_enable(encoder, pipe_config); } -static void chv_pre_enable_dp(struct intel_encoder *encoder, +static void chv_pre_enable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { chv_phy_pre_encoder_enable(encoder, pipe_config); - intel_enable_dp(encoder, pipe_config, conn_state); + intel_enable_dp(state, encoder, pipe_config, conn_state); /* Second common lane will stay alive on its own now */ chv_phy_release_cl2_override(encoder); } -static void chv_dp_pre_pll_enable(struct intel_encoder *encoder, +static void chv_dp_pre_pll_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -3800,7 +3979,8 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder, chv_phy_pre_pll_enable(encoder, pipe_config); } -static void chv_dp_post_pll_disable(struct intel_encoder *encoder, +static void chv_dp_post_pll_disable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -4319,6 +4499,7 @@ intel_dp_link_down(struct intel_encoder *encoder, static void intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 dpcd_ext[6]; /* @@ -4334,20 +4515,22 @@ intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp) if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV, &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) { - DRM_ERROR("DPCD failed read at extended capabilities\n"); + drm_err(&i915->drm, + "DPCD failed read at extended capabilities\n"); return; } if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) { - DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n"); + drm_dbg_kms(&i915->drm, + "DPCD extended DPCD rev less than base DPCD rev\n"); return; } if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext))) return; - DRM_DEBUG_KMS("Base DPCD: %*ph\n", - (int)sizeof(intel_dp->dpcd), intel_dp->dpcd); + drm_dbg_kms(&i915->drm, "Base DPCD: %*ph\n", + (int)sizeof(intel_dp->dpcd), intel_dp->dpcd); memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)); } @@ -4355,13 +4538,16 @@ intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp) bool intel_dp_read_dpcd(struct intel_dp 
*intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd, sizeof(intel_dp->dpcd)) < 0) return false; /* aux transfer failed */ intel_dp_extended_receiver_capabilities(intel_dp); - DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd); + drm_dbg_kms(&i915->drm, "DPCD: %*ph\n", (int)sizeof(intel_dp->dpcd), + intel_dp->dpcd); return intel_dp->dpcd[DP_DPCD_REV] != 0; } @@ -4378,6 +4564,8 @@ bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + /* * Clear the cached register set to avoid using stale values * for the sinks that do not support DSC. @@ -4393,20 +4581,23 @@ static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp) if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT, intel_dp->dsc_dpcd, sizeof(intel_dp->dsc_dpcd)) < 0) - DRM_ERROR("Failed to read DPCD register 0x%x\n", - DP_DSC_SUPPORT); + drm_err(&i915->drm, + "Failed to read DPCD register 0x%x\n", + DP_DSC_SUPPORT); - DRM_DEBUG_KMS("DSC DPCD: %*ph\n", - (int)sizeof(intel_dp->dsc_dpcd), - intel_dp->dsc_dpcd); + drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n", + (int)sizeof(intel_dp->dsc_dpcd), + intel_dp->dsc_dpcd); /* FEC is supported only on DP 1.4 */ if (!intel_dp_is_edp(intel_dp) && drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, &intel_dp->fec_capable) < 0) - DRM_ERROR("Failed to read FEC DPCD register\n"); + drm_err(&i915->drm, + "Failed to read FEC DPCD register\n"); - DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable); + drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n", + intel_dp->fec_capable); } } @@ -4580,14 +4771,16 @@ intel_dp_can_mst(struct intel_dp *intel_dp) static void intel_dp_configure_mst(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; bool sink_can_mst = intel_dp_sink_can_mst(intel_dp); - DRM_DEBUG_KMS("[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n", - encoder->base.base.id, encoder->base.name, - yesno(intel_dp->can_mst), yesno(sink_can_mst), - yesno(i915_modparams.enable_dp_mst)); + drm_dbg_kms(&i915->drm, + "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n", + encoder->base.base.id, encoder->base.name, + yesno(intel_dp->can_mst), yesno(sink_can_mst), + yesno(i915_modparams.enable_dp_mst)); if (!intel_dp->can_mst) return; @@ -4633,6 +4826,205 @@ intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, return false; } +static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc, + struct dp_sdp *sdp, size_t size) +{ + size_t length = sizeof(struct dp_sdp); + + if (size < length) + return -ENOSPC; + + memset(sdp, 0, size); + + /* + * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 + * VSC SDP Header Bytes + */ + sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */ + sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */ + sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */ + sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */ + + /* VSC SDP Payload for DB16 through DB18 */ + /* Pixel Encoding and Colorimetry Formats */ + sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */ + sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */ + + switch (vsc->bpc) { + case 6: + /* 6bpc: 0x0 */ + break; + case 8: + sdp->db[17] = 0x1; /* DB17[3:0] */ + break; + case 10: + 
sdp->db[17] = 0x2; + break; + case 12: + sdp->db[17] = 0x3; + break; + case 16: + sdp->db[17] = 0x4; + break; + default: + MISSING_CASE(vsc->bpc); + break; + } + /* Dynamic Range and Component Bit Depth */ + if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA) + sdp->db[17] |= 0x80; /* DB17[7] */ + + /* Content Type */ + sdp->db[18] = vsc->content_type & 0x7; + + return length; +} + +static ssize_t +intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe, + struct dp_sdp *sdp, + size_t size) +{ + size_t length = sizeof(struct dp_sdp); + const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE; + unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE]; + ssize_t len; + + if (size < length) + return -ENOSPC; + + memset(sdp, 0, size); + + len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf)); + if (len < 0) { + DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n"); + return -ENOSPC; + } + + if (len != infoframe_size) { + DRM_DEBUG_KMS("wrong static hdr metadata size\n"); + return -ENOSPC; + } + + /* + * Set up the infoframe sdp packet for HDR static metadata. + * Prepare VSC Header for SU as per DP 1.4a spec, + * Table 2-100 and Table 2-101 + */ + + /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */ + sdp->sdp_header.HB0 = 0; + /* + * Packet Type 80h + Non-audio INFOFRAME Type value + * HDMI_INFOFRAME_TYPE_DRM: 0x87 + * - 80h + Non-audio INFOFRAME Type value + * - InfoFrame Type: 0x07 + * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame] + */ + sdp->sdp_header.HB1 = drm_infoframe->type; + /* + * Least Significant Eight Bits of (Data Byte Count – 1) + * infoframe_size - 1 + */ + sdp->sdp_header.HB2 = 0x1D; + /* INFOFRAME SDP Version Number */ + sdp->sdp_header.HB3 = (0x13 << 2); + /* CTA Header Byte 2 (INFOFRAME Version Number) */ + sdp->db[0] = drm_infoframe->version; + /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ + sdp->db[1] = drm_infoframe->length; + /* + * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after + * HDMI_INFOFRAME_HEADER_SIZE + */ + BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2); + memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE], + HDMI_DRM_INFOFRAME_SIZE); + + /* + * Size of DP infoframe sdp packet for HDR static metadata consists of + * - DP SDP Header(struct dp_sdp_header): 4 bytes + * - Two Data Blocks: 2 bytes + * CTA Header Byte2 (INFOFRAME Version Number) + * CTA Header Byte3 (Length of INFOFRAME) + * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes + * + * Prior to GEN11's GMP register size is identical to DP HDR static metadata + * infoframe size. But GEN11+ has larger than that size, write_infoframe + * will pad rest of the size. 
+ */ + return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE; +} + +static void intel_write_dp_sdp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type) +{ + struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct dp_sdp sdp = {}; + ssize_t len; + + if ((crtc_state->infoframes.enable & + intel_hdmi_infoframe_enable(type)) == 0) + return; + + switch (type) { + case DP_SDP_VSC: + len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp, + sizeof(sdp)); + break; + case HDMI_PACKET_TYPE_GAMUT_METADATA: + len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm, + &sdp, sizeof(sdp)); + break; + default: + MISSING_CASE(type); + return; + } + + if (drm_WARN_ON(&dev_priv->drm, len < 0)) + return; + + intel_dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len); +} + +void intel_dp_set_infoframes(struct intel_encoder *encoder, + bool enable, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); + u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | + VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW | + VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK; + u32 val = intel_de_read(dev_priv, reg); + + /* TODO: Add DSC case (DIP_ENABLE_PPS) */ + /* When PSR is enabled, this routine doesn't disable VSC DIP */ + if (intel_psr_enabled(intel_dp)) + val &= ~dip_enable; + else + val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW); + + if (!enable) { + intel_de_write(dev_priv, reg, val); + intel_de_posting_read(dev_priv, reg); + return; + } + + intel_de_write(dev_priv, reg, val); + intel_de_posting_read(dev_priv, reg); + + /* When PSR is enabled, VSC SDP is handled by PSR routine */ + if (!intel_psr_enabled(intel_dp)) + intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC); + + intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA); +} + static void intel_dp_setup_vsc_sdp(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, @@ -4762,6 +5154,7 @@ intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct dp_sdp infoframe_sdp = {}; struct hdmi_drm_infoframe drm_infoframe = {}; @@ -4772,18 +5165,20 @@ intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, ret = drm_hdmi_infoframe_set_hdr_metadata(&drm_infoframe, conn_state); if (ret) { - DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n"); + drm_dbg_kms(&i915->drm, + "couldn't set HDR metadata in infoframe\n"); return; } len = hdmi_drm_infoframe_pack_only(&drm_infoframe, buf, sizeof(buf)); if (len < 0) { - DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n"); + drm_dbg_kms(&i915->drm, + "buffer size is smaller than hdr metadata infoframe\n"); return; } if (len != infoframe_size) { - DRM_DEBUG_KMS("wrong static hdr metadata size\n"); + drm_dbg_kms(&i915->drm, "wrong static hdr metadata size\n"); return; } @@ -4861,6 +5256,7 @@ void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp, static u8 intel_dp_autotest_link_training(struct intel_dp 
*intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); int status = 0; int test_link_rate; u8 test_lane_count, test_link_bw; @@ -4872,7 +5268,7 @@ static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) &test_lane_count); if (status <= 0) { - DRM_DEBUG_KMS("Lane count read failed\n"); + drm_dbg_kms(&i915->drm, "Lane count read failed\n"); return DP_TEST_NAK; } test_lane_count &= DP_MAX_LANE_COUNT_MASK; @@ -4880,7 +5276,7 @@ static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE, &test_link_bw); if (status <= 0) { - DRM_DEBUG_KMS("Link Rate read failed\n"); + drm_dbg_kms(&i915->drm, "Link Rate read failed\n"); return DP_TEST_NAK; } test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); @@ -4898,6 +5294,7 @@ static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 test_pattern; u8 test_misc; __be16 h_width, v_height; @@ -4907,7 +5304,7 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, &test_pattern); if (status <= 0) { - DRM_DEBUG_KMS("Test pattern read failed\n"); + drm_dbg_kms(&i915->drm, "Test pattern read failed\n"); return DP_TEST_NAK; } if (test_pattern != DP_COLOR_RAMP) @@ -4916,21 +5313,21 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, &h_width, 2); if (status <= 0) { - DRM_DEBUG_KMS("H Width read failed\n"); + drm_dbg_kms(&i915->drm, "H Width read failed\n"); return DP_TEST_NAK; } status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, &v_height, 2); if (status <= 0) { - DRM_DEBUG_KMS("V Height read failed\n"); + drm_dbg_kms(&i915->drm, "V Height read failed\n"); return DP_TEST_NAK; } status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, &test_misc); if (status <= 0) { - DRM_DEBUG_KMS("TEST MISC read failed\n"); + drm_dbg_kms(&i915->drm, "TEST MISC read failed\n"); return DP_TEST_NAK; } if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) @@ -4959,6 +5356,7 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 test_result = DP_TEST_ACK; struct intel_connector *intel_connector = intel_dp->attached_connector; struct drm_connector *connector = &intel_connector->base; @@ -4975,9 +5373,10 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) */ if (intel_dp->aux.i2c_nack_count > 0 || intel_dp->aux.i2c_defer_count > 0) - DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n", - intel_dp->aux.i2c_nack_count, - intel_dp->aux.i2c_defer_count); + drm_dbg_kms(&i915->drm, + "EDID read had %d NACKs, %d DEFERs\n", + intel_dp->aux.i2c_nack_count, + intel_dp->aux.i2c_defer_count); intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; } else { struct edid *block = intel_connector->detect_edid; @@ -4989,7 +5388,8 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, block->checksum) <= 0) - DRM_DEBUG_KMS("Failed to write EDID checksum\n"); + drm_dbg_kms(&i915->drm, + "Failed to write EDID checksum\n"); test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; @@ -5001,43 
+5401,217 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) return test_result; } +static u8 intel_dp_prepare_phytest(struct intel_dp *intel_dp) +{ + struct drm_dp_phy_test_params *data = + &intel_dp->compliance.test_data.phytest; + + if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { + DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n"); + return DP_TEST_NAK; + } + + /* + * link_mst is set to false to avoid executing mst related code + * during compliance testing. + */ + intel_dp->link_mst = false; + + return DP_TEST_ACK; +} + +static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = + to_i915(dp_to_dig_port(intel_dp)->base.base.dev); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_dp_phy_test_params *data = + &intel_dp->compliance.test_data.phytest; + struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); + enum pipe pipe = crtc->pipe; + u32 pattern_val; + + switch (data->phy_pattern) { + case DP_PHY_TEST_PATTERN_NONE: + DRM_DEBUG_KMS("Disable Phy Test Pattern\n"); + intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); + break; + case DP_PHY_TEST_PATTERN_D10_2: + DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n"); + intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), + DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2); + break; + case DP_PHY_TEST_PATTERN_ERROR_COUNT: + DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n"); + intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), + DDI_DP_COMP_CTL_ENABLE | + DDI_DP_COMP_CTL_SCRAMBLED_0); + break; + case DP_PHY_TEST_PATTERN_PRBS7: + DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n"); + intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), + DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7); + break; + case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: + /* + * FIXME: Ideally pattern should come from DPCD 0x250. As + * current firmware of DPR-100 could not set it, so hardcoding + * now for complaince test. + */ + DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n"); + pattern_val = 0x3e0f83e0; + intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val); + pattern_val = 0x0f83e0f8; + intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val); + pattern_val = 0x0000f83e; + intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val); + intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), + DDI_DP_COMP_CTL_ENABLE | + DDI_DP_COMP_CTL_CUSTOM80); + break; + case DP_PHY_TEST_PATTERN_CP2520: + /* + * FIXME: Ideally pattern should come from DPCD 0x24A. As + * current firmware of DPR-100 could not set it, so hardcoding + * now for complaince test. 
+ */ + DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n"); + pattern_val = 0xFB; + intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), + DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | + pattern_val); + break; + default: + WARN(1, "Invalid Phy Test Pattern\n"); + } +} + +static void +intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); + enum pipe pipe = crtc->pipe; + u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; + + trans_ddi_func_ctl_value = intel_de_read(dev_priv, + TRANS_DDI_FUNC_CTL(pipe)); + trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); + dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); + + trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE | + TGL_TRANS_DDI_PORT_MASK); + trans_conf_value &= ~PIPECONF_ENABLE; + dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE; + + intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); + intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), + trans_ddi_func_ctl_value); + intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); +} + +static void +intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, uint8_t lane_cnt) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + enum port port = intel_dig_port->base.port; + struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); + enum pipe pipe = crtc->pipe; + u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; + + trans_ddi_func_ctl_value = intel_de_read(dev_priv, + TRANS_DDI_FUNC_CTL(pipe)); + trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); + dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); + + trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE | + TGL_TRANS_DDI_SELECT_PORT(port); + trans_conf_value |= PIPECONF_ENABLE; + dp_tp_ctl_value |= DP_TP_CTL_ENABLE; + + intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); + intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); + intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), + trans_ddi_func_ctl_value); +} + +void intel_dp_process_phy_request(struct intel_dp *intel_dp) +{ + struct drm_dp_phy_test_params *data = + &intel_dp->compliance.test_data.phytest; + u8 link_status[DP_LINK_STATUS_SIZE]; + + if (!intel_dp_get_link_status(intel_dp, link_status)) { + DRM_DEBUG_KMS("failed to get link status\n"); + return; + } + + /* retrieve vswing & pre-emphasis setting */ + intel_dp_get_adjust_train(intel_dp, link_status); + + intel_dp_autotest_phy_ddi_disable(intel_dp); + + intel_dp_set_signal_levels(intel_dp); + + intel_dp_phy_pattern_update(intel_dp); + + intel_dp_autotest_phy_ddi_enable(intel_dp, data->num_lanes); + + drm_dp_set_phy_test_pattern(&intel_dp->aux, data, + link_status[DP_DPCD_REV]); +} + static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) { u8 test_result = DP_TEST_NAK; + + test_result = intel_dp_prepare_phytest(intel_dp); + if (test_result != DP_TEST_ACK) + DRM_ERROR("Phy test preparation failed\n"); + + intel_dp_process_phy_request(intel_dp); + return test_result; } static void intel_dp_handle_test_request(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 response = DP_TEST_NAK; u8 
request = 0; int status; status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); if (status <= 0) { - DRM_DEBUG_KMS("Could not read test request from sink\n"); + drm_dbg_kms(&i915->drm, + "Could not read test request from sink\n"); goto update_status; } switch (request) { case DP_TEST_LINK_TRAINING: - DRM_DEBUG_KMS("LINK_TRAINING test requested\n"); + drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n"); response = intel_dp_autotest_link_training(intel_dp); break; case DP_TEST_LINK_VIDEO_PATTERN: - DRM_DEBUG_KMS("TEST_PATTERN test requested\n"); + drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n"); response = intel_dp_autotest_video_pattern(intel_dp); break; case DP_TEST_LINK_EDID_READ: - DRM_DEBUG_KMS("EDID test requested\n"); + drm_dbg_kms(&i915->drm, "EDID test requested\n"); response = intel_dp_autotest_edid(intel_dp); break; case DP_TEST_LINK_PHY_TEST_PATTERN: - DRM_DEBUG_KMS("PHY_PATTERN test requested\n"); + drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n"); response = intel_dp_autotest_phy_pattern(intel_dp); break; default: - DRM_DEBUG_KMS("Invalid test request '%02x'\n", request); + drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n", + request); break; } @@ -5047,12 +5621,14 @@ static void intel_dp_handle_test_request(struct intel_dp *intel_dp) update_status: status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); if (status <= 0) - DRM_DEBUG_KMS("Could not write test response to sink\n"); + drm_dbg_kms(&i915->drm, + "Could not write test response to sink\n"); } static int intel_dp_check_mst_status(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); bool bret; if (intel_dp->is_mst) { @@ -5069,12 +5645,13 @@ go_again: /* check link status - esi[10] = 0x200c */ if (intel_dp->active_mst_links > 0 && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { - DRM_DEBUG_KMS("channel EQ not ok, retraining\n"); + drm_dbg_kms(&i915->drm, + "channel EQ not ok, retraining\n"); intel_dp_start_link_train(intel_dp); intel_dp_stop_link_train(intel_dp); } - DRM_DEBUG_KMS("got esi %3ph\n", esi); + drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi); ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled); if (handled) { @@ -5090,7 +5667,8 @@ go_again: bret = intel_dp_get_sink_irq_esi(intel_dp, esi); if (bret == true) { - DRM_DEBUG_KMS("got esi2 %3ph\n", esi); + drm_dbg_kms(&i915->drm, + "got esi2 %3ph\n", esi); goto go_again; } } else @@ -5098,7 +5676,8 @@ go_again: return ret; } else { - DRM_DEBUG_KMS("failed to get ESI - device may have failed\n"); + drm_dbg_kms(&i915->drm, + "failed to get ESI - device may have failed\n"); intel_dp->is_mst = false; drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); @@ -5220,14 +5799,13 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, */ static enum intel_hotplug_state intel_dp_hotplug(struct intel_encoder *encoder, - struct intel_connector *connector, - bool irq_received) + struct intel_connector *connector) { struct drm_modeset_acquire_ctx ctx; enum intel_hotplug_state state; int ret; - state = intel_encoder_hotplug(encoder, connector, irq_received); + state = intel_encoder_hotplug(encoder, connector); drm_modeset_acquire_init(&ctx, 0); @@ -5251,7 +5829,7 @@ intel_dp_hotplug(struct intel_encoder *encoder, * Keeping it consistent with intel_ddi_hotplug() and * intel_hdmi_hotplug(). 
*/ - if (state == INTEL_HOTPLUG_UNCHANGED && irq_received) + if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries) state = INTEL_HOTPLUG_RETRY; return state; @@ -5259,6 +5837,7 @@ intel_dp_hotplug(struct intel_encoder *encoder, static void intel_dp_check_service_irq(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 val; if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) @@ -5277,7 +5856,7 @@ static void intel_dp_check_service_irq(struct intel_dp *intel_dp) intel_hdcp_handle_cp_irq(intel_dp->attached_connector); if (val & DP_SINK_SPECIFIC_IRQ) - DRM_DEBUG_DRIVER("Sink specific irq unhandled\n"); + drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n"); } /* @@ -5344,6 +5923,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp) static enum drm_connector_status intel_dp_detect_dpcd(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); u8 *dpcd = intel_dp->dpcd; u8 type; @@ -5391,7 +5971,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) } /* Anything else is out of spec, warn and ignore */ - DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); + drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n"); return connector_status_disconnected; } @@ -5863,6 +6443,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) static int intel_dp_connector_register(struct drm_connector *connector) { + struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); int ret; @@ -5872,8 +6453,8 @@ intel_dp_connector_register(struct drm_connector *connector) intel_connector_debugfs_add(connector); - DRM_DEBUG_KMS("registering %s bus for %s\n", - intel_dp->aux.name, connector->kdev->kobj.name); + drm_dbg_kms(&i915->drm, "registering %s bus for %s\n", + intel_dp->aux.name, connector->kdev->kobj.name); intel_dp->aux.dev = connector->kdev; ret = drm_dp_aux_register(&intel_dp->aux); @@ -5959,6 +6540,7 @@ static int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, u8 *an) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base)); static const struct drm_dp_aux_msg msg = { .request = DP_AUX_NATIVE_WRITE, @@ -5973,8 +6555,9 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN, an, DRM_HDCP_AN_LEN); if (dpcd_ret != DRM_HDCP_AN_LEN) { - DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n", - dpcd_ret); + drm_dbg_kms(&i915->drm, + "Failed to write An over DP/AUX (%zd)\n", + dpcd_ret); return dpcd_ret >= 0 ? 
-EIO : dpcd_ret; } @@ -5990,17 +6573,19 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, rxbuf, sizeof(rxbuf), DP_AUX_CH_CTL_AUX_AKSV_SELECT); if (ret < 0) { - DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret); + drm_dbg_kms(&i915->drm, + "Write Aksv over DP/AUX failed (%d)\n", ret); return ret; } else if (ret == 0) { - DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n"); + drm_dbg_kms(&i915->drm, "Aksv write over DP/AUX was empty\n"); return -EIO; } reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK; if (reply != DP_AUX_NATIVE_REPLY_ACK) { - DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n", - reply); + drm_dbg_kms(&i915->drm, + "Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n", + reply); return -EIO; } return 0; @@ -6009,11 +6594,14 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, u8 *bksv) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); ssize_t ret; + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, DRM_HDCP_KSV_LEN); if (ret != DRM_HDCP_KSV_LEN) { - DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret); + drm_dbg_kms(&i915->drm, + "Read Bksv from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } return 0; @@ -6022,7 +6610,9 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, u8 *bstatus) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); ssize_t ret; + /* * For some reason the HDMI and DP HDCP specs call this register * definition by different names. In the HDMI spec, it's called BSTATUS, @@ -6031,7 +6621,8 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO, bstatus, DRM_HDCP_BSTATUS_LEN); if (ret != DRM_HDCP_BSTATUS_LEN) { - DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); + drm_dbg_kms(&i915->drm, + "Read bstatus from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } return 0; @@ -6041,12 +6632,14 @@ static int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port, u8 *bcaps) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); ssize_t ret; ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS, bcaps, 1); if (ret != 1) { - DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret); + drm_dbg_kms(&i915->drm, + "Read bcaps from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } @@ -6072,11 +6665,14 @@ static int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port, u8 *ri_prime) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); ssize_t ret; + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, ri_prime, DRM_HDCP_RI_LEN); if (ret != DRM_HDCP_RI_LEN) { - DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret); + drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n", + ret); return ret >= 0 ? 
-EIO : ret; } return 0; @@ -6086,12 +6682,15 @@ static int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port, bool *ksv_ready) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); ssize_t ret; u8 bstatus; + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, &bstatus, 1); if (ret != 1) { - DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); + drm_dbg_kms(&i915->drm, + "Read bstatus from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } *ksv_ready = bstatus & DP_BSTATUS_READY; @@ -6102,6 +6701,7 @@ static int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port, int num_downstream, u8 *ksv_fifo) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); ssize_t ret; int i; @@ -6113,8 +6713,9 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port, ksv_fifo + i * DRM_HDCP_KSV_LEN, len); if (ret != len) { - DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n", - i, ret); + drm_dbg_kms(&i915->drm, + "Read ksv[%d] from DP/AUX failed (%zd)\n", + i, ret); return ret >= 0 ? -EIO : ret; } } @@ -6125,6 +6726,7 @@ static int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, int i, u32 *part) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); ssize_t ret; if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) @@ -6134,7 +6736,8 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, DP_AUX_HDCP_V_PRIME(i), part, DRM_HDCP_V_PRIME_PART_LEN); if (ret != DRM_HDCP_V_PRIME_PART_LEN) { - DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); + drm_dbg_kms(&i915->drm, + "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); return ret >= 0 ? -EIO : ret; } return 0; @@ -6151,13 +6754,15 @@ int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, static bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); ssize_t ret; u8 bstatus; ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, &bstatus, 1); if (ret != 1) { - DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); + drm_dbg_kms(&i915->drm, + "Read bstatus from DP/AUX failed (%zd)\n", ret); return false; } @@ -6232,13 +6837,15 @@ static inline int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port, u8 *rx_status) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); ssize_t ret; ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status, HDCP_2_2_DP_RXSTATUS_LEN); if (ret != HDCP_2_2_DP_RXSTATUS_LEN) { - DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); + drm_dbg_kms(&i915->drm, + "Read bstatus from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? 
-EIO : ret; } @@ -6282,6 +6889,7 @@ static ssize_t intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port, const struct hdcp2_dp_msg_data *hdcp2_msg_data) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); struct intel_dp *dp = &intel_dig_port->dp; struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; u8 msg_id = hdcp2_msg_data->msg_id; @@ -6313,8 +6921,9 @@ intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port, } if (ret) - DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n", - hdcp2_msg_data->msg_id, ret, timeout); + drm_dbg_kms(&i915->drm, + "msg_id %d, ret %d, timeout(mSec): %d\n", + hdcp2_msg_data->msg_id, ret, timeout); return ret; } @@ -6400,6 +7009,7 @@ static int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port, u8 msg_id, void *buf, size_t size) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); unsigned int offset; u8 *byte = buf; ssize_t ret, bytes_to_recv, len; @@ -6433,7 +7043,8 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port, ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset, (void *)byte, len); if (ret < 0) { - DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret); + drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n", + msg_id, ret); return ret; } @@ -6724,7 +7335,11 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn, if (ret) return ret; - if (INTEL_GEN(dev_priv) < 11) + /* + * We don't enable port sync on BDW due to missing w/as and + * due to not having adjusted the modeset sequence appropriately. + */ + if (INTEL_GEN(dev_priv) < 9) return 0; if (!intel_connector_needs_modeset(state, conn)) @@ -6763,28 +7378,45 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = { .destroy = intel_dp_encoder_destroy, }; +static bool intel_edp_have_power(struct intel_dp *intel_dp) +{ + intel_wakeref_t wakeref; + bool have_power = false; + + with_pps_lock(intel_dp, wakeref) { + have_power = edp_have_panel_power(intel_dp) && + edp_have_panel_vdd(intel_dp); + } + + return have_power; +} + enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); struct intel_dp *intel_dp = &intel_dig_port->dp; - if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) { + if (intel_dig_port->base.type == INTEL_OUTPUT_EDP && + (long_hpd || !intel_edp_have_power(intel_dp))) { /* - * vdd off can generate a long pulse on eDP which + * vdd off can generate a long/short pulse on eDP which * would require vdd on to handle it, and thus we * would end up in an endless cycle of - * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..." + * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..." */ - DRM_DEBUG_KMS("ignoring long hpd on eDP [ENCODER:%d:%s]\n", - intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name); + drm_dbg_kms(&i915->drm, + "ignoring %s hpd on eDP [ENCODER:%d:%s]\n", + long_hpd ? "long" : "short", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); return IRQ_HANDLED; } - DRM_DEBUG_KMS("got hpd irq on [ENCODER:%d:%s] - %s\n", - intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name, - long_hpd ? "long" : "short"); + drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name, + long_hpd ? 
"long" : "short"); if (long_hpd) { intel_dp->reset_link_params = true; @@ -6797,8 +7429,10 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) * If we were in MST mode, and device is not * there, get out of MST mode */ - DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", - intel_dp->is_mst, intel_dp->mst_mgr.mst_state); + drm_dbg_kms(&i915->drm, + "MST device may have disappeared %d vs %d\n", + intel_dp->is_mst, + intel_dp->mst_mgr.mst_state); intel_dp->is_mst = false; drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 0c7be8ed1423..6659ce15a693 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -114,7 +114,11 @@ void intel_dp_vsc_enable(struct intel_dp *intel_dp, void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); +void intel_dp_set_infoframes(struct intel_encoder *encoder, bool enable, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); bool intel_digital_port_connected(struct intel_encoder *encoder); +void intel_dp_process_phy_request(struct intel_dp *intel_dp); static inline unsigned int intel_dp_unused_lane_mask(int lane_count) { diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 3e706bb850a8..4b916468540f 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -27,6 +27,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 reg_val = 0; /* Early return when display use other mechanism to enable backlight. */ @@ -35,8 +36,8 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable) if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, ®_val) < 0) { - DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", - DP_EDP_DISPLAY_CONTROL_REGISTER); + drm_dbg_kms(&i915->drm, "Failed to read DPCD register 0x%x\n", + DP_EDP_DISPLAY_CONTROL_REGISTER); return; } if (enable) @@ -46,8 +47,8 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable) if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, reg_val) != 1) { - DRM_DEBUG_KMS("Failed to %s aux backlight\n", - enable ? "enable" : "disable"); + drm_dbg_kms(&i915->drm, "Failed to %s aux backlight\n", + enable ? 
"enable" : "disable"); } } @@ -58,6 +59,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable) static u32 intel_dp_aux_get_backlight(struct intel_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(connector); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 read_val[2] = { 0x0 }; u8 mode_reg; u16 level = 0; @@ -65,8 +67,9 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector) if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg) != 1) { - DRM_DEBUG_KMS("Failed to read the DPCD register 0x%x\n", - DP_EDP_BACKLIGHT_MODE_SET_REGISTER); + drm_dbg_kms(&i915->drm, + "Failed to read the DPCD register 0x%x\n", + DP_EDP_BACKLIGHT_MODE_SET_REGISTER); return 0; } @@ -80,8 +83,8 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector) if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, &read_val, sizeof(read_val)) < 0) { - DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", - DP_EDP_BACKLIGHT_BRIGHTNESS_MSB); + drm_dbg_kms(&i915->drm, "Failed to read DPCD register 0x%x\n", + DP_EDP_BACKLIGHT_BRIGHTNESS_MSB); return 0; } level = read_val[0]; @@ -100,6 +103,7 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_dp *intel_dp = intel_attached_dp(connector); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 vals[2] = { 0x0 }; vals[0] = level; @@ -111,7 +115,8 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev } if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, vals, sizeof(vals)) < 0) { - DRM_DEBUG_KMS("Failed to write aux backlight level\n"); + drm_dbg_kms(&i915->drm, + "Failed to write aux backlight level\n"); return; } } @@ -133,7 +138,8 @@ static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector) freq = dev_priv->vbt.backlight.pwm_freq_hz; if (!freq) { - DRM_DEBUG_KMS("Use panel default backlight frequency\n"); + drm_dbg_kms(&dev_priv->drm, + "Use panel default backlight frequency\n"); return false; } @@ -146,13 +152,14 @@ static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector) fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4); if (fxp_min > fxp_actual || fxp_actual > fxp_max) { - DRM_DEBUG_KMS("Actual frequency out of range\n"); + drm_dbg_kms(&dev_priv->drm, "Actual frequency out of range\n"); return false; } if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_FREQ_SET, (u8) f) < 0) { - DRM_DEBUG_KMS("Failed to write aux backlight freq\n"); + drm_dbg_kms(&dev_priv->drm, + "Failed to write aux backlight freq\n"); return false; } return true; @@ -163,13 +170,14 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_dp *intel_dp = intel_attached_dp(connector); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_panel *panel = &connector->panel; u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode; if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) { - DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", - DP_EDP_BACKLIGHT_MODE_SET_REGISTER); + drm_dbg_kms(&i915->drm, "Failed to read DPCD register 0x%x\n", + DP_EDP_BACKLIGHT_MODE_SET_REGISTER); return; } @@ -186,7 +194,8 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st if 
(drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_PWMGEN_BIT_COUNT, panel->backlight.pwmgen_bit_count) < 0) - DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n"); + drm_dbg_kms(&i915->drm, + "Failed to write aux pwmgen bit count\n"); break; @@ -203,7 +212,8 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st if (new_dpcd_buf != dpcd_buf) { if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) { - DRM_DEBUG_KMS("Failed to write aux backlight mode\n"); + drm_dbg_kms(&i915->drm, + "Failed to write aux backlight mode\n"); } } @@ -237,9 +247,11 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector) * minimum value will applied automatically. So no need to check that. */ freq = i915->vbt.backlight.pwm_freq_hz; - DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq); + drm_dbg_kms(&i915->drm, "VBT defined backlight frequency %u Hz\n", + freq); if (!freq) { - DRM_DEBUG_KMS("Use panel default backlight frequency\n"); + drm_dbg_kms(&i915->drm, + "Use panel default backlight frequency\n"); return max_backlight; } @@ -254,12 +266,14 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector) */ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) { - DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n"); + drm_dbg_kms(&i915->drm, + "Failed to read pwmgen bit count cap min\n"); return max_backlight; } if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) { - DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n"); + drm_dbg_kms(&i915->drm, + "Failed to read pwmgen bit count cap max\n"); return max_backlight; } pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK; @@ -268,7 +282,8 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector) fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4); fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4); if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) { - DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n"); + drm_dbg_kms(&i915->drm, + "VBT defined backlight frequency out of range\n"); return max_backlight; } @@ -279,10 +294,11 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector) break; } - DRM_DEBUG_KMS("Using eDP pwmgen bit count of %d\n", pn); + drm_dbg_kms(&i915->drm, "Using eDP pwmgen bit count of %d\n", pn); if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) { - DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n"); + drm_dbg_kms(&i915->drm, + "Failed to write aux pwmgen bit count\n"); return max_backlight; } panel->backlight.pwmgen_bit_count = pn; @@ -312,6 +328,7 @@ static bool intel_dp_aux_display_control_capable(struct intel_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(connector); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); /* Check the eDP Display control capabilities registers to determine if * the panel can support backlight control over the aux channel @@ -319,7 +336,7 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector) if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP && (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) && !(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) { - DRM_DEBUG_KMS("AUX Backlight Control Supported!\n"); + drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n"); return true; } return false; @@ -329,8 +346,7 @@ int intel_dp_aux_init_backlight_funcs(struct 
intel_connector *intel_connector) { struct intel_panel *panel = &intel_connector->panel; struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder); - struct drm_device *dev = intel_connector->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); if (i915_modparams.enable_dpcd_backlight == 0 || !intel_dp_aux_display_control_capable(intel_connector)) @@ -340,17 +356,17 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) * There are a lot of machines that don't advertise the backlight * control interface to use properly in their VBIOS, :\ */ - if (dev_priv->vbt.backlight.type != + if (i915->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE && !drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks, DP_QUIRK_FORCE_DPCD_BACKLIGHT)) { - DRM_DEV_INFO(dev->dev, - "Panel advertises DPCD backlight support, but " - "VBT disagrees. If your backlight controls " - "don't work try booting with " - "i915.enable_dpcd_backlight=1. If your machine " - "needs this, please file a _new_ bug report on " - "drm/i915, see " FDO_BUG_URL " for details.\n"); + drm_info(&i915->drm, + "Panel advertises DPCD backlight support, but " + "VBT disagrees. If your backlight controls " + "don't work try booting with " + "i915.enable_dpcd_backlight=1. If your machine " + "needs this, please file a _new_ bug report on " + "drm/i915, see " FDO_BUG_URL " for details.\n"); return -ENODEV; } diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index a7defb37ab00..e4f1843170b7 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -34,9 +34,8 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE]) link_status[3], link_status[4], link_status[5]); } -static void -intel_get_adjust_train(struct intel_dp *intel_dp, - const u8 link_status[DP_LINK_STATUS_SIZE]) +void intel_dp_get_adjust_train(struct intel_dp *intel_dp, + const u8 link_status[DP_LINK_STATUS_SIZE]) { u8 v = 0; u8 p = 0; @@ -219,7 +218,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; /* Update training set as requested by target */ - intel_get_adjust_train(intel_dp, link_status); + intel_dp_get_adjust_train(intel_dp, link_status); if (!intel_dp_update_link_train(intel_dp)) { drm_err(&i915->drm, "failed to update link training\n"); @@ -338,7 +337,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) } /* Update training set as requested by target */ - intel_get_adjust_train(intel_dp, link_status); + intel_dp_get_adjust_train(intel_dp, link_status); if (!intel_dp_update_link_train(intel_dp)) { drm_err(&i915->drm, "failed to update link training\n"); diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h index 174566adcc92..01f1dabbb060 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h @@ -6,8 +6,12 @@ #ifndef __INTEL_DP_LINK_TRAINING_H__ #define __INTEL_DP_LINK_TRAINING_H__ +#include <drm/drm_dp_helper.h> + struct intel_dp; +void intel_dp_get_adjust_train(struct intel_dp *intel_dp, + const u8 link_status[DP_LINK_STATUS_SIZE]); void intel_dp_start_link_train(struct intel_dp *intel_dp); void intel_dp_stop_link_train(struct intel_dp *intel_dp); diff --git 
a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 44f3fd251ca1..a83f910d8e15 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -47,9 +47,9 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, struct intel_dp *intel_dp = &intel_mst->primary->dp; struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *i915 = to_i915(connector->base.dev); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - void *port = connector->port; bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_CONSTANT_N); int bpp, slots = -EINVAL; @@ -65,7 +65,8 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, false); slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, - port, crtc_state->pbn, 0); + connector->port, + crtc_state->pbn, 0); if (slots == -EDEADLK) return slots; if (slots >= 0) @@ -73,7 +74,8 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, } if (slots < 0) { - DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", slots); + drm_dbg_kms(&i915->drm, "failed finding vcpi slots:%d\n", + slots); return slots; } @@ -88,56 +90,10 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, return 0; } -/* - * Iterate over all connectors and return the smallest transcoder in the MST - * stream - */ -static enum transcoder -intel_dp_mst_master_trans_compute(struct intel_atomic_state *state, - struct intel_dp *mst_port) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_digital_connector_state *conn_state; - struct intel_connector *connector; - enum pipe ret = I915_MAX_PIPES; - int i; - - if (INTEL_GEN(dev_priv) < 12) - return INVALID_TRANSCODER; - - for_each_new_intel_connector_in_state(state, connector, conn_state, i) { - struct intel_crtc_state *crtc_state; - struct intel_crtc *crtc; - - if (connector->mst_port != mst_port || !conn_state->base.crtc) - continue; - - crtc = to_intel_crtc(conn_state->base.crtc); - crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - if (!crtc_state->uapi.active) - continue; - - /* - * Using crtc->pipe because crtc_state->cpu_transcoder is - * computed, so others CRTCs could have non-computed - * cpu_transcoder - */ - if (crtc->pipe < ret) - ret = crtc->pipe; - } - - if (ret == I915_MAX_PIPES) - return INVALID_TRANSCODER; - - /* Simple cast works because TGL don't have a eDP transcoder */ - return (enum transcoder)ret; -} - static int intel_dp_mst_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { - struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_dp *intel_dp = &intel_mst->primary->dp; @@ -147,7 +103,6 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, to_intel_digital_connector_state(conn_state); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; - void *port = connector->port; struct link_config_limits limits; int ret; @@ -158,8 +113,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, pipe_config->has_pch_encoder = false; if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) - pipe_config->has_audio = - 
drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port); + pipe_config->has_audio = connector->port->has_audio; else pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON; @@ -201,7 +155,56 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, intel_ddi_compute_min_voltage_level(dev_priv, pipe_config); - pipe_config->mst_master_transcoder = intel_dp_mst_master_trans_compute(state, intel_dp); + return 0; +} + +/* + * Iterate over all connectors and return a mask of + * all CPU transcoders streaming over the same DP link. + */ +static unsigned int +intel_dp_mst_transcoder_mask(struct intel_atomic_state *state, + struct intel_dp *mst_port) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + const struct intel_digital_connector_state *conn_state; + struct intel_connector *connector; + u8 transcoders = 0; + int i; + + if (INTEL_GEN(dev_priv) < 12) + return 0; + + for_each_new_intel_connector_in_state(state, connector, conn_state, i) { + const struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + + if (connector->mst_port != mst_port || !conn_state->base.crtc) + continue; + + crtc = to_intel_crtc(conn_state->base.crtc); + crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + + if (!crtc_state->hw.active) + continue; + + transcoders |= BIT(crtc_state->cpu_transcoder); + } + + return transcoders; +} + +static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); + struct intel_dp *intel_dp = &intel_mst->primary->dp; + + /* lowest numbered transcoder will be designated master */ + crtc_state->mst_master_transcoder = + ffs(intel_dp_mst_transcoder_mask(state, intel_dp)) - 1; return 0; } @@ -313,7 +316,8 @@ intel_dp_mst_atomic_check(struct drm_connector *connector, return ret; } -static void intel_mst_disable_dp(struct intel_encoder *encoder, +static void intel_mst_disable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -322,22 +326,25 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder, struct intel_dp *intel_dp = &intel_dig_port->dp; struct intel_connector *connector = to_intel_connector(old_conn_state->connector); + struct drm_i915_private *i915 = to_i915(connector->base.dev); int ret; - DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); + drm_dbg_kms(&i915->drm, "active links %d\n", + intel_dp->active_mst_links); drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port); ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); if (ret) { - DRM_DEBUG_KMS("failed to update payload %d\n", ret); + drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret); } if (old_crtc_state->has_audio) intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); } -static void intel_mst_post_disable_dp(struct intel_encoder *encoder, +static void intel_mst_post_disable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -371,7 +378,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder, if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, DP_TP_STATUS_ACT_SENT, 1)) - DRM_ERROR("Timed out waiting for 
ACT sent when disabling\n"); + drm_err(&dev_priv->drm, + "Timed out waiting for ACT sent when disabling\n"); drm_dp_check_act_status(&intel_dp->mst_mgr); drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port); @@ -402,13 +410,15 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder, intel_mst->connector = NULL; if (last_mst_stream) - intel_dig_port->base.post_disable(&intel_dig_port->base, + intel_dig_port->base.post_disable(state, &intel_dig_port->base, old_crtc_state, NULL); - DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); + drm_dbg_kms(&dev_priv->drm, "active links %d\n", + intel_dp->active_mst_links); } -static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder, +static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -417,11 +427,12 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder, struct intel_dp *intel_dp = &intel_dig_port->dp; if (intel_dp->active_mst_links == 0) - intel_dig_port->base.pre_pll_enable(&intel_dig_port->base, + intel_dig_port->base.pre_pll_enable(state, &intel_dig_port->base, pipe_config, NULL); } -static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, +static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -445,7 +456,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, INTEL_GEN(dev_priv) >= 12 && first_mst_stream && !intel_dp_mst_is_master_trans(pipe_config)); - DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); + drm_dbg_kms(&dev_priv->drm, "active links %d\n", + intel_dp->active_mst_links); if (first_mst_stream) intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); @@ -453,7 +465,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true); if (first_mst_stream) - intel_dig_port->base.pre_enable(&intel_dig_port->base, + intel_dig_port->base.pre_enable(state, &intel_dig_port->base, pipe_config, NULL); ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr, @@ -461,7 +473,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, pipe_config->pbn, pipe_config->dp_m_n.tu); if (!ret) - DRM_ERROR("failed to allocate vcpi\n"); + drm_err(&dev_priv->drm, "failed to allocate vcpi\n"); intel_dp->active_mst_links++; temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_status); @@ -484,7 +496,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, intel_dp_set_m_n(pipe_config, M1_N1); } -static void intel_mst_enable_dp(struct intel_encoder *encoder, +static void intel_mst_enable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -499,11 +512,12 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder, intel_crtc_vblank_on(pipe_config); - DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); + drm_dbg_kms(&dev_priv->drm, "active links %d\n", + intel_dp->active_mst_links); if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, DP_TP_STATUS_ACT_SENT, 1)) - DRM_ERROR("Timed out waiting for ACT sent\n"); + drm_err(&dev_priv->drm, "Timed out waiting for ACT sent\n"); drm_dp_check_act_status(&intel_dp->mst_mgr); @@ -786,6 +800,7 @@ 
intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum intel_encoder->pipe_mask = ~0; intel_encoder->compute_config = intel_dp_mst_compute_config; + intel_encoder->compute_config_late = intel_dp_mst_compute_config_late; intel_encoder->disable = intel_mst_disable_dp; intel_encoder->post_disable = intel_mst_post_disable_dp; intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp; diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c index a2a937109a5a..afa4e6817e8c 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.c +++ b/drivers/gpu/drm/i915/display/intel_dsi.c @@ -31,20 +31,21 @@ int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi) int intel_dsi_get_modes(struct drm_connector *connector) { + struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_connector *intel_connector = to_intel_connector(connector); struct drm_display_mode *mode; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&i915->drm, "\n"); if (!intel_connector->panel.fixed_mode) { - DRM_DEBUG_KMS("no fixed mode\n"); + drm_dbg_kms(&i915->drm, "no fixed mode\n"); return 0; } mode = drm_mode_duplicate(connector->dev, intel_connector->panel.fixed_mode); if (!mode) { - DRM_DEBUG_KMS("drm_mode_duplicate failed\n"); + drm_dbg_kms(&i915->drm, "drm_mode_duplicate failed\n"); return 0; } @@ -60,7 +61,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&dev_priv->drm, "\n"); if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index 574dcfec9577..3c9c05478a03 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -453,8 +453,7 @@ static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi, static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data) { - struct drm_device *drm_dev = intel_dsi->base.base.dev; - struct device *dev = &drm_dev->pdev->dev; + struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); struct i2c_adapter *adapter; struct i2c_msg msg; int ret; @@ -471,7 +470,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data) adapter = i2c_get_adapter(intel_dsi->i2c_bus_num); if (!adapter) { - DRM_DEV_ERROR(dev, "Cannot find a valid i2c bus for xfer\n"); + drm_err(&i915->drm, "Cannot find a valid i2c bus for xfer\n"); goto err_bus; } @@ -489,9 +488,9 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data) ret = i2c_transfer(adapter, &msg, 1); if (ret < 0) - DRM_DEV_ERROR(dev, - "Failed to xfer payload of size (%u) to reg (%u)\n", - payload_size, reg_offset); + drm_err(&i915->drm, + "Failed to xfer payload of size (%u) to reg (%u)\n", + payload_size, reg_offset); kfree(payload_data); err_alloc: diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index 341d5ce8b062..5cd09034519b 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -183,7 +183,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder, pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock; } -static void intel_disable_dvo(struct intel_encoder *encoder, +static void intel_disable_dvo(struct intel_atomic_state *state, + 
struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -197,7 +198,8 @@ static void intel_disable_dvo(struct intel_encoder *encoder, intel_de_read(dev_priv, dvo_reg); } -static void intel_enable_dvo(struct intel_encoder *encoder, +static void intel_enable_dvo(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -272,7 +274,8 @@ static int intel_dvo_compute_config(struct intel_encoder *encoder, return 0; } -static void intel_dvo_pre_enable(struct intel_encoder *encoder, +static void intel_dvo_pre_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 2e5d835a9eaa..56bcd6c52a02 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -104,7 +104,7 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv) /* Wait for compressing bit to clear */ if (intel_de_wait_for_clear(dev_priv, FBC_STATUS, FBC_STAT_COMPRESSING, 10)) { - DRM_DEBUG_KMS("FBC idle timed out\n"); + drm_dbg_kms(&dev_priv->drm, "FBC idle timed out\n"); return; } } @@ -485,7 +485,8 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, if (!ret) goto err_llb; else if (ret > 1) { - DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n"); + drm_info(&dev_priv->drm, + "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n"); } @@ -521,8 +522,9 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, dev_priv->dsm.start + compressed_llb->start); } - DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n", - fbc->compressed_fb.size, fbc->threshold); + drm_dbg_kms(&dev_priv->drm, + "reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n", + fbc->compressed_fb.size, fbc->threshold); return 0; @@ -531,7 +533,7 @@ err_fb: i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); err_llb: if (drm_mm_initialized(&dev_priv->mm.stolen)) - pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); + drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); return -ENOSPC; } @@ -606,6 +608,19 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, } } +static bool rotation_is_valid(struct drm_i915_private *dev_priv, + u32 pixel_format, unsigned int rotation) +{ + if (INTEL_GEN(dev_priv) >= 9 && pixel_format == DRM_FORMAT_RGB565 && + drm_rotation_90_or_270(rotation)) + return false; + else if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) && + rotation != DRM_MODE_ROTATE_0) + return false; + + return true; +} + /* * For some reason, the hardware tracking starts looking at whatever we * programmed as the display plane base address register. 
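The rotation_is_valid() helper added above pulls the FBC rotation restrictions out of intel_fbc_can_activate() into a named predicate. A minimal standalone sketch of the same decision table, using plain-C stand-ins (the gen/is_g4x parameters and the format and rotation constants below are illustrative placeholders, not the real i915 types):

#include <stdbool.h>

/* Hypothetical stand-ins for the values the real helper reads from i915. */
enum rotation { ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270 };
#define FMT_RGB565 1

static bool rotation_90_or_270(enum rotation r)
{
	return r == ROTATE_90 || r == ROTATE_270;
}

static bool rotation_is_valid(int gen, bool is_g4x, int pixel_format,
			      enum rotation rotation)
{
	/* Gen9+ cannot compress RGB565 scanout rotated 90/270 degrees. */
	if (gen >= 9 && pixel_format == FMT_RGB565 && rotation_90_or_270(rotation))
		return false;
	/* Pre-gen5 parts other than G4X only support FBC on unrotated planes. */
	if (gen <= 4 && !is_g4x && rotation != ROTATE_0)
		return false;
	return true;
}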
It does not look at @@ -640,6 +655,22 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) return effective_w <= max_w && effective_h <= max_h; } +static bool tiling_is_valid(struct drm_i915_private *dev_priv, + uint64_t modifier) +{ + switch (modifier) { + case DRM_FORMAT_MOD_LINEAR: + if (INTEL_GEN(dev_priv) >= 9) + return true; + return false; + case I915_FORMAT_MOD_X_TILED: + case I915_FORMAT_MOD_Y_TILED: + return true; + default: + return false; + } +} + static void intel_fbc_update_state_cache(struct intel_crtc *crtc, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) @@ -673,6 +704,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, cache->fb.format = fb->format; cache->fb.stride = fb->pitches[0]; + cache->fb.modifier = fb->modifier; drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE && !plane_state->vma->fence); @@ -746,29 +778,39 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) return false; } - /* The use of a CPU fence is mandatory in order to detect writes - * by the CPU to the scanout and trigger updates to the FBC. + /* The use of a CPU fence is one of two ways to detect writes by the + * CPU to the scanout and trigger updates to the FBC. + * + * The other method is by software tracking (see + * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke + * the current compressed buffer and recompress it. * * Note that is possible for a tiled surface to be unmappable (and - * so have no fence associated with it) due to aperture constaints + * so have no fence associated with it) due to aperture constraints * at the time of pinning. * * FIXME with 90/270 degree rotation we should use the fence on * the normal GTT view (the rotated view doesn't even have a * fence). Would need changes to the FBC fence Y offset as well. - * For now this will effecively disable FBC with 90/270 degree + * For now this will effectively disable FBC with 90/270 degree * rotation. 
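tiling_is_valid(), also new in this file, gates FBC on the framebuffer modifier now cached in cache->fb.modifier: linear buffers only qualify on gen9+, X- and Y-tiled always do, and anything else is rejected. A plain-C sketch of the same switch (the enum values are illustrative placeholders, not the real DRM_FORMAT_MOD_*/I915_FORMAT_MOD_* constants):

#include <stdbool.h>

/* Illustrative placeholders for the framebuffer modifier values. */
enum fb_modifier { MOD_LINEAR, MOD_X_TILED, MOD_Y_TILED, MOD_OTHER };

static bool tiling_is_valid(int gen, enum fb_modifier modifier)
{
	switch (modifier) {
	case MOD_LINEAR:
		/* Untiled scanout is only compressible on gen9+. */
		return gen >= 9;
	case MOD_X_TILED:
	case MOD_Y_TILED:
		return true;
	default:
		return false;
	}
}

This pairs with the relaxed fence check in the following hunk: on gen9+ the software tracking described in the comment above (intel_fbc_invalidate/flush()) stands in for a CPU fence, so an unfenced framebuffer no longer disables FBC outright.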
*/ - if (cache->fence_id < 0) { + if (INTEL_GEN(dev_priv) < 9 && cache->fence_id < 0) { fbc->no_fbc_reason = "framebuffer not tiled or fenced"; return false; } - if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) && - cache->plane.rotation != DRM_MODE_ROTATE_0) { + + if (!rotation_is_valid(dev_priv, cache->fb.format->format, + cache->plane.rotation)) { fbc->no_fbc_reason = "rotation unsupported"; return false; } + if (!tiling_is_valid(dev_priv, cache->fb.modifier)) { + fbc->no_fbc_reason = "tiling unsupported"; + return false; + } + if (!stride_is_valid(dev_priv, cache->fb.stride)) { fbc->no_fbc_reason = "framebuffer stride not supported"; return false; @@ -948,7 +990,8 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv) drm_WARN_ON(&dev_priv->drm, !fbc->crtc); drm_WARN_ON(&dev_priv->drm, fbc->active); - DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe)); + drm_dbg_kms(&dev_priv->drm, "Disabling FBC on pipe %c\n", + pipe_name(crtc->pipe)); __intel_fbc_cleanup_cfb(dev_priv); @@ -1176,7 +1219,8 @@ void intel_fbc_enable(struct intel_atomic_state *state, else cache->gen9_wa_cfb_stride = 0; - DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe)); + drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n", + pipe_name(crtc->pipe)); fbc->no_fbc_reason = "FBC enabled but not active yet\n"; fbc->crtc = crtc; @@ -1238,7 +1282,7 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work) if (fbc->underrun_detected || !fbc->crtc) goto out; - DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n"); + drm_dbg_kms(&dev_priv->drm, "Disabling FBC due to FIFO underrun.\n"); fbc->underrun_detected = true; intel_fbc_deactivate(dev_priv, "FIFO underrun"); @@ -1264,7 +1308,8 @@ int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv) return ret; if (dev_priv->fbc.underrun_detected) { - DRM_DEBUG_KMS("Re-allowing FBC after fifo underrun\n"); + drm_dbg_kms(&dev_priv->drm, + "Re-allowing FBC after fifo underrun\n"); dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared"; } @@ -1335,7 +1380,8 @@ static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv) /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */ if (intel_vtd_active() && (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) { - DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n"); + drm_info(&dev_priv->drm, + "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n"); return true; } @@ -1363,8 +1409,8 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) mkwrite_device_info(dev_priv)->display.has_fbc = false; i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv); - DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", - i915_modparams.enable_fbc); + drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n", + i915_modparams.enable_fbc); if (!HAS_FBC(dev_priv)) { fbc->no_fbc_reason = "unsupported by this chipset"; diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index 3bc804212a99..bd39eb6a21b8 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -146,7 +146,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper, if (IS_ERR(obj)) obj = i915_gem_object_create_shmem(dev_priv, size); if (IS_ERR(obj)) { - DRM_ERROR("failed to allocate framebuffer\n"); + drm_err(&dev_priv->drm, "failed to allocate framebuffer\n"); return PTR_ERR(obj); } @@ -183,21 +183,23 @@ static int intelfb_create(struct drm_fb_helper *helper, 
if (intel_fb && (sizes->fb_width > intel_fb->base.width || sizes->fb_height > intel_fb->base.height)) { - DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d)," - " releasing it\n", - intel_fb->base.width, intel_fb->base.height, - sizes->fb_width, sizes->fb_height); + drm_dbg_kms(&dev_priv->drm, + "BIOS fb too small (%dx%d), we require (%dx%d)," + " releasing it\n", + intel_fb->base.width, intel_fb->base.height, + sizes->fb_width, sizes->fb_height); drm_framebuffer_put(&intel_fb->base); intel_fb = ifbdev->fb = NULL; } if (!intel_fb || drm_WARN_ON(dev, !intel_fb_obj(&intel_fb->base))) { - DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); + drm_dbg_kms(&dev_priv->drm, + "no BIOS fb, allocating a new one\n"); ret = intelfb_alloc(helper, sizes); if (ret) return ret; intel_fb = ifbdev->fb; } else { - DRM_DEBUG_KMS("re-using BIOS fb\n"); + drm_dbg_kms(&dev_priv->drm, "re-using BIOS fb\n"); prealloc = true; sizes->fb_width = intel_fb->base.width; sizes->fb_height = intel_fb->base.height; @@ -220,7 +222,7 @@ static int intelfb_create(struct drm_fb_helper *helper, info = drm_fb_helper_alloc_fbi(helper); if (IS_ERR(info)) { - DRM_ERROR("Failed to allocate fb_info\n"); + drm_err(&dev_priv->drm, "Failed to allocate fb_info\n"); ret = PTR_ERR(info); goto out_unpin; } @@ -240,7 +242,8 @@ static int intelfb_create(struct drm_fb_helper *helper, vaddr = i915_vma_pin_iomap(vma); if (IS_ERR(vaddr)) { - DRM_ERROR("Failed to remap framebuffer into virtual memory\n"); + drm_err(&dev_priv->drm, + "Failed to remap framebuffer into virtual memory\n"); ret = PTR_ERR(vaddr); goto out_unpin; } @@ -258,9 +261,9 @@ static int intelfb_create(struct drm_fb_helper *helper, /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ - DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n", - ifbdev->fb->base.width, ifbdev->fb->base.height, - i915_ggtt_offset(vma)); + drm_dbg_kms(&dev_priv->drm, "allocated %dx%d fb: 0x%08x\n", + ifbdev->fb->base.width, ifbdev->fb->base.height, + i915_ggtt_offset(vma)); ifbdev->vma = vma; ifbdev->vma_flags = flags; @@ -309,6 +312,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev) static bool intel_fbdev_init_bios(struct drm_device *dev, struct intel_fbdev *ifbdev) { + struct drm_i915_private *i915 = to_i915(dev); struct intel_framebuffer *fb = NULL; struct drm_crtc *crtc; struct intel_crtc *intel_crtc; @@ -321,21 +325,24 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, intel_crtc = to_intel_crtc(crtc); if (!crtc->state->active || !obj) { - DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n", - pipe_name(intel_crtc->pipe)); + drm_dbg_kms(&i915->drm, + "pipe %c not active or no fb, skipping\n", + pipe_name(intel_crtc->pipe)); continue; } if (obj->base.size > max_size) { - DRM_DEBUG_KMS("found possible fb from plane %c\n", - pipe_name(intel_crtc->pipe)); + drm_dbg_kms(&i915->drm, + "found possible fb from plane %c\n", + pipe_name(intel_crtc->pipe)); fb = to_intel_framebuffer(crtc->primary->state->fb); max_size = obj->base.size; } } if (!fb) { - DRM_DEBUG_KMS("no active fbs found, not using BIOS config\n"); + drm_dbg_kms(&i915->drm, + "no active fbs found, not using BIOS config\n"); goto out; } @@ -346,13 +353,14 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, intel_crtc = to_intel_crtc(crtc); if (!crtc->state->active) { - DRM_DEBUG_KMS("pipe %c not active, skipping\n", - pipe_name(intel_crtc->pipe)); + drm_dbg_kms(&i915->drm, + "pipe %c not active, skipping\n", + pipe_name(intel_crtc->pipe)); continue; } - DRM_DEBUG_KMS("checking plane %c 
for BIOS fb\n", - pipe_name(intel_crtc->pipe)); + drm_dbg_kms(&i915->drm, "checking plane %c for BIOS fb\n", + pipe_name(intel_crtc->pipe)); /* * See if the plane fb we found above will fit on this @@ -362,9 +370,10 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, cur_size = crtc->state->adjusted_mode.crtc_hdisplay; cur_size = cur_size * fb->base.format->cpp[0]; if (fb->base.pitches[0] < cur_size) { - DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n", - pipe_name(intel_crtc->pipe), - cur_size, fb->base.pitches[0]); + drm_dbg_kms(&i915->drm, + "fb not wide enough for plane %c (%d vs %d)\n", + pipe_name(intel_crtc->pipe), + cur_size, fb->base.pitches[0]); fb = NULL; break; } @@ -372,28 +381,32 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, cur_size = crtc->state->adjusted_mode.crtc_vdisplay; cur_size = intel_fb_align_height(&fb->base, 0, cur_size); cur_size *= fb->base.pitches[0]; - DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n", - pipe_name(intel_crtc->pipe), - crtc->state->adjusted_mode.crtc_hdisplay, - crtc->state->adjusted_mode.crtc_vdisplay, - fb->base.format->cpp[0] * 8, - cur_size); + drm_dbg_kms(&i915->drm, + "pipe %c area: %dx%d, bpp: %d, size: %d\n", + pipe_name(intel_crtc->pipe), + crtc->state->adjusted_mode.crtc_hdisplay, + crtc->state->adjusted_mode.crtc_vdisplay, + fb->base.format->cpp[0] * 8, + cur_size); if (cur_size > max_size) { - DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n", - pipe_name(intel_crtc->pipe), - cur_size, max_size); + drm_dbg_kms(&i915->drm, + "fb not big enough for plane %c (%d vs %d)\n", + pipe_name(intel_crtc->pipe), + cur_size, max_size); fb = NULL; break; } - DRM_DEBUG_KMS("fb big enough for plane %c (%d >= %d)\n", - pipe_name(intel_crtc->pipe), - max_size, cur_size); + drm_dbg_kms(&i915->drm, + "fb big enough for plane %c (%d >= %d)\n", + pipe_name(intel_crtc->pipe), + max_size, cur_size); } if (!fb) { - DRM_DEBUG_KMS("BIOS fb not suitable for all pipes, not using\n"); + drm_dbg_kms(&i915->drm, + "BIOS fb not suitable for all pipes, not using\n"); goto out; } @@ -415,7 +428,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, } - DRM_DEBUG_KMS("using BIOS fb for initial console\n"); + drm_dbg_kms(&i915->drm, "using BIOS fb for initial console\n"); return true; out: @@ -522,8 +535,9 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv) * processing, fbdev will perform a full connector reprobe if a hotplug event * was received while HPD was suspended. 
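The fbdev hunks above follow the conversion applied throughout this diff: DRM_DEBUG_KMS()/DRM_ERROR() calls, which give no indication of which device is logging, become drm_dbg_kms()/drm_err() calls that take a struct drm_device so messages can be attributed to a specific GPU. A minimal sketch of the pattern as used here (example_probe_fb() and its message are made up for illustration; to_i915() and drm_dbg_kms() appear as-is in the patch):

/* Hypothetical helper showing the logging conversion used in this series. */
static void example_probe_fb(struct drm_connector *connector, int width, int height)
{
	struct drm_i915_private *i915 = to_i915(connector->dev);

	/* Old style, removed by this series: DRM_DEBUG_KMS("probing %dx%d fb\n", ...); */
	drm_dbg_kms(&i915->drm, "probing %dx%d fb for [CONNECTOR:%d:%s]\n",
		    width, height, connector->base.id, connector->name);
}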
*/ -static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state) +static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state) { + struct intel_fbdev *ifbdev = i915->fbdev; bool send_hpd = false; mutex_lock(&ifbdev->hpd_lock); @@ -533,7 +547,7 @@ static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state) mutex_unlock(&ifbdev->hpd_lock); if (send_hpd) { - DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n"); + drm_dbg_kms(&i915->drm, "Handling delayed fbcon HPD event\n"); drm_fb_helper_hotplug_event(&ifbdev->helper); } } @@ -588,7 +602,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous drm_fb_helper_set_suspend(&ifbdev->helper, state); console_unlock(); - intel_fbdev_hpd_set_suspend(ifbdev, state); + intel_fbdev_hpd_set_suspend(dev_priv, state); } void intel_fbdev_output_poll_changed(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c index a0cc894c3868..6f72feb14f3e 100644 --- a/drivers/gpu/drm/i915/display/intel_global_state.c +++ b/drivers/gpu/drm/i915/display/intel_global_state.c @@ -71,6 +71,7 @@ struct intel_global_state * intel_atomic_get_global_obj_state(struct intel_atomic_state *state, struct intel_global_obj *obj) { + struct drm_i915_private *i915 = to_i915(state->base.dev); int index, num_objs, i; size_t size; struct __intel_global_objs_state *arr; @@ -106,8 +107,8 @@ intel_atomic_get_global_obj_state(struct intel_atomic_state *state, state->num_global_objs = num_objs; - DRM_DEBUG_ATOMIC("Added new global object %p state %p to %p\n", - obj, obj_state, state); + drm_dbg_atomic(&i915->drm, "Added new global object %p state %p to %p\n", + obj, obj_state, state); return obj_state; } diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index ee0f27ea2810..d3ad10653b2e 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -1391,6 +1391,7 @@ static int hdcp2_propagate_stream_management_info(struct intel_connector *connector) { struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; union { struct hdcp2_rep_stream_manage stream_manage; @@ -1431,7 +1432,7 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector) hdcp->seq_num_m++; if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) { - DRM_DEBUG_KMS("seq_num_m roll over.\n"); + drm_dbg_kms(&i915->drm, "seq_num_m roll over.\n"); return -1; } @@ -2075,7 +2076,8 @@ int intel_hdcp_disable(struct intel_connector *connector) return ret; } -void intel_hdcp_update_pipe(struct intel_encoder *encoder, +void intel_hdcp_update_pipe(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h index 7c12ad609b1f..86bbaec120cc 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp.h @@ -11,6 +11,7 @@ struct drm_connector; struct drm_connector_state; struct drm_i915_private; +struct intel_atomic_state; struct intel_connector; struct intel_crtc_state; struct intel_encoder; @@ -26,7 +27,8 @@ int intel_hdcp_init(struct intel_connector *connector, int intel_hdcp_enable(struct intel_connector *connector, 
enum transcoder cpu_transcoder, u8 content_type); int intel_hdcp_disable(struct intel_connector *connector); -void intel_hdcp_update_pipe(struct intel_encoder *encoder, +void intel_hdcp_update_pipe(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 39930232b253..6b1bc955124c 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -707,13 +707,15 @@ void intel_read_infoframe(struct intel_encoder *encoder, /* see comment above for the reason for this offset */ ret = hdmi_infoframe_unpack(frame, buffer + 1, sizeof(buffer) - 1); if (ret) { - DRM_DEBUG_KMS("Failed to unpack infoframe type 0x%02x\n", type); + drm_dbg_kms(encoder->base.dev, + "Failed to unpack infoframe type 0x%02x\n", type); return; } if (frame->any.type != type) - DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n", - frame->any.type, type); + drm_dbg_kms(encoder->base.dev, + "Found the wrong infoframe type 0x%x (expected 0x%02x)\n", + frame->any.type, type); } static bool @@ -853,7 +855,8 @@ intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder, ret = drm_hdmi_infoframe_set_hdr_metadata(frame, conn_state); if (ret < 0) { - DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n"); + drm_dbg_kms(&dev_priv->drm, + "couldn't set HDR metadata in infoframe\n"); return false; } @@ -893,8 +896,9 @@ static void g4x_set_infoframes(struct intel_encoder *encoder, if (!(val & VIDEO_DIP_ENABLE)) return; if (port != (val & VIDEO_DIP_PORT_MASK)) { - DRM_DEBUG_KMS("video DIP still enabled on port %c\n", - (val & VIDEO_DIP_PORT_MASK) >> 29); + drm_dbg_kms(&dev_priv->drm, + "video DIP still enabled on port %c\n", + (val & VIDEO_DIP_PORT_MASK) >> 29); return; } val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI | @@ -906,8 +910,9 @@ static void g4x_set_infoframes(struct intel_encoder *encoder, if (port != (val & VIDEO_DIP_PORT_MASK)) { if (val & VIDEO_DIP_ENABLE) { - DRM_DEBUG_KMS("video DIP already enabled on port %c\n", - (val & VIDEO_DIP_PORT_MASK) >> 29); + drm_dbg_kms(&dev_priv->drm, + "video DIP already enabled on port %c\n", + (val & VIDEO_DIP_PORT_MASK) >> 29); return; } val &= ~VIDEO_DIP_PORT_MASK; @@ -1264,8 +1269,8 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI) return; - DRM_DEBUG_KMS("%s DP dual mode adaptor TMDS output\n", - enable ? "Enabling" : "Disabling"); + drm_dbg_kms(&dev_priv->drm, "%s DP dual mode adaptor TMDS output\n", + enable ? 
"Enabling" : "Disabling"); drm_dp_dual_mode_set_tmds_output(hdmi->dp_dual_mode.type, adapter, enable); @@ -1346,13 +1351,14 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an, DRM_HDCP_AN_LEN); if (ret) { - DRM_DEBUG_KMS("Write An over DDC failed (%d)\n", ret); + drm_dbg_kms(&i915->drm, "Write An over DDC failed (%d)\n", + ret); return ret; } ret = intel_gmbus_output_aksv(adapter); if (ret < 0) { - DRM_DEBUG_KMS("Failed to output aksv (%d)\n", ret); + drm_dbg_kms(&i915->drm, "Failed to output aksv (%d)\n", ret); return ret; } return 0; @@ -1361,11 +1367,14 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, u8 *bksv) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + int ret; ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv, DRM_HDCP_KSV_LEN); if (ret) - DRM_DEBUG_KMS("Read Bksv over DDC failed (%d)\n", ret); + drm_dbg_kms(&i915->drm, "Read Bksv over DDC failed (%d)\n", + ret); return ret; } @@ -1373,11 +1382,14 @@ static int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, u8 *bstatus) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + int ret; ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS, bstatus, DRM_HDCP_BSTATUS_LEN); if (ret) - DRM_DEBUG_KMS("Read bstatus over DDC failed (%d)\n", ret); + drm_dbg_kms(&i915->drm, "Read bstatus over DDC failed (%d)\n", + ret); return ret; } @@ -1385,12 +1397,14 @@ static int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port, bool *repeater_present) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); int ret; u8 val; ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); if (ret) { - DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret); + drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n", + ret); return ret; } *repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT; @@ -1401,11 +1415,14 @@ static int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port, u8 *ri_prime) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + int ret; ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME, ri_prime, DRM_HDCP_RI_LEN); if (ret) - DRM_DEBUG_KMS("Read Ri' over DDC failed (%d)\n", ret); + drm_dbg_kms(&i915->drm, "Read Ri' over DDC failed (%d)\n", + ret); return ret; } @@ -1413,12 +1430,14 @@ static int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port, bool *ksv_ready) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); int ret; u8 val; ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); if (ret) { - DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret); + drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n", + ret); return ret; } *ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY; @@ -1429,11 +1448,13 @@ static int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port, int num_downstream, u8 *ksv_fifo) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); int ret; ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO, ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN); if (ret) { - DRM_DEBUG_KMS("Read ksv fifo over DDC failed (%d)\n", ret); + drm_dbg_kms(&i915->drm, + "Read ksv fifo over DDC failed 
(%d)\n", ret); return ret; } return 0; @@ -1443,6 +1464,7 @@ static int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, int i, u32 *part) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); int ret; if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) @@ -1451,7 +1473,8 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i), part, DRM_HDCP_V_PRIME_PART_LEN); if (ret) - DRM_DEBUG_KMS("Read V'[%d] over DDC failed (%d)\n", i, ret); + drm_dbg_kms(&i915->drm, "Read V'[%d] over DDC failed (%d)\n", + i, ret); return ret; } @@ -1474,12 +1497,14 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector) ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, false); if (ret) { - DRM_ERROR("Disable HDCP signalling failed (%d)\n", ret); + drm_err(&dev_priv->drm, + "Disable HDCP signalling failed (%d)\n", ret); return ret; } ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, true); if (ret) { - DRM_ERROR("Enable HDCP signalling failed (%d)\n", ret); + drm_err(&dev_priv->drm, + "Enable HDCP signalling failed (%d)\n", ret); return ret; } @@ -1500,8 +1525,8 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, enable); if (ret) { - DRM_ERROR("%s HDCP signalling failed (%d)\n", - enable ? "Enable" : "Disable", ret); + drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n", + enable ? "Enable" : "Disable", ret); return ret; } @@ -1536,10 +1561,13 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port) intel_de_write(i915, HDCP_RPRIME(i915, cpu_transcoder, port), ri.reg); /* Wait for Ri prime match */ - if (wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) & + if (wait_for((intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) & + (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) == (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) { - DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n", - intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port))); + drm_err(&i915->drm, + "Ri' mismatch detected, link check failed (%x)\n", + intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, + port))); return false; } return true; @@ -1588,16 +1616,18 @@ static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired) } static inline -int hdcp2_detect_msg_availability(struct intel_digital_port *intel_digital_port, +int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port, u8 msg_id, bool *msg_ready, ssize_t *msg_sz) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN]; int ret; - ret = intel_hdmi_hdcp2_read_rx_status(intel_digital_port, rx_status); + ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status); if (ret < 0) { - DRM_DEBUG_KMS("rx_status read failed. Err %d\n", ret); + drm_dbg_kms(&i915->drm, "rx_status read failed. 
Err %d\n", + ret); return ret; } @@ -1617,6 +1647,7 @@ static ssize_t intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port, u8 msg_id, bool paired) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); bool msg_ready = false; int timeout, ret; ssize_t msg_sz = 0; @@ -1631,8 +1662,8 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port, !ret && msg_ready && msg_sz, timeout * 1000, 1000, 5 * 1000); if (ret) - DRM_DEBUG_KMS("msg_id: %d, ret: %d, timeout: %d\n", - msg_id, ret, timeout); + drm_dbg_kms(&i915->drm, "msg_id: %d, ret: %d, timeout: %d\n", + msg_id, ret, timeout); return ret ? ret : msg_sz; } @@ -1651,6 +1682,7 @@ static int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port, u8 msg_id, void *buf, size_t size) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); struct intel_hdmi *hdmi = &intel_dig_port->hdmi; struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp; unsigned int offset; @@ -1666,15 +1698,17 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port, * available buffer. */ if (ret > size) { - DRM_DEBUG_KMS("msg_sz(%zd) is more than exp size(%zu)\n", - ret, size); + drm_dbg_kms(&i915->drm, + "msg_sz(%zd) is more than exp size(%zu)\n", + ret, size); return -1; } offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET; ret = intel_hdmi_hdcp_read(intel_dig_port, offset, buf, ret); if (ret) - DRM_DEBUG_KMS("Failed to read msg_id: %d(%zd)\n", msg_id, ret); + drm_dbg_kms(&i915->drm, "Failed to read msg_id: %d(%zd)\n", + msg_id, ret); return ret; } @@ -1870,15 +1904,17 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { + struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); - drm_WARN_ON(encoder->base.dev, !pipe_config->has_hdmi_sink); - DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n", - pipe_name(crtc->pipe)); + drm_WARN_ON(&i915->drm, !pipe_config->has_hdmi_sink); + drm_dbg_kms(&i915->drm, "Enabling HDMI audio on pipe %c\n", + pipe_name(crtc->pipe)); intel_audio_codec_enable(encoder, pipe_config, conn_state); } -static void g4x_enable_hdmi(struct intel_encoder *encoder, +static void g4x_enable_hdmi(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -1900,7 +1936,8 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder, intel_enable_hdmi_audio(encoder, pipe_config, conn_state); } -static void ibx_enable_hdmi(struct intel_encoder *encoder, +static void ibx_enable_hdmi(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -1951,7 +1988,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder, intel_enable_hdmi_audio(encoder, pipe_config, conn_state); } -static void cpt_enable_hdmi(struct intel_encoder *encoder, +static void cpt_enable_hdmi(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -2004,13 +2042,15 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder, intel_enable_hdmi_audio(encoder, pipe_config, conn_state); } -static void vlv_enable_hdmi(struct intel_encoder *encoder, +static void vlv_enable_hdmi(struct intel_atomic_state *state, + 
struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { } -static void intel_disable_hdmi(struct intel_encoder *encoder, +static void intel_disable_hdmi(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -2068,7 +2108,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder, intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); } -static void g4x_disable_hdmi(struct intel_encoder *encoder, +static void g4x_disable_hdmi(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -2076,10 +2117,11 @@ static void g4x_disable_hdmi(struct intel_encoder *encoder, intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); - intel_disable_hdmi(encoder, old_crtc_state, old_conn_state); + intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state); } -static void pch_disable_hdmi(struct intel_encoder *encoder, +static void pch_disable_hdmi(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -2088,11 +2130,12 @@ static void pch_disable_hdmi(struct intel_encoder *encoder, old_crtc_state, old_conn_state); } -static void pch_post_disable_hdmi(struct intel_encoder *encoder, +static void pch_post_disable_hdmi(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - intel_disable_hdmi(encoder, old_crtc_state, old_conn_state); + intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state); } static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder) @@ -2289,10 +2332,12 @@ static bool intel_hdmi_ycbcr420_config(struct drm_connector *connector, struct intel_crtc_state *config) { + struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_crtc *intel_crtc = to_intel_crtc(config->uapi.crtc); if (!connector->ycbcr_420_allowed) { - DRM_ERROR("Platform doesn't support YCBCR420 output\n"); + drm_err(&i915->drm, + "Platform doesn't support YCBCR420 output\n"); return false; } @@ -2300,7 +2345,8 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector, /* YCBCR 420 output conversion needs a scaler */ if (skl_update_scaler_crtc(config)) { - DRM_DEBUG_KMS("Scaler allocation for output failed\n"); + drm_dbg_kms(&i915->drm, + "Scaler allocation for output failed\n"); return false; } @@ -2341,6 +2387,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, static int intel_hdmi_compute_clock(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { + struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; @@ -2365,13 +2412,15 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder, if (crtc_state->pipe_bpp > bpc * 3) crtc_state->pipe_bpp = bpc * 3; - DRM_DEBUG_KMS("picking %d bpc for HDMI output (pipe bpp: %d)\n", - bpc, crtc_state->pipe_bpp); + drm_dbg_kms(&i915->drm, + "picking %d bpc for HDMI output (pipe bpp: %d)\n", + bpc, crtc_state->pipe_bpp); if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock, false, crtc_state->has_hdmi_sink) != MODE_OK) { - 
DRM_DEBUG_KMS("unsupported HDMI clock (%d kHz), rejecting mode\n", - crtc_state->port_clock); + drm_dbg_kms(&i915->drm, + "unsupported HDMI clock (%d kHz), rejecting mode\n", + crtc_state->port_clock); return -EINVAL; } @@ -2434,7 +2483,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) { if (!intel_hdmi_ycbcr420_config(connector, pipe_config)) { - DRM_ERROR("Can't support YCBCR420 output\n"); + drm_err(&dev_priv->drm, + "Can't support YCBCR420 output\n"); return -EINVAL; } } @@ -2474,25 +2524,26 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, } } - intel_hdmi_compute_gcp_infoframe(encoder, pipe_config, conn_state); + intel_hdmi_compute_gcp_infoframe(encoder, pipe_config, + conn_state); if (!intel_hdmi_compute_avi_infoframe(encoder, pipe_config, conn_state)) { - DRM_DEBUG_KMS("bad AVI infoframe\n"); + drm_dbg_kms(&dev_priv->drm, "bad AVI infoframe\n"); return -EINVAL; } if (!intel_hdmi_compute_spd_infoframe(encoder, pipe_config, conn_state)) { - DRM_DEBUG_KMS("bad SPD infoframe\n"); + drm_dbg_kms(&dev_priv->drm, "bad SPD infoframe\n"); return -EINVAL; } if (!intel_hdmi_compute_hdmi_infoframe(encoder, pipe_config, conn_state)) { - DRM_DEBUG_KMS("bad HDMI infoframe\n"); + drm_dbg_kms(&dev_priv->drm, "bad HDMI infoframe\n"); return -EINVAL; } if (!intel_hdmi_compute_drm_infoframe(encoder, pipe_config, conn_state)) { - DRM_DEBUG_KMS("bad DRM infoframe\n"); + drm_dbg_kms(&dev_priv->drm, "bad DRM infoframe\n"); return -EINVAL; } @@ -2542,7 +2593,8 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid) */ if (has_edid && !connector->override_edid && intel_bios_is_port_dp_dual_mode(dev_priv, port)) { - DRM_DEBUG_KMS("Assuming DP dual mode adaptor presence based on VBT\n"); + drm_dbg_kms(&dev_priv->drm, + "Assuming DP dual mode adaptor presence based on VBT\n"); type = DRM_DP_DUAL_MODE_TYPE1_DVI; } else { type = DRM_DP_DUAL_MODE_NONE; @@ -2556,9 +2608,10 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid) hdmi->dp_dual_mode.max_tmds_clock = drm_dp_dual_mode_max_tmds_clock(type, adapter); - DRM_DEBUG_KMS("DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n", - drm_dp_get_dual_mode_type_name(type), - hdmi->dp_dual_mode.max_tmds_clock); + drm_dbg_kms(&dev_priv->drm, + "DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n", + drm_dp_get_dual_mode_type_name(type), + hdmi->dp_dual_mode.max_tmds_clock); } static bool @@ -2578,7 +2631,8 @@ intel_hdmi_set_edid(struct drm_connector *connector) edid = drm_get_edid(connector, i2c); if (!edid && !intel_gmbus_is_forced_bit(i2c)) { - DRM_DEBUG_KMS("HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n"); + drm_dbg_kms(&dev_priv->drm, + "HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n"); intel_gmbus_force_bit(i2c, true); edid = drm_get_edid(connector, i2c); intel_gmbus_force_bit(i2c, false); @@ -2610,8 +2664,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base; intel_wakeref_t wakeref; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", - connector->base.id, connector->name); + drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); @@ -2642,8 +2696,10 @@ out: static void intel_hdmi_force(struct drm_connector *connector) { - DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", - connector->base.id, connector->name); + struct 
drm_i915_private *i915 = to_i915(connector->dev); + + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); intel_hdmi_unset_edid(connector); @@ -2664,7 +2720,8 @@ static int intel_hdmi_get_modes(struct drm_connector *connector) return intel_connector_update_modes(connector, edid); } -static void intel_hdmi_pre_enable(struct intel_encoder *encoder, +static void intel_hdmi_pre_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -2678,7 +2735,8 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder, pipe_config, conn_state); } -static void vlv_hdmi_pre_enable(struct intel_encoder *encoder, +static void vlv_hdmi_pre_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -2695,12 +2753,13 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder, pipe_config->has_infoframe, pipe_config, conn_state); - g4x_enable_hdmi(encoder, pipe_config, conn_state); + g4x_enable_hdmi(state, encoder, pipe_config, conn_state); vlv_wait_port_ready(dev_priv, dport, 0x0); } -static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder, +static void vlv_hdmi_pre_pll_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -2709,7 +2768,8 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder, vlv_phy_pre_pll_enable(encoder, pipe_config); } -static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder, +static void chv_hdmi_pre_pll_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -2718,14 +2778,16 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder, chv_phy_pre_pll_enable(encoder, pipe_config); } -static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder, +static void chv_hdmi_post_pll_disable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { chv_phy_post_pll_disable(encoder, old_crtc_state); } -static void vlv_hdmi_post_disable(struct intel_encoder *encoder, +static void vlv_hdmi_post_disable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -2733,7 +2795,8 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder, vlv_phy_reset_lanes(encoder, old_crtc_state); } -static void chv_hdmi_post_disable(struct intel_encoder *encoder, +static void chv_hdmi_post_disable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -2748,7 +2811,8 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder, vlv_dpio_put(dev_priv); } -static void chv_hdmi_pre_enable(struct intel_encoder *encoder, +static void chv_hdmi_pre_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -2766,7 +2830,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder, pipe_config->has_infoframe, pipe_config, 
conn_state); - g4x_enable_hdmi(encoder, pipe_config, conn_state); + g4x_enable_hdmi(state, encoder, pipe_config, conn_state); vlv_wait_port_ready(dev_priv, dport, 0x0); @@ -2785,6 +2849,7 @@ intel_hdmi_get_i2c_adapter(struct drm_connector *connector) static void intel_hdmi_create_i2c_symlink(struct drm_connector *connector) { + struct drm_i915_private *i915 = to_i915(connector->dev); struct i2c_adapter *adapter = intel_hdmi_get_i2c_adapter(connector); struct kobject *i2c_kobj = &adapter->dev.kobj; struct kobject *connector_kobj = &connector->kdev->kobj; @@ -2792,7 +2857,7 @@ static void intel_hdmi_create_i2c_symlink(struct drm_connector *connector) ret = sysfs_create_link(connector_kobj, i2c_kobj, i2c_kobj->name); if (ret) - DRM_ERROR("Failed to create i2c symlink (%d)\n", ret); + drm_err(&i915->drm, "Failed to create i2c symlink (%d)\n", ret); } static void intel_hdmi_remove_i2c_symlink(struct drm_connector *connector) @@ -2921,9 +2986,10 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder, if (!sink_scrambling->supported) return true; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n", - connector->base.id, connector->name, - yesno(scrambling), high_tmds_clock_ratio ? 40 : 10); + drm_dbg_kms(&dev_priv->drm, + "[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n", + connector->base.id, connector->name, + yesno(scrambling), high_tmds_clock_ratio ? 40 : 10); /* Set TMDS bit clock ratio to 1/40 or 1/10, and enable/disable scrambling */ return drm_scdc_set_high_tmds_clock_ratio(adapter, @@ -3065,8 +3131,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder) ddc_pin = intel_bios_alternate_ddc_pin(encoder); if (ddc_pin) { - DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n", - ddc_pin, port_name(port)); + drm_dbg_kms(&dev_priv->drm, + "Using DDC pin 0x%x for port %c (VBT)\n", + ddc_pin, port_name(port)); return ddc_pin; } @@ -3083,8 +3150,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder) else ddc_pin = g4x_port_to_ddc_pin(dev_priv, port); - DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n", - ddc_pin, port_name(port)); + drm_dbg_kms(&dev_priv->drm, + "Using DDC pin 0x%x for port %c (platform default)\n", + ddc_pin, port_name(port)); return ddc_pin; } @@ -3141,8 +3209,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, enum port port = intel_encoder->port; struct cec_connector_info conn_info; - DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n", - intel_encoder->base.base.id, intel_encoder->base.name); + drm_dbg_kms(&dev_priv->drm, + "Adding HDMI connector on [ENCODER:%d:%s]\n", + intel_encoder->base.base.id, intel_encoder->base.name); if (INTEL_GEN(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A)) return; @@ -3186,7 +3255,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, int ret = intel_hdcp_init(intel_connector, &intel_hdmi_hdcp_shim); if (ret) - DRM_DEBUG_KMS("HDCP init failed, skipping.\n"); + drm_dbg_kms(&dev_priv->drm, + "HDCP init failed, skipping.\n"); } /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written @@ -3205,16 +3275,16 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, cec_notifier_conn_register(dev->dev, port_identifier(port), &conn_info); if (!intel_hdmi->cec_notifier) - DRM_DEBUG_KMS("CEC notifier get failed\n"); + drm_dbg_kms(&dev_priv->drm, "CEC notifier get failed\n"); } static enum intel_hotplug_state intel_hdmi_hotplug(struct intel_encoder *encoder, - struct 
intel_connector *connector, bool irq_received) + struct intel_connector *connector) { enum intel_hotplug_state state; - state = intel_encoder_hotplug(encoder, connector, irq_received); + state = intel_encoder_hotplug(encoder, connector); /* * On many platforms the HDMI live state signal is known to be @@ -3228,7 +3298,7 @@ intel_hdmi_hotplug(struct intel_encoder *encoder, * time around we didn't detect any change in the sink's connection * status. */ - if (state == INTEL_HOTPLUG_UNCHANGED && irq_received) + if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries) state = INTEL_HOTPLUG_RETRY; return state; diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index a091442efba4..4f6f560e093e 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -270,8 +270,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder, - struct intel_connector *connector, - bool irq_received) + struct intel_connector *connector) { struct drm_device *dev = connector->base.dev; enum drm_connector_status old_status; @@ -392,12 +391,17 @@ static void i915_hotplug_work_func(struct work_struct *work) struct intel_encoder *encoder = intel_attached_encoder(connector); + if (hpd_event_bits & hpd_bit) + connector->hotplug_retries = 0; + else + connector->hotplug_retries++; + drm_dbg_kms(&dev_priv->drm, - "Connector %s (pin %i) received hotplug event.\n", - connector->base.name, pin); + "Connector %s (pin %i) received hotplug event. (retry %d)\n", + connector->base.name, pin, + connector->hotplug_retries); - switch (encoder->hotplug(encoder, connector, - hpd_event_bits & hpd_bit)) { + switch (encoder->hotplug(encoder, connector)) { case INTEL_HOTPLUG_UNCHANGED: break; case INTEL_HOTPLUG_CHANGED: diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h index 1e6b4fda2900..777b0743257e 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.h +++ b/drivers/gpu/drm/i915/display/intel_hotplug.h @@ -15,8 +15,7 @@ enum port; void intel_hpd_poll_init(struct drm_i915_private *dev_priv); enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder, - struct intel_connector *connector, - bool irq_received); + struct intel_connector *connector); void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 pin_mask, u32 long_mask); void intel_hpd_init(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index 9a067effcfa0..fe591f82163e 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -220,7 +220,8 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv, REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1)); } -static void intel_pre_enable_lvds(struct intel_encoder *encoder, +static void intel_pre_enable_lvds(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -301,7 +302,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder, /* * Sets the power state for the panel. 
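The intel_hotplug.c and intel_hdmi.c hunks above replace the irq_received flag with a per-connector hotplug_retries counter: the work function zeroes it when a fresh event bit is set for the pin and increments it on a retry pass, and the HDMI hook now requests a retry only when detection saw no change and no retry has happened yet. A standalone sketch of that flow (the struct and the note_hotplug_pass()/hdmi_hotplug() names are invented for illustration):

#include <stdbool.h>

enum hotplug_state { HOTPLUG_UNCHANGED, HOTPLUG_CHANGED, HOTPLUG_RETRY };

/* Stand-in for the new connector->hotplug_retries bookkeeping. */
struct connector_hpd {
	int retries;
};

/* Called per work-function pass: a fresh event resets, a retry pass increments. */
static void note_hotplug_pass(struct connector_hpd *c, bool fresh_event)
{
	if (fresh_event)
		c->retries = 0;
	else
		c->retries++;
}

/*
 * HDMI live state can be unreliable right after plug-in, so if the first
 * pass saw no change, ask for a single retry; once retries is non-zero
 * the unchanged result stands.
 */
static enum hotplug_state hdmi_hotplug(struct connector_hpd *c,
				       enum hotplug_state detected)
{
	if (detected == HOTPLUG_UNCHANGED && !c->retries)
		return HOTPLUG_RETRY;
	return detected;
}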
*/ -static void intel_enable_lvds(struct intel_encoder *encoder, +static void intel_enable_lvds(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -323,7 +325,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder, intel_panel_enable_backlight(pipe_config, conn_state); } -static void intel_disable_lvds(struct intel_encoder *encoder, +static void intel_disable_lvds(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -341,28 +344,31 @@ static void intel_disable_lvds(struct intel_encoder *encoder, intel_de_posting_read(dev_priv, lvds_encoder->reg); } -static void gmch_disable_lvds(struct intel_encoder *encoder, +static void gmch_disable_lvds(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { intel_panel_disable_backlight(old_conn_state); - intel_disable_lvds(encoder, old_crtc_state, old_conn_state); + intel_disable_lvds(state, encoder, old_crtc_state, old_conn_state); } -static void pch_disable_lvds(struct intel_encoder *encoder, +static void pch_disable_lvds(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { intel_panel_disable_backlight(old_conn_state); } -static void pch_post_disable_lvds(struct intel_encoder *encoder, +static void pch_post_disable_lvds(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - intel_disable_lvds(encoder, old_crtc_state, old_conn_state); + intel_disable_lvds(state, encoder, old_crtc_state, old_conn_state); } static enum drm_mode_status diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 481187223101..6e1d66323223 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -1342,7 +1342,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv) if (!HAS_OVERLAY(dev_priv)) return; - engine = dev_priv->engine[RCS0]; + engine = dev_priv->gt.engine[RCS0]; if (!engine || !engine->kernel_context) return; diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 276f43870802..08bfecfbe681 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -684,9 +684,10 @@ static void intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); + drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", level); level = intel_panel_compute_brightness(connector, level); panel->backlight.set(conn_state, level); @@ -867,8 +868,8 @@ void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_st * another client is not activated. 
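Many of the display hunks in this section are mechanical conversions from the global DRM_DEBUG_KMS()/DRM_ERROR() style macros to the device-aware drm_dbg_kms()/drm_err() helpers, which take a drm device pointer so each message identifies the GPU that produced it. A small stand-alone C sketch of the same idea follows; log_dev() and this struct device are invented for illustration, not the drm core API.

/* Illustrative sketch only -- hypothetical helpers, not the drm core API. */
#include <stdarg.h>
#include <stdio.h>

struct device {
	const char *name;	/* e.g. a PCI address */
};

/* Global-style logging: no way to tell which GPU emitted the message. */
static void log_global(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

/* Device-aware logging: every message is prefixed with the device name. */
static void log_dev(const struct device *dev, const char *fmt, ...)
{
	va_list ap;

	fprintf(stderr, "[%s] ", dev->name);
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

int main(void)
{
	struct device igpu = { .name = "0000:00:02.0" };

	log_global("Failed to create i2c symlink (%d)\n", -2);
	log_dev(&igpu, "Failed to create i2c symlink (%d)\n", -2);
	return 0;
}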
*/ if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) { - drm_dbg(&dev_priv->drm, - "Skipping backlight disable on vga switch\n"); + drm_dbg_kms(&dev_priv->drm, + "Skipping backlight disable on vga switch\n"); return; } @@ -1244,7 +1245,7 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector) mutex_unlock(&dev_priv->backlight_lock); - drm_dbg(&dev_priv->drm, "get backlight PWM = %d\n", val); + drm_dbg_kms(&dev_priv->drm, "get backlight PWM = %d\n", val); return val; } @@ -1335,6 +1336,7 @@ static const struct backlight_ops intel_backlight_device_ops = { int intel_backlight_device_register(struct intel_connector *connector) { + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; struct backlight_properties props; @@ -1374,14 +1376,15 @@ int intel_backlight_device_register(struct intel_connector *connector) &intel_backlight_device_ops, &props); if (IS_ERR(panel->backlight.device)) { - DRM_ERROR("Failed to register backlight: %ld\n", - PTR_ERR(panel->backlight.device)); + drm_err(&i915->drm, "Failed to register backlight: %ld\n", + PTR_ERR(panel->backlight.device)); panel->backlight.device = NULL; return -ENODEV; } - DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n", - connector->base.name); + drm_dbg_kms(&i915->drm, + "Connector %s backlight sysfs interface registered\n", + connector->base.name); return 0; } @@ -1931,7 +1934,8 @@ static int pwm_setup_backlight(struct intel_connector *connector, return 0; } -void intel_panel_update_backlight(struct intel_encoder *encoder, +void intel_panel_update_backlight(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h index cedeea443336..11f2f6b628d8 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.h +++ b/drivers/gpu/drm/i915/display/intel_panel.h @@ -37,7 +37,8 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe); void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); -void intel_panel_update_backlight(struct intel_encoder *encoder, +void intel_panel_update_backlight(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state); diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index fd9b146e3aba..a0569fdfeb16 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -137,41 +137,42 @@ static void psr_irq_control(struct drm_i915_private *dev_priv) intel_de_write(dev_priv, imr_reg, val); } -static void psr_event_print(u32 val, bool psr2_enabled) +static void psr_event_print(struct drm_i915_private *i915, + u32 val, bool psr2_enabled) { - DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val); + drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val); if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE) - DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n"); + drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n"); if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled) - DRM_DEBUG_KMS("\tPSR2 disabled\n"); + drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n"); if (val & 
PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN) - DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n"); + drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n"); if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN) - DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n"); + drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n"); if (val & PSR_EVENT_GRAPHICS_RESET) - DRM_DEBUG_KMS("\tGraphics reset\n"); + drm_dbg_kms(&i915->drm, "\tGraphics reset\n"); if (val & PSR_EVENT_PCH_INTERRUPT) - DRM_DEBUG_KMS("\tPCH interrupt\n"); + drm_dbg_kms(&i915->drm, "\tPCH interrupt\n"); if (val & PSR_EVENT_MEMORY_UP) - DRM_DEBUG_KMS("\tMemory up\n"); + drm_dbg_kms(&i915->drm, "\tMemory up\n"); if (val & PSR_EVENT_FRONT_BUFFER_MODIFY) - DRM_DEBUG_KMS("\tFront buffer modification\n"); + drm_dbg_kms(&i915->drm, "\tFront buffer modification\n"); if (val & PSR_EVENT_WD_TIMER_EXPIRE) - DRM_DEBUG_KMS("\tPSR watchdog timer expired\n"); + drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n"); if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE) - DRM_DEBUG_KMS("\tPIPE registers updated\n"); + drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n"); if (val & PSR_EVENT_REGISTER_UPDATE) - DRM_DEBUG_KMS("\tRegister updated\n"); + drm_dbg_kms(&i915->drm, "\tRegister updated\n"); if (val & PSR_EVENT_HDCP_ENABLE) - DRM_DEBUG_KMS("\tHDCP enabled\n"); + drm_dbg_kms(&i915->drm, "\tHDCP enabled\n"); if (val & PSR_EVENT_KVMR_SESSION_ENABLE) - DRM_DEBUG_KMS("\tKVMR session enabled\n"); + drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n"); if (val & PSR_EVENT_VBI_ENABLE) - DRM_DEBUG_KMS("\tVBI enabled\n"); + drm_dbg_kms(&i915->drm, "\tVBI enabled\n"); if (val & PSR_EVENT_LPSP_MODE_EXIT) - DRM_DEBUG_KMS("\tLPSP mode exited\n"); + drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n"); if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled) - DRM_DEBUG_KMS("\tPSR disabled\n"); + drm_dbg_kms(&i915->drm, "\tPSR disabled\n"); } void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) @@ -209,7 +210,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder), val); - psr_event_print(val, psr2_enabled); + psr_event_print(dev_priv, val, psr2_enabled); } } @@ -249,18 +250,21 @@ static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp) static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 val = 8; /* assume the worst if we can't read the value */ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1) val &= DP_MAX_RESYNC_FRAME_COUNT_MASK; else - DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n"); + drm_dbg_kms(&i915->drm, + "Unable to get sink synchronization latency, assuming 8 frames\n"); return val; } static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); u16 val; ssize_t r; @@ -273,7 +277,8 @@ static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp) r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2); if (r != 2) - DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n"); + drm_dbg_kms(&i915->drm, + "Unable to read DP_PSR2_SU_X_GRANULARITY\n"); /* * Spec says that if the value read is 0 the default granularity should diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 637d8fe2f8c2..bc6c26818e15 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -1430,7 +1430,8 @@ 
static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo, #undef UPDATE_PROPERTY } -static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, +static void intel_sdvo_pre_enable(struct intel_atomic_state *state, + struct intel_encoder *intel_encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { @@ -1727,7 +1728,8 @@ static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo, SDVO_AUDIO_PRESENCE_DETECT); } -static void intel_disable_sdvo(struct intel_encoder *encoder, +static void intel_disable_sdvo(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *conn_state) { @@ -1775,20 +1777,23 @@ static void intel_disable_sdvo(struct intel_encoder *encoder, } } -static void pch_disable_sdvo(struct intel_encoder *encoder, +static void pch_disable_sdvo(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { } -static void pch_post_disable_sdvo(struct intel_encoder *encoder, +static void pch_post_disable_sdvo(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - intel_disable_sdvo(encoder, old_crtc_state, old_conn_state); + intel_disable_sdvo(state, encoder, old_crtc_state, old_conn_state); } -static void intel_enable_sdvo(struct intel_encoder *encoder, +static void intel_enable_sdvo(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -1934,12 +1939,11 @@ static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) static enum intel_hotplug_state intel_sdvo_hotplug(struct intel_encoder *encoder, - struct intel_connector *connector, - bool irq_received) + struct intel_connector *connector) { intel_sdvo_enable_hotplug(encoder); - return intel_encoder_hotplug(encoder, connector, irq_received); + return intel_encoder_hotplug(encoder, connector); } static bool diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index deda351719db..0000ec7055f7 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -2503,6 +2503,7 @@ static const u32 skl_plane_formats[] = { DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, + DRM_FORMAT_XYUV8888, }; static const u32 skl_planar_formats[] = { @@ -2521,6 +2522,7 @@ static const u32 skl_planar_formats[] = { DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_NV12, + DRM_FORMAT_XYUV8888, }; static const u32 glk_planar_formats[] = { @@ -2539,6 +2541,7 @@ static const u32 glk_planar_formats[] = { DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_NV12, + DRM_FORMAT_XYUV8888, DRM_FORMAT_P010, DRM_FORMAT_P012, DRM_FORMAT_P016, @@ -2562,6 +2565,7 @@ static const u32 icl_sdr_y_plane_formats[] = { DRM_FORMAT_Y210, DRM_FORMAT_Y212, DRM_FORMAT_Y216, + DRM_FORMAT_XYUV8888, DRM_FORMAT_XVYU2101010, DRM_FORMAT_XVYU12_16161616, DRM_FORMAT_XVYU16161616, @@ -2589,6 +2593,7 @@ static const u32 icl_sdr_uv_plane_formats[] = { DRM_FORMAT_Y210, DRM_FORMAT_Y212, DRM_FORMAT_Y216, + DRM_FORMAT_XYUV8888, DRM_FORMAT_XVYU2101010, DRM_FORMAT_XVYU12_16161616, DRM_FORMAT_XVYU16161616, @@ -2620,6 +2625,7 @@ static const u32 icl_hdr_plane_formats[] = { DRM_FORMAT_Y210, DRM_FORMAT_Y212, DRM_FORMAT_Y216, + 
DRM_FORMAT_XYUV8888, DRM_FORMAT_XVYU2101010, DRM_FORMAT_XVYU12_16161616, DRM_FORMAT_XVYU16161616, @@ -2790,6 +2796,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_UYVY: case DRM_FORMAT_VYUY: case DRM_FORMAT_NV12: + case DRM_FORMAT_XYUV8888: case DRM_FORMAT_P010: case DRM_FORMAT_P012: case DRM_FORMAT_P016: @@ -2817,19 +2824,25 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, } } -static bool gen12_plane_supports_mc_ccs(enum plane_id plane_id) +static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv, + enum plane_id plane_id) { + /* Wa_14010477008:tgl[a0..c0] */ + if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0)) + return false; + return plane_id < PLANE_SPRITE4; } static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { + struct drm_i915_private *dev_priv = to_i915(_plane->dev); struct intel_plane *plane = to_intel_plane(_plane); switch (modifier) { case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: - if (!gen12_plane_supports_mc_ccs(plane->id)) + if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id)) return false; /* fall through */ case DRM_FORMAT_MOD_LINEAR: @@ -2854,6 +2867,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_UYVY: case DRM_FORMAT_VYUY: case DRM_FORMAT_NV12: + case DRM_FORMAT_XYUV8888: case DRM_FORMAT_P010: case DRM_FORMAT_P012: case DRM_FORMAT_P016: @@ -2998,9 +3012,10 @@ static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv, } } -static const u64 *gen12_get_plane_modifiers(enum plane_id plane_id) +static const u64 *gen12_get_plane_modifiers(struct drm_i915_private *dev_priv, + enum plane_id plane_id) { - if (gen12_plane_supports_mc_ccs(plane_id)) + if (gen12_plane_supports_mc_ccs(dev_priv, plane_id)) return gen12_plane_format_modifiers_mc_ccs; else return gen12_plane_format_modifiers_rc_ccs; @@ -3070,7 +3085,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id); if (INTEL_GEN(dev_priv) >= 12) { - modifiers = gen12_get_plane_modifiers(plane_id); + modifiers = gen12_get_plane_modifiers(dev_priv, plane_id); plane_funcs = &gen12_plane_funcs; } else { if (plane->has_ccs) diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 9b850c11aa78..275618bedf32 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -152,6 +152,7 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port, u32 live_status_mask) { + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); u32 valid_hpd_mask; if (dig_port->tc_legacy_port) @@ -164,8 +165,9 @@ static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port, return; /* If live status mismatches the VBT flag, trust the live status. 
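The intel_sprite hunks a little above make gen12_plane_supports_mc_ccs() take the device so that media-compressed CCS can be refused on early Tigerlake steppings (the Wa_14010477008 check for revisions A0..C0). A generic sketch of such a revision-gated capability check is shown below; the platform and revision names are invented, not the i915 macros.

/* Illustrative sketch only -- invented device and revision names. */
#include <stdbool.h>
#include <stdio.h>

enum plane_id { PLANE_PRIMARY, PLANE_SPRITE0, PLANE_SPRITE1, PLANE_SPRITE2,
		PLANE_SPRITE3, PLANE_SPRITE4, PLANE_SPRITE5 };

struct gpu {
	bool is_affected_platform;
	int revid;		/* stepping, e.g. 0 == A0 */
};

#define REVID_A0 0
#define REVID_C0 2

static bool revid_in_range(const struct gpu *gpu, int since, int until)
{
	return gpu->revid >= since && gpu->revid <= until;
}

/* A capability may depend on both the plane and the device stepping. */
static bool plane_supports_mc_ccs(const struct gpu *gpu, enum plane_id plane)
{
	/* Workaround: the feature is broken on early steppings. */
	if (gpu->is_affected_platform &&
	    revid_in_range(gpu, REVID_A0, REVID_C0))
		return false;

	/* Otherwise only the first few planes have hardware support. */
	return plane < PLANE_SPRITE4;
}

int main(void)
{
	struct gpu early = { .is_affected_platform = true, .revid = REVID_A0 };
	struct gpu fixed = { .is_affected_platform = true, .revid = 3 };

	printf("early stepping: %d\n", plane_supports_mc_ccs(&early, PLANE_PRIMARY)); /* 0 */
	printf("fixed stepping: %d\n", plane_supports_mc_ccs(&fixed, PLANE_PRIMARY)); /* 1 */
	return 0;
}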
*/ - DRM_ERROR("Port %s: live status %08x mismatches the legacy port flag, fix flag\n", - dig_port->tc_port_name, live_status_mask); + drm_err(&i915->drm, + "Port %s: live status %08x mismatches the legacy port flag, fix flag\n", + dig_port->tc_port_name, live_status_mask); dig_port->tc_legacy_port = !dig_port->tc_legacy_port; } @@ -233,8 +235,7 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, if (val == 0xffffffff) { drm_dbg_kms(&i915->drm, "Port %s: PHY in TCCOLD, can't set safe-mode to %s\n", - dig_port->tc_port_name, - enableddisabled(enable)); + dig_port->tc_port_name, enableddisabled(enable)); return false; } @@ -286,11 +287,12 @@ static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port) static void icl_tc_phy_connect(struct intel_digital_port *dig_port, int required_lanes) { + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); int max_lanes; if (!icl_tc_phy_status_complete(dig_port)) { - DRM_DEBUG_KMS("Port %s: PHY not ready\n", - dig_port->tc_port_name); + drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n", + dig_port->tc_port_name); goto out_set_tbt_alt_mode; } @@ -311,15 +313,16 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port, * became disconnected. Not necessary for legacy mode. */ if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) { - DRM_DEBUG_KMS("Port %s: PHY sudden disconnect\n", - dig_port->tc_port_name); + drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n", + dig_port->tc_port_name); goto out_set_safe_mode; } if (max_lanes < required_lanes) { - DRM_DEBUG_KMS("Port %s: PHY max lanes %d < required lanes %d\n", - dig_port->tc_port_name, - max_lanes, required_lanes); + drm_dbg_kms(&i915->drm, + "Port %s: PHY max lanes %d < required lanes %d\n", + dig_port->tc_port_name, + max_lanes, required_lanes); goto out_set_safe_mode; } @@ -357,15 +360,17 @@ static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port) static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port) { + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + if (!icl_tc_phy_status_complete(dig_port)) { - DRM_DEBUG_KMS("Port %s: PHY status not complete\n", - dig_port->tc_port_name); + drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n", + dig_port->tc_port_name); return dig_port->tc_mode == TC_PORT_TBT_ALT; } if (icl_tc_phy_is_in_safe_mode(dig_port)) { - DRM_DEBUG_KMS("Port %s: PHY still in safe mode\n", - dig_port->tc_port_name); + drm_dbg_kms(&i915->drm, "Port %s: PHY still in safe mode\n", + dig_port->tc_port_name); return false; } @@ -438,6 +443,7 @@ intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port, void intel_tc_port_sanitize(struct intel_digital_port *dig_port) { + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_encoder *encoder = &dig_port->base; int active_links = 0; @@ -451,8 +457,9 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port) if (active_links) { if (!icl_tc_phy_is_connected(dig_port)) - DRM_DEBUG_KMS("Port %s: PHY disconnected with %d active link(s)\n", - dig_port->tc_port_name, active_links); + drm_dbg_kms(&i915->drm, + "Port %s: PHY disconnected with %d active link(s)\n", + dig_port->tc_port_name, active_links); intel_tc_port_link_init_refcount(dig_port, active_links); goto out; @@ -462,9 +469,9 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port) icl_tc_phy_connect(dig_port, 1); out: - DRM_DEBUG_KMS("Port %s: sanitize mode (%s)\n", - dig_port->tc_port_name, - 
tc_port_mode_name(dig_port->tc_mode)); + drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n", + dig_port->tc_port_name, + tc_port_mode_name(dig_port->tc_mode)); mutex_unlock(&dig_port->tc_lock); } diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index d2e3a3a323e9..fbe12aad7d58 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -914,7 +914,8 @@ intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) } static void -intel_enable_tv(struct intel_encoder *encoder, +intel_enable_tv(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -930,7 +931,8 @@ intel_enable_tv(struct intel_encoder *encoder, } static void -intel_disable_tv(struct intel_encoder *encoder, +intel_disable_tv(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { @@ -1414,7 +1416,8 @@ static void set_color_conversion(struct drm_i915_private *dev_priv, (color_conversion->bv << 16) | color_conversion->av); } -static void intel_tv_pre_enable(struct intel_encoder *encoder, +static void intel_tv_pre_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -1698,13 +1701,13 @@ intel_tv_detect(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx, bool force) { + struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector)); enum drm_connector_status status; int type; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n", - connector->base.id, connector->name, - force); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] force=%d\n", + connector->base.id, connector->name, force); if (force) { struct intel_load_detect_pipe tmp; diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index f4c362dc6e15..4e18d4627065 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -759,7 +759,8 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder); * DSI port enable has to be done before pipe and plane enable, so we do it in * the pre_enable hook instead of the enable hook. */ -static void intel_dsi_pre_enable(struct intel_encoder *encoder, +static void intel_dsi_pre_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { @@ -858,7 +859,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder, intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON); } -static void bxt_dsi_enable(struct intel_encoder *encoder, +static void bxt_dsi_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { @@ -871,14 +873,16 @@ static void bxt_dsi_enable(struct intel_encoder *encoder, * DSI port disable has to be done after pipe and plane disable, so we do it in * the post_disable hook. 
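A large share of the display changes in this section simply thread a struct intel_atomic_state pointer through the encoder pre_enable/enable/disable/post_disable hooks, so each callback can reach the rest of the atomic commit it is running under. The C sketch below shows that calling convention in miniature; the encoder_hooks table and the lvds_* callbacks are invented for illustration, not the i915 vfunc table.

/* Illustrative sketch only -- invented types. */
#include <stdio.h>

struct atomic_state {
	int commit_id;		/* stands in for the whole commit */
};

struct encoder;

struct encoder_hooks {
	void (*pre_enable)(struct atomic_state *state, struct encoder *enc);
	void (*enable)(struct atomic_state *state, struct encoder *enc);
	void (*disable)(struct atomic_state *state, struct encoder *enc);
};

struct encoder {
	const char *name;
	const struct encoder_hooks *hooks;
};

static void lvds_pre_enable(struct atomic_state *state, struct encoder *enc)
{
	/* The hook can now reach everything tracked by this commit. */
	printf("%s: pre_enable (commit %d)\n", enc->name, state->commit_id);
}

static void lvds_enable(struct atomic_state *state, struct encoder *enc)
{
	printf("%s: enable (commit %d)\n", enc->name, state->commit_id);
}

static void lvds_disable(struct atomic_state *state, struct encoder *enc)
{
	printf("%s: disable (commit %d)\n", enc->name, state->commit_id);
}

static const struct encoder_hooks lvds_hooks = {
	.pre_enable = lvds_pre_enable,
	.enable = lvds_enable,
	.disable = lvds_disable,
};

int main(void)
{
	struct atomic_state state = { .commit_id = 42 };
	struct encoder lvds = { .name = "LVDS", .hooks = &lvds_hooks };

	lvds.hooks->pre_enable(&state, &lvds);
	lvds.hooks->enable(&state, &lvds);
	lvds.hooks->disable(&state, &lvds);
	return 0;
}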
*/ -static void intel_dsi_disable(struct intel_encoder *encoder, +static void intel_dsi_disable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { + struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&i915->drm, "\n"); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF); intel_panel_disable_backlight(old_conn_state); @@ -906,7 +910,8 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) vlv_dsi_clear_device_ready(encoder); } -static void intel_dsi_post_disable(struct intel_encoder *encoder, +static void intel_dsi_post_disable(struct intel_atomic_state *state, + struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 68326ad3b2e0..11d9135cf21a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -570,23 +570,19 @@ static void engines_idle_release(struct i915_gem_context *ctx, engines->ctx = i915_gem_context_get(ctx); for_each_gem_engine(ce, engines, it) { - struct dma_fence *fence; - int err = 0; + int err; /* serialises with execbuf */ set_bit(CONTEXT_CLOSED_BIT, &ce->flags); if (!intel_context_pin_if_active(ce)) continue; - fence = i915_active_fence_get(&ce->timeline->last_request); - if (fence) { - err = i915_sw_fence_await_dma_fence(&engines->fence, - fence, 0, - GFP_KERNEL); - dma_fence_put(fence); - } + /* Wait until context is finally scheduled out and retired */ + err = i915_sw_fence_await_active(&engines->fence, + &ce->active, + I915_ACTIVE_AWAIT_BARRIER); intel_context_unpin(ce); - if (err < 0) + if (err) goto kill; } @@ -757,21 +753,46 @@ err_free: return ERR_PTR(err); } +static inline struct i915_gem_engines * +__context_engines_await(const struct i915_gem_context *ctx) +{ + struct i915_gem_engines *engines; + + rcu_read_lock(); + do { + engines = rcu_dereference(ctx->engines); + GEM_BUG_ON(!engines); + + if (unlikely(!i915_sw_fence_await(&engines->fence))) + continue; + + if (likely(engines == rcu_access_pointer(ctx->engines))) + break; + + i915_sw_fence_complete(&engines->fence); + } while (1); + rcu_read_unlock(); + + return engines; +} + static int context_apply_all(struct i915_gem_context *ctx, int (*fn)(struct intel_context *ce, void *data), void *data) { struct i915_gem_engines_iter it; + struct i915_gem_engines *e; struct intel_context *ce; int err = 0; - for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + e = __context_engines_await(ctx); + for_each_gem_engine(ce, e, it) { err = fn(ce, data); if (err) break; } - i915_gem_context_unlock_engines(ctx); + i915_sw_fence_complete(&e->fence); return err; } @@ -786,11 +807,13 @@ static int __apply_ppgtt(struct intel_context *ce, void *vm) static struct i915_address_space * __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm) { - struct i915_address_space *old = i915_gem_context_vm(ctx); + struct i915_address_space *old; + old = rcu_replace_pointer(ctx->vm, + i915_vm_open(vm), + lockdep_is_held(&ctx->mutex)); GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old)); - rcu_assign_pointer(ctx->vm, i915_vm_open(vm)); context_apply_all(ctx, __apply_ppgtt, vm); return old; @@ -1069,30 +1092,6 @@ 
static void cb_retire(struct i915_active *base) kfree(cb); } -static inline struct i915_gem_engines * -__context_engines_await(const struct i915_gem_context *ctx) -{ - struct i915_gem_engines *engines; - - rcu_read_lock(); - do { - engines = rcu_dereference(ctx->engines); - if (unlikely(!engines)) - break; - - if (unlikely(!i915_sw_fence_await(&engines->fence))) - continue; - - if (likely(engines == rcu_access_pointer(ctx->engines))) - break; - - i915_sw_fence_complete(&engines->fence); - } while (1); - rcu_read_unlock(); - - return engines; -} - I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault); static int context_barrier_task(struct i915_gem_context *ctx, intel_engine_mask_t engines, @@ -1401,10 +1400,10 @@ static int get_ringsize(struct i915_gem_context *ctx, return 0; } -static int -user_to_context_sseu(struct drm_i915_private *i915, - const struct drm_i915_gem_context_param_sseu *user, - struct intel_sseu *context) +int +i915_gem_user_to_context_sseu(struct drm_i915_private *i915, + const struct drm_i915_gem_context_param_sseu *user, + struct intel_sseu *context) { const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu; @@ -1539,7 +1538,7 @@ static int set_sseu(struct i915_gem_context *ctx, goto out_ce; } - ret = user_to_context_sseu(i915, &user_sseu, &sseu); + ret = i915_gem_user_to_context_sseu(i915, &user_sseu, &sseu); if (ret) goto out_ce; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h index f1d884d304bd..3702b2fb27ab 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h @@ -225,4 +225,8 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it); struct i915_lut_handle *i915_lut_handle_alloc(void); void i915_lut_handle_free(struct i915_lut_handle *lut); +int i915_gem_user_to_context_sseu(struct drm_i915_private *i915, + const struct drm_i915_gem_context_param_sseu *user, + struct intel_sseu *context); + #endif /* !__I915_GEM_CONTEXT_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index 0cc40e77bbd2..af43e82f45c7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -369,7 +369,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) struct i915_vma *vma; GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); - if (!atomic_read(&obj->bind_count)) + if (list_empty(&obj->vma.list)) return; mutex_lock(&i915->ggtt.vm.mutex); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 36d069504836..517898aa634c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -40,6 +40,11 @@ struct eb_vma { u32 handle; }; +struct eb_vma_array { + struct kref kref; + struct eb_vma vma[]; +}; + enum { FORCE_CPU_RELOC = 1, FORCE_GTT_RELOC, @@ -52,7 +57,6 @@ enum { #define __EXEC_OBJECT_NEEDS_MAP BIT(29) #define __EXEC_OBJECT_NEEDS_BIAS BIT(28) #define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 28) /* all of the above */ -#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE) #define __EXEC_HAS_RELOC BIT(31) #define __EXEC_INTERNAL_FLAGS (~0u << 31) @@ -283,6 +287,7 @@ struct i915_execbuffer { */ int lut_size; struct hlist_head *buckets; /** ht for relocation handles */ + struct eb_vma_array *array; }; static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) @@ -292,8 
+297,62 @@ static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) eb->args->batch_len); } +static struct eb_vma_array *eb_vma_array_create(unsigned int count) +{ + struct eb_vma_array *arr; + + arr = kvmalloc(struct_size(arr, vma, count), GFP_KERNEL | __GFP_NOWARN); + if (!arr) + return NULL; + + kref_init(&arr->kref); + arr->vma[0].vma = NULL; + + return arr; +} + +static inline void eb_unreserve_vma(struct eb_vma *ev) +{ + struct i915_vma *vma = ev->vma; + + if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE)) + __i915_vma_unpin_fence(vma); + + if (ev->flags & __EXEC_OBJECT_HAS_PIN) + __i915_vma_unpin(vma); + + ev->flags &= ~(__EXEC_OBJECT_HAS_PIN | + __EXEC_OBJECT_HAS_FENCE); +} + +static void eb_vma_array_destroy(struct kref *kref) +{ + struct eb_vma_array *arr = container_of(kref, typeof(*arr), kref); + struct eb_vma *ev = arr->vma; + + while (ev->vma) { + eb_unreserve_vma(ev); + i915_vma_put(ev->vma); + ev++; + } + + kvfree(arr); +} + +static void eb_vma_array_put(struct eb_vma_array *arr) +{ + kref_put(&arr->kref, eb_vma_array_destroy); +} + static int eb_create(struct i915_execbuffer *eb) { + /* Allocate an extra slot for use by the command parser + sentinel */ + eb->array = eb_vma_array_create(eb->buffer_count + 2); + if (!eb->array) + return -ENOMEM; + + eb->vma = eb->array->vma; + if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) { unsigned int size = 1 + ilog2(eb->buffer_count); @@ -327,8 +386,10 @@ static int eb_create(struct i915_execbuffer *eb) break; } while (--size); - if (unlikely(!size)) + if (unlikely(!size)) { + eb_vma_array_put(eb->array); return -ENOMEM; + } eb->lut_size = size; } else { @@ -368,6 +429,32 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry, return false; } +static u64 eb_pin_flags(const struct drm_i915_gem_exec_object2 *entry, + unsigned int exec_flags) +{ + u64 pin_flags = 0; + + if (exec_flags & EXEC_OBJECT_NEEDS_GTT) + pin_flags |= PIN_GLOBAL; + + /* + * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset, + * limit address to the first 4GBs for unflagged objects. 
+ */ + if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) + pin_flags |= PIN_ZONE_4G; + + if (exec_flags & __EXEC_OBJECT_NEEDS_MAP) + pin_flags |= PIN_MAPPABLE; + + if (exec_flags & EXEC_OBJECT_PINNED) + pin_flags |= entry->offset | PIN_OFFSET_FIXED; + else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) + pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; + + return pin_flags; +} + static inline bool eb_pin_vma(struct i915_execbuffer *eb, const struct drm_i915_gem_exec_object2 *entry, @@ -385,8 +472,19 @@ eb_pin_vma(struct i915_execbuffer *eb, if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT)) pin_flags |= PIN_GLOBAL; - if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags))) - return false; + /* Attempt to reuse the current location if available */ + if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags))) { + if (entry->flags & EXEC_OBJECT_PINNED) + return false; + + /* Failing that pick any _free_ space if suitable */ + if (unlikely(i915_vma_pin(vma, + entry->pad_to_size, + entry->alignment, + eb_pin_flags(entry, ev->flags) | + PIN_USER | PIN_NOEVICT))) + return false; + } if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) { if (unlikely(i915_vma_pin_fence(vma))) { @@ -402,26 +500,6 @@ eb_pin_vma(struct i915_execbuffer *eb, return !eb_vma_misplaced(entry, vma, ev->flags); } -static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags) -{ - GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN)); - - if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE)) - __i915_vma_unpin_fence(vma); - - __i915_vma_unpin(vma); -} - -static inline void -eb_unreserve_vma(struct eb_vma *ev) -{ - if (!(ev->flags & __EXEC_OBJECT_HAS_PIN)) - return; - - __eb_unreserve_vma(ev->vma, ev->flags); - ev->flags &= ~__EXEC_OBJECT_RESERVED; -} - static int eb_validate_vma(struct i915_execbuffer *eb, struct drm_i915_gem_exec_object2 *entry, @@ -481,7 +559,7 @@ eb_add_vma(struct i915_execbuffer *eb, GEM_BUG_ON(i915_vma_is_closed(vma)); - ev->vma = i915_vma_get(vma); + ev->vma = vma; ev->exec = entry; ev->flags = entry->flags; @@ -547,28 +625,9 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb, u64 pin_flags) { struct drm_i915_gem_exec_object2 *entry = ev->exec; - unsigned int exec_flags = ev->flags; struct i915_vma *vma = ev->vma; int err; - if (exec_flags & EXEC_OBJECT_NEEDS_GTT) - pin_flags |= PIN_GLOBAL; - - /* - * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset, - * limit address to the first 4GBs for unflagged objects. 
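The execbuffer hunks above move the per-object slots into a separately allocated, reference-counted eb_vma_array (kref-based in the patch), so the array is freed once by whoever drops the last reference rather than torn down inside the ioctl. A self-contained user-space sketch of a refcounted flexible-array container in that style follows; it uses a C11 atomic counter instead of the kernel's kref, and the slot_array names are made up.

/* Illustrative sketch only -- user-space stand-in for a kref'd array. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct slot {
	int handle;		/* stand-in for the per-object state */
};

struct slot_array {
	atomic_int refcount;
	size_t count;
	struct slot slots[];	/* flexible array member */
};

static struct slot_array *slot_array_create(size_t count)
{
	struct slot_array *arr;

	arr = malloc(sizeof(*arr) + count * sizeof(arr->slots[0]));
	if (!arr)
		return NULL;

	atomic_init(&arr->refcount, 1);
	arr->count = count;
	return arr;
}

static void slot_array_get(struct slot_array *arr)
{
	atomic_fetch_add_explicit(&arr->refcount, 1, memory_order_relaxed);
}

static void slot_array_put(struct slot_array *arr)
{
	/* The last reference frees the whole array in one go. */
	if (atomic_fetch_sub_explicit(&arr->refcount, 1,
				      memory_order_acq_rel) == 1)
		free(arr);
}

int main(void)
{
	struct slot_array *arr = slot_array_create(4);

	if (!arr)
		return 1;

	arr->slots[0].handle = 7;
	slot_array_get(arr);	/* e.g. handed to an async consumer */
	printf("slot 0 handle = %d\n", arr->slots[0].handle);
	slot_array_put(arr);	/* consumer done */
	slot_array_put(arr);	/* creator done: frees the array */
	return 0;
}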
- */ - if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) - pin_flags |= PIN_ZONE_4G; - - if (exec_flags & __EXEC_OBJECT_NEEDS_MAP) - pin_flags |= PIN_MAPPABLE; - - if (exec_flags & EXEC_OBJECT_PINNED) - pin_flags |= entry->offset | PIN_OFFSET_FIXED; - else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) - pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; - if (drm_mm_node_allocated(&vma->node) && eb_vma_misplaced(entry, vma, ev->flags)) { err = i915_vma_unbind(vma); @@ -578,7 +637,7 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb, err = i915_vma_pin(vma, entry->pad_to_size, entry->alignment, - pin_flags); + eb_pin_flags(entry, ev->flags) | pin_flags); if (err) return err; @@ -587,7 +646,7 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb, eb->args->flags |= __EXEC_HAS_RELOC; } - if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) { + if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) { err = i915_vma_pin_fence(vma); if (unlikely(err)) { i915_vma_unpin(vma); @@ -595,10 +654,10 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb, } if (vma->fence) - exec_flags |= __EXEC_OBJECT_HAS_FENCE; + ev->flags |= __EXEC_OBJECT_HAS_FENCE; } - ev->flags = exec_flags | __EXEC_OBJECT_HAS_PIN; + ev->flags |= __EXEC_OBJECT_HAS_PIN; GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags)); return 0; @@ -728,77 +787,117 @@ static int eb_select_context(struct i915_execbuffer *eb) return 0; } -static int eb_lookup_vmas(struct i915_execbuffer *eb) +static int __eb_add_lut(struct i915_execbuffer *eb, + u32 handle, struct i915_vma *vma) { - struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma; - struct drm_i915_gem_object *obj; - unsigned int i, batch; + struct i915_gem_context *ctx = eb->gem_context; + struct i915_lut_handle *lut; int err; - if (unlikely(i915_gem_context_is_closed(eb->gem_context))) - return -ENOENT; + lut = i915_lut_handle_alloc(); + if (unlikely(!lut)) + return -ENOMEM; - INIT_LIST_HEAD(&eb->relocs); - INIT_LIST_HEAD(&eb->unbound); + i915_vma_get(vma); + if (!atomic_fetch_inc(&vma->open_count)) + i915_vma_reopen(vma); + lut->handle = handle; + lut->ctx = ctx; + + /* Check that the context hasn't been closed in the meantime */ + err = -EINTR; + if (!mutex_lock_interruptible(&ctx->mutex)) { + err = -ENOENT; + if (likely(!i915_gem_context_is_closed(ctx))) + err = radix_tree_insert(&ctx->handles_vma, handle, vma); + if (err == 0) { /* And nor has this handle */ + struct drm_i915_gem_object *obj = vma->obj; + + i915_gem_object_lock(obj); + if (idr_find(&eb->file->object_idr, handle) == obj) { + list_add(&lut->obj_link, &obj->lut_list); + } else { + radix_tree_delete(&ctx->handles_vma, handle); + err = -ENOENT; + } + i915_gem_object_unlock(obj); + } + mutex_unlock(&ctx->mutex); + } + if (unlikely(err)) + goto err; - batch = eb_batch_index(eb); + return 0; - for (i = 0; i < eb->buffer_count; i++) { - u32 handle = eb->exec[i].handle; - struct i915_lut_handle *lut; +err: + atomic_dec(&vma->open_count); + i915_vma_put(vma); + i915_lut_handle_free(lut); + return err; +} + +static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle) +{ + do { + struct drm_i915_gem_object *obj; struct i915_vma *vma; + int err; - vma = radix_tree_lookup(handles_vma, handle); + rcu_read_lock(); + vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle); + if (likely(vma)) + vma = i915_vma_tryget(vma); + rcu_read_unlock(); if (likely(vma)) - goto add_vma; + return vma; obj = i915_gem_object_lookup(eb->file, handle); - if (unlikely(!obj)) { - err = -ENOENT; - 
goto err_vma; - } + if (unlikely(!obj)) + return ERR_PTR(-ENOENT); vma = i915_vma_instance(obj, eb->context->vm, NULL); if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err_obj; + i915_gem_object_put(obj); + return vma; } - lut = i915_lut_handle_alloc(); - if (unlikely(!lut)) { - err = -ENOMEM; - goto err_obj; - } + err = __eb_add_lut(eb, handle, vma); + if (likely(!err)) + return vma; - err = radix_tree_insert(handles_vma, handle, vma); - if (unlikely(err)) { - i915_lut_handle_free(lut); - goto err_obj; - } + i915_gem_object_put(obj); + if (err != -EEXIST) + return ERR_PTR(err); + } while (1); +} + +static int eb_lookup_vmas(struct i915_execbuffer *eb) +{ + unsigned int batch = eb_batch_index(eb); + unsigned int i; + int err = 0; - /* transfer ref to lut */ - if (!atomic_fetch_inc(&vma->open_count)) - i915_vma_reopen(vma); - lut->handle = handle; - lut->ctx = eb->gem_context; + INIT_LIST_HEAD(&eb->relocs); + INIT_LIST_HEAD(&eb->unbound); - i915_gem_object_lock(obj); - list_add(&lut->obj_link, &obj->lut_list); - i915_gem_object_unlock(obj); + for (i = 0; i < eb->buffer_count; i++) { + struct i915_vma *vma; + + vma = eb_lookup_vma(eb, eb->exec[i].handle); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + break; + } -add_vma: err = eb_validate_vma(eb, &eb->exec[i], vma); - if (unlikely(err)) - goto err_vma; + if (unlikely(err)) { + i915_vma_put(vma); + break; + } eb_add_vma(eb, i, batch, vma); } - return 0; - -err_obj: - i915_gem_object_put(obj); -err_vma: eb->vma[i].vma = NULL; return err; } @@ -823,31 +922,13 @@ eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle) } } -static void eb_release_vmas(const struct i915_execbuffer *eb) -{ - const unsigned int count = eb->buffer_count; - unsigned int i; - - for (i = 0; i < count; i++) { - struct eb_vma *ev = &eb->vma[i]; - struct i915_vma *vma = ev->vma; - - if (!vma) - break; - - eb->vma[i].vma = NULL; - - if (ev->flags & __EXEC_OBJECT_HAS_PIN) - __eb_unreserve_vma(vma, ev->flags); - - i915_vma_put(vma); - } -} - static void eb_destroy(const struct i915_execbuffer *eb) { GEM_BUG_ON(eb->reloc_cache.rq); + if (eb->array) + eb_vma_array_put(eb->array); + if (eb->lut_size > 0) kfree(eb->buckets); } @@ -896,11 +977,13 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache) static void reloc_gpu_flush(struct reloc_cache *cache) { - GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32)); + struct drm_i915_gem_object *obj = cache->rq->batch->obj; + + GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32)); cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END; - __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size); - i915_gem_object_unpin_map(cache->rq->batch->obj); + __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1)); + i915_gem_object_unpin_map(obj); intel_gt_chipset_flush(cache->rq->engine->gt); @@ -1218,6 +1301,17 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb, return cmd; } +static inline bool use_reloc_gpu(struct i915_vma *vma) +{ + if (DBG_FORCE_RELOC == FORCE_GPU_RELOC) + return true; + + if (DBG_FORCE_RELOC) + return false; + + return !dma_resv_test_signaled_rcu(vma->resv, true); +} + static u64 relocate_entry(struct i915_vma *vma, const struct drm_i915_gem_relocation_entry *reloc, @@ -1229,9 +1323,7 @@ relocate_entry(struct i915_vma *vma, bool wide = eb->reloc_cache.use_64bit_reloc; void *vaddr; - if (!eb->reloc_cache.vaddr && - (DBG_FORCE_RELOC == FORCE_GPU_RELOC || - !dma_resv_test_signaled_rcu(vma->resv, true))) { + if (!eb->reloc_cache.vaddr && 
use_reloc_gpu(vma)) { const unsigned int gen = eb->reloc_cache.gen; unsigned int len; u32 *batch; @@ -1409,12 +1501,11 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev) { #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry)) struct drm_i915_gem_relocation_entry stack[N_RELOC(512)]; - struct drm_i915_gem_relocation_entry __user *urelocs; const struct drm_i915_gem_exec_object2 *entry = ev->exec; - unsigned int remain; + struct drm_i915_gem_relocation_entry __user *urelocs = + u64_to_user_ptr(entry->relocs_ptr); + unsigned long remain = entry->relocation_count; - urelocs = u64_to_user_ptr(entry->relocs_ptr); - remain = entry->relocation_count; if (unlikely(remain > N_RELOC(ULONG_MAX))) return -EINVAL; @@ -1423,13 +1514,13 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev) * to read. However, if the array is not writable the user loses * the updated relocation values. */ - if (unlikely(!access_ok(urelocs, remain*sizeof(*urelocs)))) + if (unlikely(!access_ok(urelocs, remain * sizeof(*urelocs)))) return -EFAULT; do { struct drm_i915_gem_relocation_entry *r = stack; unsigned int count = - min_t(unsigned int, remain, ARRAY_SIZE(stack)); + min_t(unsigned long, remain, ARRAY_SIZE(stack)); unsigned int copied; /* @@ -1477,10 +1568,8 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev) * can read from this userspace address. */ offset = gen8_canonical_addr(offset & ~UPDATE); - if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) { - remain = -EFAULT; - goto out; - } + __put_user(offset, + &urelocs[r - stack].presumed_offset); } } while (r++, --count); urelocs += ARRAY_SIZE(stack); @@ -1494,9 +1583,7 @@ static int eb_relocate(struct i915_execbuffer *eb) { int err; - mutex_lock(&eb->gem_context->mutex); err = eb_lookup_vmas(eb); - mutex_unlock(&eb->gem_context->mutex); if (err) return err; @@ -1597,19 +1684,15 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) err = i915_vma_move_to_active(vma, eb->request, flags); i915_vma_unlock(vma); - - __eb_unreserve_vma(vma, flags); - i915_vma_put(vma); - - ev->vma = NULL; + eb_unreserve_vma(ev); } ww_acquire_fini(&acquire); + eb_vma_array_put(fetch_and_zero(&eb->array)); + if (unlikely(err)) goto err_skip; - eb->exec = NULL; - /* Unconditionally flush any chipset caches (for streaming writes). 
*/ intel_gt_chipset_flush(eb->engine->gt); return 0; @@ -1784,7 +1867,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb, dma_resv_add_excl_fence(shadow->resv, &pw->base.dma); dma_resv_unlock(shadow->resv); - dma_fence_work_commit(&pw->base); + dma_fence_work_commit_imm(&pw->base); return 0; err_batch_unlock: @@ -1861,6 +1944,7 @@ static int eb_parse(struct i915_execbuffer *eb) eb->vma[eb->buffer_count].vma = i915_vma_get(shadow); eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN; eb->batch = &eb->vma[eb->buffer_count++]; + eb->vma[eb->buffer_count].vma = NULL; eb->trampoline = trampoline; eb->batch_start_offset = 0; @@ -2348,9 +2432,7 @@ static void eb_request_add(struct i915_execbuffer *eb) __i915_request_skip(rq); } - local_bh_disable(); __i915_request_queue(rq, &attr); - local_bh_enable(); /* Kick the execlists tasklet if just scheduled */ /* Try to clean up the client's timeline after submitting the request */ if (prev) @@ -2386,8 +2468,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, args->flags |= __EXEC_HAS_RELOC; eb.exec = exec; - eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1); - eb.vma[0].vma = NULL; eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS; reloc_cache_init(&eb.reloc_cache, eb.i915); @@ -2594,8 +2674,6 @@ err_parse: if (batch->private) intel_engine_pool_put(batch->private); err_vma: - if (eb.exec) - eb_release_vmas(&eb); if (eb.trampoline) i915_vma_unpin(eb.trampoline); eb_unpin_engine(&eb); @@ -2615,7 +2693,7 @@ err_in_fence: static size_t eb_element_size(void) { - return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma); + return sizeof(struct drm_i915_gem_exec_object2); } static bool check_buffer_count(size_t count) @@ -2671,7 +2749,7 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data, /* Copy in the exec list from userland */ exec_list = kvmalloc_array(count, sizeof(*exec_list), __GFP_NOWARN | GFP_KERNEL); - exec2_list = kvmalloc_array(count + 1, eb_element_size(), + exec2_list = kvmalloc_array(count, eb_element_size(), __GFP_NOWARN | GFP_KERNEL); if (exec_list == NULL || exec2_list == NULL) { drm_dbg(&i915->drm, @@ -2749,8 +2827,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, if (err) return err; - /* Allocate an extra slot for use by the command parser */ - exec2_list = kvmalloc_array(count + 1, eb_element_size(), + exec2_list = kvmalloc_array(count, eb_element_size(), __GFP_NOWARN | GFP_KERNEL); if (exec2_list == NULL) { drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n", diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 5da9f9e534b9..3f01cdd1a39b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -206,7 +206,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, } obj->mmo.offsets = RB_ROOT; - GEM_BUG_ON(atomic_read(&obj->bind_count)); GEM_BUG_ON(obj->userfault_count); GEM_BUG_ON(!list_empty(&obj->lut_list)); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index a0b10bcd8d8a..54ee658bb168 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -179,9 +179,6 @@ struct drm_i915_gem_object { #define TILING_MASK (FENCE_MINIMUM_STRIDE - 1) #define STRIDE_MASK (~TILING_MASK) - /** Count of VMA actually bound by this object */ - atomic_t bind_count; - struct { /* * Protects the pages and their use. 
Do not use directly, but diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 24f4cadea114..5d855fcd5c0f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -199,8 +199,6 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj) if (i915_gem_object_has_pinned_pages(obj)) return -EBUSY; - GEM_BUG_ON(atomic_read(&obj->bind_count)); - /* May be called by shrinker from within get_pages() (on another bo) */ mutex_lock(&obj->mm.lock); if (unlikely(atomic_read(&obj->mm.pages_pin_count))) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c index 698e22420dc5..7fe9831aa9ba 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -10,8 +10,6 @@ #include <drm/drm.h> /* for drm_legacy.h! */ #include <drm/drm_cache.h> -#include <drm/drm_legacy.h> /* for drm_pci.h! */ -#include <drm/drm_pci.h> #include "gt/intel_gt.h" #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 03e5eb4c99d1..5b65ce738b16 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -27,18 +27,6 @@ static bool can_release_pages(struct drm_i915_gem_object *obj) return false; /* - * Only report true if by unbinding the object and putting its pages - * we can actually make forward progress towards freeing physical - * pages. - * - * If the pages are pinned for any other reason than being bound - * to the GPU, simply unbinding from the GPU is not going to succeed - * in releasing our pin count on the pages themselves. - */ - if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count)) - return false; - - /* * We can only return physical pages to the system if we can either * discard the contents (because the user has marked them as being * purgeable) or if we can move their contents out to swap. 
@@ -54,6 +42,8 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj, flags = 0; if (shrink & I915_SHRINK_ACTIVE) flags = I915_GEM_OBJECT_UNBIND_ACTIVE; + if (!(shrink & I915_SHRINK_BOUND)) + flags = I915_GEM_OBJECT_UNBIND_TEST; if (i915_gem_object_unbind(obj, flags) == 0) __i915_gem_object_put_pages(obj); @@ -194,10 +184,6 @@ i915_gem_shrink(struct drm_i915_private *i915, i915_gem_object_is_framebuffer(obj)) continue; - if (!(shrink & I915_SHRINK_BOUND) && - atomic_read(&obj->bind_count)) - continue; - if (!can_release_pages(obj)) continue; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 5557dfa83a7b..dc250278bd2c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -381,14 +381,14 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915) mutex_init(&i915->mm.stolen_lock); if (intel_vgpu_active(i915)) { - dev_notice(i915->drm.dev, + drm_notice(&i915->drm, "%s, disabling use of stolen memory\n", "iGVT-g active"); return 0; } if (intel_vtd_active() && INTEL_GEN(i915) < 8) { - dev_notice(i915->drm.dev, + drm_notice(&i915->drm, "%s, disabling use of stolen memory\n", "DMAR active"); return 0; diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c index fa16f2c3f3ac..2b46c6530da9 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c @@ -88,8 +88,7 @@ static void huge_put_pages(struct drm_i915_gem_object *obj, } static const struct drm_i915_gem_object_ops huge_ops = { - .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | - I915_GEM_OBJECT_IS_SHRINKABLE, + .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE, .get_pages = huge_get_pages, .put_pages = huge_put_pages, }; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 54b86cf7f5d2..f4f933240b39 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -1925,7 +1925,7 @@ static int mock_context_barrier(void *arg) goto out; } - rq = igt_request_alloc(ctx, i915->engine[RCS0]); + rq = igt_request_alloc(ctx, i915->gt.engine[RCS0]); if (IS_ERR(rq)) { pr_err("Request allocation failed!\n"); goto out; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 43912e9b683d..ef7abcb3f4ee 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -1156,9 +1156,6 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915, if (err) goto out_unmap; - GEM_BUG_ON(mmo->mmap_type == I915_MMAP_TYPE_GTT && - !atomic_read(&obj->bind_count)); - err = check_present(addr, obj->base.size); if (err) { pr_err("%s: was not present\n", obj->mm.region->name); @@ -1175,7 +1172,6 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915, pr_err("Failed to unbind object!\n"); goto out_unmap; } - GEM_BUG_ON(atomic_read(&obj->bind_count)); if (type != I915_MMAP_TYPE_GTT) { __i915_gem_object_put_pages(obj); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c index 2b6db6f799de..faa5b6d91795 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c @@ -14,7 +14,7 @@ static int igt_gem_object(void *arg) { struct 
drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; - int err = -ENOMEM; + int err; /* Basic test to ensure we can create an object */ diff --git a/drivers/gpu/drm/i915/gt/debugfs_engines.c b/drivers/gpu/drm/i915/gt/debugfs_engines.c index 6a5e9ab20b94..5e3725e62241 100644 --- a/drivers/gpu/drm/i915/gt/debugfs_engines.c +++ b/drivers/gpu/drm/i915/gt/debugfs_engines.c @@ -32,5 +32,5 @@ void debugfs_engines_register(struct intel_gt *gt, struct dentry *root) { "engines", &engines_fops }, }; - debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files)); + intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt); } diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.c b/drivers/gpu/drm/i915/gt/debugfs_gt.c index 75255aaacaed..1de5fbaa1cf9 100644 --- a/drivers/gpu/drm/i915/gt/debugfs_gt.c +++ b/drivers/gpu/drm/i915/gt/debugfs_gt.c @@ -9,6 +9,7 @@ #include "debugfs_engines.h" #include "debugfs_gt.h" #include "debugfs_gt_pm.h" +#include "uc/intel_uc_debugfs.h" #include "i915_drv.h" void debugfs_gt_register(struct intel_gt *gt) @@ -24,17 +25,19 @@ void debugfs_gt_register(struct intel_gt *gt) debugfs_engines_register(gt, root); debugfs_gt_pm_register(gt, root); + + intel_uc_debugfs_register(&gt->uc, root); } -void debugfs_gt_register_files(struct intel_gt *gt, - struct dentry *root, - const struct debugfs_gt_file *files, - unsigned long count) +void intel_gt_debugfs_register_files(struct dentry *root, + const struct debugfs_gt_file *files, + unsigned long count, void *data) { while (count--) { - if (!files->eval || files->eval(gt)) + umode_t mode = files->fops->write ? 0644 : 0444; + if (!files->eval || files->eval(data)) debugfs_create_file(files->name, - 0444, root, gt, + mode, root, data, files->fops); files++; diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.h b/drivers/gpu/drm/i915/gt/debugfs_gt.h index 4ea0f06cda8f..f77540f727e9 100644 --- a/drivers/gpu/drm/i915/gt/debugfs_gt.h +++ b/drivers/gpu/drm/i915/gt/debugfs_gt.h @@ -28,12 +28,11 @@ void debugfs_gt_register(struct intel_gt *gt); struct debugfs_gt_file { const char *name; const struct file_operations *fops; - bool (*eval)(const struct intel_gt *gt); + bool (*eval)(void *data); }; -void debugfs_gt_register_files(struct intel_gt *gt, - struct dentry *root, - const struct debugfs_gt_file *files, - unsigned long count); +void intel_gt_debugfs_register_files(struct dentry *root, + const struct debugfs_gt_file *files, + unsigned long count, void *data); #endif /* DEBUGFS_GT_H */ diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c index 059c9e5c002e..aab30d908072 100644 --- a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c @@ -506,8 +506,10 @@ static int llc_show(struct seq_file *m, void *data) return 0; } -static bool llc_eval(const struct intel_gt *gt) +static bool llc_eval(void *data) { + struct intel_gt *gt = data; + return HAS_LLC(gt->i915); } @@ -580,8 +582,10 @@ static int rps_boost_show(struct seq_file *m, void *data) return 0; } -static bool rps_eval(const struct intel_gt *gt) +static bool rps_eval(void *data) { + struct intel_gt *gt = data; + return HAS_RPS(gt->i915); } @@ -597,5 +601,5 @@ void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root) { "rps_boost", &rps_boost_fops, rps_eval }, }; - debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files)); + intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt); } diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c index cbad7fe722ce..cbedba857d43 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -64,7 +64,7 @@ static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) if (!--b->irq_enabled) irq_disable(engine); - b->irq_armed = false; + WRITE_ONCE(b->irq_armed, false); intel_gt_pm_put_async(engine->gt); } @@ -73,7 +73,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine) struct intel_breadcrumbs *b = &engine->breadcrumbs; unsigned long flags; - if (!b->irq_armed) + if (!READ_ONCE(b->irq_armed)) return; spin_lock_irqsave(&b->irq_lock, flags); @@ -233,7 +233,7 @@ static bool __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b) * which we can add a new waiter and avoid the cost of re-enabling * the irq. */ - b->irq_armed = true; + WRITE_ONCE(b->irq_armed, true); /* * Since we are waiting on a request, the GPU should be busy diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index aea992e46c42..74ddb49b2941 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -114,6 +114,11 @@ int __intel_context_do_pin(struct intel_context *ce) goto out_release; } + if (unlikely(intel_context_is_closed(ce))) { + err = -ENOENT; + goto out_unlock; + } + if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) { err = intel_context_active_acquire(ce); if (unlikely(err)) diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index b469de0dd9b6..d9ee64e2ef79 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -199,6 +199,8 @@ void intel_engine_cleanup(struct intel_engine_cs *engine); int intel_engines_init_mmio(struct intel_gt *gt); int intel_engines_init(struct intel_gt *gt); +void intel_engine_free_request_pool(struct intel_engine_cs *engine); + void intel_engines_release(struct intel_gt *gt); void intel_engines_free(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 3aa8a652c16d..b1f8527f02c8 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -347,8 +347,6 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id) gt->engine_class[info->class][info->instance] = engine; gt->engine[id] = engine; - i915->engine[id] = engine; - return 0; } @@ -425,17 +423,27 @@ void intel_engines_release(struct intel_gt *gt) engine->release = NULL; memset(&engine->reset, 0, sizeof(engine->reset)); - - gt->i915->engine[id] = NULL; } } +void intel_engine_free_request_pool(struct intel_engine_cs *engine) +{ + if (!engine->request_pool) + return; + + kmem_cache_free(i915_request_slab_cache(), engine->request_pool); +} + void intel_engines_free(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; + /* Free the requests! 
dma-resv keeps fences around for an eternity */ + rcu_barrier(); + for_each_engine(engine, gt, id) { + intel_engine_free_request_pool(engine); kfree(engine); gt->engine[id] = NULL; } @@ -1225,6 +1233,49 @@ static void print_request(struct drm_printer *m, name); } +static struct intel_timeline *get_timeline(struct i915_request *rq) +{ + struct intel_timeline *tl; + + /* + * Even though we are holding the engine->active.lock here, there + * is no control over the submission queue per-se and we are + * inspecting the active state at a random point in time, with an + * unknown queue. Play safe and make sure the timeline remains valid. + * (Only being used for pretty printing, one extra kref shouldn't + * cause a camel stampede!) + */ + rcu_read_lock(); + tl = rcu_dereference(rq->timeline); + if (!kref_get_unless_zero(&tl->kref)) + tl = NULL; + rcu_read_unlock(); + + return tl; +} + +static int print_ring(char *buf, int sz, struct i915_request *rq) +{ + int len = 0; + + if (!i915_request_signaled(rq)) { + struct intel_timeline *tl = get_timeline(rq); + + len = scnprintf(buf, sz, + "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ", + i915_ggtt_offset(rq->ring->vma), + tl ? tl->hwsp_offset : 0, + hwsp_seqno(rq), + DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context), + 1000 * 1000)); + + if (tl) + intel_timeline_put(tl); + } + + return len; +} + static void hexdump(struct drm_printer *m, const void *buf, size_t len) { const size_t rowsize = 8 * sizeof(u32); @@ -1254,27 +1305,6 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len) } } -static struct intel_timeline *get_timeline(struct i915_request *rq) -{ - struct intel_timeline *tl; - - /* - * Even though we are holding the engine->active.lock here, there - * is no control over the submission queue per-se and we are - * inspecting the active state at a random point in time, with an - * unknown queue. Play safe and make sure the timeline remains valid. - * (Only being used for pretty printing, one extra kref shouldn't - * cause a camel stampede!) - */ - rcu_read_lock(); - tl = rcu_dereference(rq->timeline); - if (!kref_get_unless_zero(&tl->kref)) - tl = NULL; - rcu_read_unlock(); - - return tl; -} - static const char *repr_timer(const struct timer_list *t) { if (!READ_ONCE(t->expires)) @@ -1295,6 +1325,12 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7)) drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID)); + if (HAS_EXECLISTS(dev_priv)) { + drm_printf(m, "\tEL_STAT_HI: 0x%08x\n", + ENGINE_READ(engine, RING_EXECLIST_STATUS_HI)); + drm_printf(m, "\tEL_STAT_LO: 0x%08x\n", + ENGINE_READ(engine, RING_EXECLIST_STATUS_LO)); + } drm_printf(m, "\tRING_START: 0x%08x\n", ENGINE_READ(engine, RING_START)); drm_printf(m, "\tRING_HEAD: 0x%08x\n", @@ -1387,39 +1423,24 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, int len; len = scnprintf(hdr, sizeof(hdr), - "\t\tActive[%d]: ", - (int)(port - execlists->active)); - if (!i915_request_signaled(rq)) { - struct intel_timeline *tl = get_timeline(rq); - - len += scnprintf(hdr + len, sizeof(hdr) - len, - "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ", - i915_ggtt_offset(rq->ring->vma), - tl ? 
tl->hwsp_offset : 0, - hwsp_seqno(rq), - DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context), - 1000 * 1000)); - - if (tl) - intel_timeline_put(tl); - } + "\t\tActive[%d]: ccid:%08x, ", + (int)(port - execlists->active), + upper_32_bits(rq->context->lrc_desc)); + len += print_ring(hdr + len, sizeof(hdr) - len, rq); scnprintf(hdr + len, sizeof(hdr) - len, "rq: "); print_request(m, rq, hdr); } for (port = execlists->pending; (rq = *port); port++) { - struct intel_timeline *tl = get_timeline(rq); - char hdr[80]; - - snprintf(hdr, sizeof(hdr), - "\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ", - (int)(port - execlists->pending), - i915_ggtt_offset(rq->ring->vma), - tl ? tl->hwsp_offset : 0, - hwsp_seqno(rq)); - print_request(m, rq, hdr); + char hdr[160]; + int len; - if (tl) - intel_timeline_put(tl); + len = scnprintf(hdr, sizeof(hdr), + "\t\tPending[%d]: ccid:%08x, ", + (int)(port - execlists->pending), + upper_32_bits(rq->context->lrc_desc)); + len += print_ring(hdr + len, sizeof(hdr) - len, rq); + scnprintf(hdr + len, sizeof(hdr) - len, "rq: "); + print_request(m, rq, hdr); } rcu_read_unlock(); execlists_active_unlock_bh(execlists); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c index dd825718e4e5..5136c8bf112d 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c @@ -31,7 +31,7 @@ static bool next_heartbeat(struct intel_engine_cs *engine) delay = msecs_to_jiffies_timeout(delay); if (delay >= HZ) delay = round_jiffies_up_relative(delay); - schedule_delayed_work(&engine->heartbeat.work, delay); + mod_delayed_work(system_wq, &engine->heartbeat.work, delay); return true; } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index b6cf284e3a2d..3be679741d22 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -181,7 +181,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine) * Ergo, if we put ourselves on the timelines.active_list * (se intel_timeline_enter()) before we increment the * engine->wakeref.count, we may see the request completion and retire - * it causing an undeflow of the engine->wakeref. + * it causing an underflow of the engine->wakeref. */ flags = __timeline_mark_lock(ce); GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h index e52c2b0cb245..418df0a13145 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h @@ -37,6 +37,12 @@ static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine) intel_wakeref_put_async(&engine->wakeref); } +static inline void intel_engine_pm_put_delay(struct intel_engine_cs *engine, + unsigned long delay) +{ + intel_wakeref_put_delay(&engine->wakeref, delay); +} + static inline void intel_engine_pm_flush(struct intel_engine_cs *engine) { intel_wakeref_unlock_wait(&engine->wakeref); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 80cdde712842..01d4bd781a2f 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -157,6 +157,15 @@ struct intel_engine_execlists { struct i915_priolist default_priolist; /** + * @yield: CCID at the time of the last semaphore-wait interrupt. 
+ * + * Instead of leaving a semaphore busy-spinning on an engine, we would + * like to switch to another ready context, i.e. yielding the semaphore + * timeslice. + */ + u32 yield; + + /** * @error_interrupt: CS Master EIR * * The CS generates an interrupt when it detects an error. We capture @@ -308,6 +317,9 @@ struct intel_engine_cs { struct list_head hold; /* ready requests, but on hold */ } active; + /* keep a request in reserve for a [pm] barrier under oom */ + struct i915_request *request_pool; + struct llist_head barrier_tasks; struct intel_context *kernel_context; /* pinned */ diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index aed498a0d032..eebd1190506f 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -65,7 +65,7 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt) ggtt->mappable_end); } - i915_ggtt_init_fences(ggtt); + intel_ggtt_init_fences(ggtt); return 0; } @@ -191,10 +191,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, enum i915_cache_level level, u32 flags) { - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - struct sgt_iter sgt_iter; - gen8_pte_t __iomem *gtt_entries; const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0); + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen8_pte_t __iomem *gte; + gen8_pte_t __iomem *end; + struct sgt_iter iter; dma_addr_t addr; /* @@ -202,10 +203,17 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, * not to allow the user to override access to a read only page. */ - gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm; - gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE; - for_each_sgt_daddr(addr, sgt_iter, vma->pages) - gen8_set_pte(gtt_entries++, pte_encode | addr); + gte = (gen8_pte_t __iomem *)ggtt->gsm; + gte += vma->node.start / I915_GTT_PAGE_SIZE; + end = gte + vma->node.size / I915_GTT_PAGE_SIZE; + + for_each_sgt_daddr(addr, iter, vma->pages) + gen8_set_pte(gte++, pte_encode | addr); + GEM_BUG_ON(gte > end); + + /* Fill the allocated but "unused" space beyond the end of the buffer */ + while (gte < end) + gen8_set_pte(gte++, vm->scratch[0].encode); /* * We want to flush the TLBs only after we're certain all the PTE @@ -241,13 +249,22 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, u32 flags) { struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm; - unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE; + gen6_pte_t __iomem *gte; + gen6_pte_t __iomem *end; struct sgt_iter iter; dma_addr_t addr; + gte = (gen6_pte_t __iomem *)ggtt->gsm; + gte += vma->node.start / I915_GTT_PAGE_SIZE; + end = gte + vma->node.size / I915_GTT_PAGE_SIZE; + for_each_sgt_daddr(addr, iter, vma->pages) - iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]); + iowrite32(vm->pte_encode(addr, level, flags), gte++); + GEM_BUG_ON(gte > end); + + /* Fill the allocated but "unused" space beyond the end of the buffer */ + while (gte < end) + iowrite32(vm->scratch[0].encode, gte++); /* * We want to flush the TLBs only after we're certain all the PTE @@ -698,11 +715,13 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) */ void i915_ggtt_driver_release(struct drm_i915_private *i915) { + struct i915_ggtt *ggtt = &i915->ggtt; struct pagevec *pvec; - fini_aliasing_ppgtt(&i915->ggtt); + fini_aliasing_ppgtt(ggtt); - ggtt_cleanup_hw(&i915->ggtt); + intel_ggtt_fini_fences(ggtt); + ggtt_cleanup_hw(ggtt); pvec = &i915->mm.wc_stash.pvec; if (pvec->nr) { 
@@ -767,13 +786,13 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) else ggtt->gsm = ioremap_wc(phys_addr, size); if (!ggtt->gsm) { - DRM_ERROR("Failed to map the ggtt page table\n"); + drm_err(&i915->drm, "Failed to map the ggtt page table\n"); return -ENOMEM; } ret = setup_scratch_page(&ggtt->vm, GFP_DMA32); if (ret) { - DRM_ERROR("Scratch setup failed\n"); + drm_err(&i915->drm, "Scratch setup failed\n"); /* iounmap will also get called at remove, but meh */ iounmap(ggtt->gsm); return ret; @@ -833,7 +852,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) if (!err) err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39)); if (err) - DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err); + drm_err(&i915->drm, + "Can't set DMA mask/consistent mask (%d)\n", err); pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); if (IS_CHERRYVIEW(i915)) @@ -980,7 +1000,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt) * just a coarse sanity check. */ if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) { - DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end); + drm_err(&i915->drm, "Unknown GMADR size (%pa)\n", + &ggtt->mappable_end); return -ENXIO; } @@ -988,7 +1009,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt) if (!err) err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); if (err) - DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err); + drm_err(&i915->drm, + "Can't set DMA mask/consistent mask (%d)\n", err); pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); size = gen6_get_total_gtt_size(snb_gmch_ctl); @@ -1035,7 +1057,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt) ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL); if (!ret) { - DRM_ERROR("failed to set up gmch\n"); + drm_err(&i915->drm, "failed to set up gmch\n"); return -EIO; } @@ -1058,7 +1080,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.vma_ops.clear_pages = clear_pages; if (unlikely(ggtt->do_idle_maps)) - dev_notice(i915->drm.dev, + drm_notice(&i915->drm, "Applying Ironlake quirks for intel_iommu\n"); return 0; @@ -1083,26 +1105,29 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt) return ret; if ((ggtt->vm.total - 1) >> 32) { - DRM_ERROR("We never expected a Global GTT with more than 32bits" - " of address space! Found %lldM!\n", - ggtt->vm.total >> 20); + drm_err(&i915->drm, + "We never expected a Global GTT with more than 32bits" + " of address space! Found %lldM!\n", + ggtt->vm.total >> 20); ggtt->vm.total = 1ULL << 32; ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->vm.total); } if (ggtt->mappable_end > ggtt->vm.total) { - DRM_ERROR("mappable aperture extends past end of GGTT," - " aperture=%pa, total=%llx\n", - &ggtt->mappable_end, ggtt->vm.total); + drm_err(&i915->drm, + "mappable aperture extends past end of GGTT," + " aperture=%pa, total=%llx\n", + &ggtt->mappable_end, ggtt->vm.total); ggtt->mappable_end = ggtt->vm.total; } /* GMADR is the PCI mmio aperture into the global GTT. 
*/ - DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20); - DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20); - DRM_DEBUG_DRIVER("DSM size = %lluM\n", - (u64)resource_size(&intel_graphics_stolen_res) >> 20); + drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20); + drm_dbg(&i915->drm, "GMADR size = %lluM\n", + (u64)ggtt->mappable_end >> 20); + drm_dbg(&i915->drm, "DSM size = %lluM\n", + (u64)resource_size(&intel_graphics_stolen_res) >> 20); return 0; } @@ -1120,7 +1145,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915) return ret; if (intel_vtd_active()) - dev_info(i915->drm.dev, "VT-d active for gfx access\n"); + drm_info(&i915->drm, "VT-d active for gfx access\n"); return 0; } @@ -1195,6 +1220,8 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt) if (INTEL_GEN(ggtt->vm.i915) >= 8) setup_private_pat(ggtt->vm.gt->uncore); + + intel_ggtt_restore_fences(ggtt); } static struct scatterlist * diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c index d152b648c73c..7fb36b12fe7a 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c @@ -68,8 +68,7 @@ static struct intel_uncore *fence_to_uncore(struct i915_fence_reg *fence) return fence->ggtt->vm.gt->uncore; } -static void i965_write_fence_reg(struct i915_fence_reg *fence, - struct i915_vma *vma) +static void i965_write_fence_reg(struct i915_fence_reg *fence) { i915_reg_t fence_reg_lo, fence_reg_hi; int fence_pitch_shift; @@ -87,18 +86,16 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence, } val = 0; - if (vma) { - unsigned int stride = i915_gem_object_get_stride(vma->obj); + if (fence->tiling) { + unsigned int stride = fence->stride; - GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma)); - GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE)); - GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE)); GEM_BUG_ON(!IS_ALIGNED(stride, 128)); - val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32; - val |= vma->node.start; + val = fence->start + fence->size - I965_FENCE_PAGE; + val <<= 32; + val |= fence->start; val |= (u64)((stride / 128) - 1) << fence_pitch_shift; - if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y) + if (fence->tiling == I915_TILING_Y) val |= BIT(I965_FENCE_TILING_Y_SHIFT); val |= I965_FENCE_REG_VALID; } @@ -125,21 +122,15 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence, } } -static void i915_write_fence_reg(struct i915_fence_reg *fence, - struct i915_vma *vma) +static void i915_write_fence_reg(struct i915_fence_reg *fence) { u32 val; val = 0; - if (vma) { - unsigned int tiling = i915_gem_object_get_tiling(vma->obj); + if (fence->tiling) { + unsigned int stride = fence->stride; + unsigned int tiling = fence->tiling; bool is_y_tiled = tiling == I915_TILING_Y; - unsigned int stride = i915_gem_object_get_stride(vma->obj); - - GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma)); - GEM_BUG_ON(vma->node.start & ~I915_FENCE_START_MASK); - GEM_BUG_ON(!is_power_of_2(vma->fence_size)); - GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size)); if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence_to_i915(fence))) stride /= 128; @@ -147,10 +138,10 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence, stride /= 512; GEM_BUG_ON(!is_power_of_2(stride)); - val = vma->node.start; + val = fence->start; if (is_y_tiled) val |= BIT(I830_FENCE_TILING_Y_SHIFT); - val |= I915_FENCE_SIZE_BITS(vma->fence_size); + val |= 
I915_FENCE_SIZE_BITS(fence->size); val |= ilog2(stride) << I830_FENCE_PITCH_SHIFT; val |= I830_FENCE_REG_VALID; @@ -165,25 +156,18 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence, } } -static void i830_write_fence_reg(struct i915_fence_reg *fence, - struct i915_vma *vma) +static void i830_write_fence_reg(struct i915_fence_reg *fence) { u32 val; val = 0; - if (vma) { - unsigned int stride = i915_gem_object_get_stride(vma->obj); - - GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma)); - GEM_BUG_ON(vma->node.start & ~I830_FENCE_START_MASK); - GEM_BUG_ON(!is_power_of_2(vma->fence_size)); - GEM_BUG_ON(!is_power_of_2(stride / 128)); - GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size)); + if (fence->tiling) { + unsigned int stride = fence->stride; - val = vma->node.start; - if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y) + val = fence->start; + if (fence->tiling == I915_TILING_Y) val |= BIT(I830_FENCE_TILING_Y_SHIFT); - val |= I830_FENCE_SIZE_BITS(vma->fence_size); + val |= I830_FENCE_SIZE_BITS(fence->size); val |= ilog2(stride / 128) << I830_FENCE_PITCH_SHIFT; val |= I830_FENCE_REG_VALID; } @@ -197,8 +181,7 @@ static void i830_write_fence_reg(struct i915_fence_reg *fence, } } -static void fence_write(struct i915_fence_reg *fence, - struct i915_vma *vma) +static void fence_write(struct i915_fence_reg *fence) { struct drm_i915_private *i915 = fence_to_i915(fence); @@ -209,18 +192,21 @@ static void fence_write(struct i915_fence_reg *fence, */ if (IS_GEN(i915, 2)) - i830_write_fence_reg(fence, vma); + i830_write_fence_reg(fence); else if (IS_GEN(i915, 3)) - i915_write_fence_reg(fence, vma); + i915_write_fence_reg(fence); else - i965_write_fence_reg(fence, vma); + i965_write_fence_reg(fence); /* * Access through the fenced region afterwards is * ordered by the posting reads whilst writing the registers. */ +} - fence->dirty = false; +static bool gpu_uses_fence_registers(struct i915_fence_reg *fence) +{ + return INTEL_GEN(fence_to_i915(fence)) < 4; } static int fence_update(struct i915_fence_reg *fence, @@ -232,27 +218,32 @@ static int fence_update(struct i915_fence_reg *fence, struct i915_vma *old; int ret; + fence->tiling = 0; if (vma) { + GEM_BUG_ON(!i915_gem_object_get_stride(vma->obj) || + !i915_gem_object_get_tiling(vma->obj)); + if (!i915_vma_is_map_and_fenceable(vma)) return -EINVAL; - if (drm_WARN(&uncore->i915->drm, - !i915_gem_object_get_stride(vma->obj) || - !i915_gem_object_get_tiling(vma->obj), - "bogus fence setup with stride: 0x%x, tiling mode: %i\n", - i915_gem_object_get_stride(vma->obj), - i915_gem_object_get_tiling(vma->obj))) - return -EINVAL; + if (gpu_uses_fence_registers(fence)) { + /* implicit 'unfenced' GPU blits */ + ret = i915_vma_sync(vma); + if (ret) + return ret; + } - ret = i915_vma_sync(vma); - if (ret) - return ret; + fence->start = vma->node.start; + fence->size = vma->fence_size; + fence->stride = i915_gem_object_get_stride(vma->obj); + fence->tiling = i915_gem_object_get_tiling(vma->obj); } + WRITE_ONCE(fence->dirty, false); old = xchg(&fence->vma, NULL); if (old) { /* XXX Ideally we would move the waiting to outside the mutex */ - ret = i915_vma_sync(old); + ret = i915_active_wait(&fence->active); if (ret) { fence->vma = old; return ret; @@ -276,7 +267,7 @@ static int fence_update(struct i915_fence_reg *fence, /* * We only need to update the register itself if the device is awake. * If the device is currently powered down, we will defer the write - * to the runtime resume, see i915_gem_restore_fences(). 
+ * to the runtime resume, see intel_ggtt_restore_fences(). * * This only works for removing the fence register, on acquisition * the caller must hold the rpm wakeref. The fence register must @@ -290,7 +281,7 @@ static int fence_update(struct i915_fence_reg *fence, } WRITE_ONCE(fence->vma, vma); - fence_write(fence, vma); + fence_write(fence); if (vma) { vma->fence = fence; @@ -307,23 +298,26 @@ static int fence_update(struct i915_fence_reg *fence, * * This function force-removes any fence from the given object, which is useful * if the kernel wants to do untiled GTT access. - * - * Returns: - * - * 0 on success, negative error code on failure. */ -int i915_vma_revoke_fence(struct i915_vma *vma) +void i915_vma_revoke_fence(struct i915_vma *vma) { struct i915_fence_reg *fence = vma->fence; + intel_wakeref_t wakeref; lockdep_assert_held(&vma->vm->mutex); if (!fence) - return 0; + return; - if (atomic_read(&fence->pin_count)) - return -EBUSY; + GEM_BUG_ON(fence->vma != vma); + GEM_BUG_ON(!i915_active_is_idle(&fence->active)); + GEM_BUG_ON(atomic_read(&fence->pin_count)); - return fence_update(fence, NULL); + fence->tiling = 0; + WRITE_ONCE(fence->vma, NULL); + vma->fence = NULL; + + with_intel_runtime_pm_if_in_use(fence_to_uncore(fence)->rpm, wakeref) + fence_write(fence); } static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt) @@ -487,34 +481,19 @@ void i915_unreserve_fence(struct i915_fence_reg *fence) } /** - * i915_gem_restore_fences - restore fence state + * intel_ggtt_restore_fences - restore fence state * @ggtt: Global GTT * * Restore the hw fence state to match the software tracking again, to be called * after a gpu reset and on resume. Note that on runtime suspend we only cancel * the fences, to be reacquired by the user later. */ -void i915_gem_restore_fences(struct i915_ggtt *ggtt) +void intel_ggtt_restore_fences(struct i915_ggtt *ggtt) { int i; - rcu_read_lock(); /* keep obj alive as we dereference */ - for (i = 0; i < ggtt->num_fences; i++) { - struct i915_fence_reg *reg = &ggtt->fence_regs[i]; - struct i915_vma *vma = READ_ONCE(reg->vma); - - GEM_BUG_ON(vma && vma->fence != reg); - - /* - * Commit delayed tiling changes if we have an object still - * attached to the fence, otherwise just clear the fence. - */ - if (vma && !i915_gem_object_is_tiled(vma->obj)) - vma = NULL; - - fence_write(reg, vma); - } - rcu_read_unlock(); + for (i = 0; i < ggtt->num_fences; i++) + fence_write(&ggtt->fence_regs[i]); } /** @@ -746,7 +725,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt) * bit 17 of its physical address and therefore being interpreted differently * by the GPU. 
*/ -static void i915_gem_swizzle_page(struct page *page) +static void swizzle_page(struct page *page) { char temp[64]; char *vaddr; @@ -791,7 +770,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj, for_each_sgt_page(page, sgt_iter, pages) { char new_bit_17 = page_to_phys(page) >> 17; if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) { - i915_gem_swizzle_page(page); + swizzle_page(page); set_page_dirty(page); } i++; @@ -836,7 +815,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, } } -void i915_ggtt_init_fences(struct i915_ggtt *ggtt) +void intel_ggtt_init_fences(struct i915_ggtt *ggtt) { struct drm_i915_private *i915 = ggtt->vm.i915; struct intel_uncore *uncore = ggtt->vm.gt->uncore; @@ -864,18 +843,37 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt) if (intel_vgpu_active(i915)) num_fences = intel_uncore_read(uncore, vgtif_reg(avail_rs.fence_num)); + ggtt->fence_regs = kcalloc(num_fences, + sizeof(*ggtt->fence_regs), + GFP_KERNEL); + if (!ggtt->fence_regs) + num_fences = 0; /* Initialize fence registers to zero */ for (i = 0; i < num_fences; i++) { struct i915_fence_reg *fence = &ggtt->fence_regs[i]; + i915_active_init(&fence->active, NULL, NULL); fence->ggtt = ggtt; fence->id = i; list_add_tail(&fence->link, &ggtt->fence_list); } ggtt->num_fences = num_fences; - i915_gem_restore_fences(ggtt); + intel_ggtt_restore_fences(ggtt); +} + +void intel_ggtt_fini_fences(struct i915_ggtt *ggtt) +{ + int i; + + for (i = 0; i < ggtt->num_fences; i++) { + struct i915_fence_reg *fence = &ggtt->fence_regs[i]; + + i915_active_fini(&fence->active); + } + + kfree(ggtt->fence_regs); } void intel_gt_init_swizzling(struct intel_gt *gt) diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h index 7bd521cd7cd7..9eef679e1311 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h @@ -22,12 +22,14 @@ * */ -#ifndef __I915_FENCE_REG_H__ -#define __I915_FENCE_REG_H__ +#ifndef __INTEL_GGTT_FENCING_H__ +#define __INTEL_GGTT_FENCING_H__ #include <linux/list.h> #include <linux/types.h> +#include "i915_active.h" + struct drm_i915_gem_object; struct i915_ggtt; struct i915_vma; @@ -41,6 +43,7 @@ struct i915_fence_reg { struct i915_ggtt *ggtt; struct i915_vma *vma; atomic_t pin_count; + struct i915_active active; int id; /** * Whether the tiling parameters for the currently @@ -51,20 +54,24 @@ struct i915_fence_reg { * command (such as BLT on gen2/3), as a "fence". 
*/ bool dirty; + u32 start; + u32 size; + u32 tiling; + u32 stride; }; -/* i915_gem_fence_reg.c */ struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt); void i915_unreserve_fence(struct i915_fence_reg *fence); -void i915_gem_restore_fences(struct i915_ggtt *ggtt); +void intel_ggtt_restore_fences(struct i915_ggtt *ggtt); void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj, struct sg_table *pages); void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, struct sg_table *pages); -void i915_ggtt_init_fences(struct i915_ggtt *ggtt); +void intel_ggtt_init_fences(struct i915_ggtt *ggtt); +void intel_ggtt_fini_fences(struct i915_ggtt *ggtt); void intel_gt_init_swizzling(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index d09f7596cb98..1c99cc72305a 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -635,8 +635,7 @@ void intel_gt_driver_remove(struct intel_gt *gt) { __intel_gt_disable(gt); - intel_uc_fini_hw(&gt->uc); - intel_uc_fini(&gt->uc); + intel_uc_driver_remove(&gt->uc); intel_engines_release(gt); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c index f0e7fd95165a..0cc7dd54f4f9 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -39,6 +39,15 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir) } } + if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) { + WRITE_ONCE(engine->execlists.yield, + ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI)); + ENGINE_TRACE(engine, "semaphore yield: %08x\n", + engine->execlists.yield); + if (del_timer(&engine->execlists.timer)) + tasklet = true; + } + if (iir & GT_CONTEXT_SWITCH_INTERRUPT) tasklet = true; @@ -228,7 +237,8 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt) const u32 irqs = GT_CS_MASTER_ERROR_INTERRUPT | GT_RENDER_USER_INTERRUPT | - GT_CONTEXT_SWITCH_INTERRUPT; + GT_CONTEXT_SWITCH_INTERRUPT | + GT_WAIT_SEMAPHORE_INTERRUPT; struct intel_uncore *uncore = gt->uncore; const u32 dmask = irqs << 16 | irqs; const u32 smask = irqs << 16; @@ -366,7 +376,8 @@ void gen8_gt_irq_postinstall(struct intel_gt *gt) const u32 irqs = GT_CS_MASTER_ERROR_INTERRUPT | GT_RENDER_USER_INTERRUPT | - GT_CONTEXT_SWITCH_INTERRUPT; + GT_CONTEXT_SWITCH_INTERRUPT | + GT_WAIT_SEMAPHORE_INTERRUPT; const u32 gt_interrupts[] = { irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT, irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT, diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 8b653c0f5e5f..3e8a56c7d818 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -204,7 +204,7 @@ int intel_gt_resume(struct intel_gt *gt) /* Only when the HW is re-initialised, can we replay the requests */ err = intel_gt_init_hw(gt); if (err) { - dev_err(gt->i915->drm.dev, + drm_err(&gt->i915->drm, "Failed to initialize GPU, declaring it wedged!\n"); goto err_wedged; } @@ -220,7 +220,7 @@ int intel_gt_resume(struct intel_gt *gt) intel_engine_pm_put(engine); if (err) { - dev_err(gt->i915->drm.dev, + drm_err(&gt->i915->drm, "Failed to restart %s (%d)\n", engine->name, err); goto err_wedged; @@ -324,6 +324,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt) { GT_TRACE(gt, "\n"); intel_gt_init_swizzling(gt); + intel_ggtt_restore_fences(gt->ggtt); return intel_uc_runtime_resume(&gt->uc); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c 
b/drivers/gpu/drm/i915/gt/intel_gt_requests.c index 24c99d0838af..835ec184763e 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c @@ -38,7 +38,7 @@ static bool flush_submission(struct intel_gt *gt) for_each_engine(engine, gt, id) { intel_engine_flush_submission(engine); active |= flush_work(&engine->retire_work); - active |= flush_work(&engine->wakeref.work); + active |= flush_delayed_work(&engine->wakeref.work); } return active; diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index b3116fe8d180..d93ebdf3fa0e 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -26,7 +26,6 @@ #include <drm/drm_mm.h> #include "gt/intel_reset.h" -#include "i915_gem_fence_reg.h" #include "i915_selftest.h" #include "i915_vma_types.h" @@ -135,6 +134,8 @@ typedef u64 gen8_pte_t; #define GEN8_PDE_IPS_64K BIT(11) #define GEN8_PDE_PS_2M BIT(7) +struct i915_fence_reg; + #define for_each_sgt_daddr(__dp, __iter, __sgt) \ __for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE) @@ -333,7 +334,7 @@ struct i915_ggtt { u32 pin_bias; unsigned int num_fences; - struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; + struct i915_fence_reg *fence_regs; struct list_head fence_list; /** diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 683014e7bc51..6fbad5e2343f 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -238,6 +238,17 @@ __execlists_update_reg_state(const struct intel_context *ce, const struct intel_engine_cs *engine, u32 head); +static u32 intel_context_get_runtime(const struct intel_context *ce) +{ + /* + * We can use either ppHWSP[16] which is recorded before the context + * switch (and so excludes the cost of context switches) or use the + * value from the context image itself, which is saved/restored earlier + * and so includes the cost of the save. + */ + return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]); +} + static void mark_eio(struct i915_request *rq) { if (i915_request_completed(rq)) @@ -1154,6 +1165,7 @@ static void restore_default_state(struct intel_context *ce, engine->context_size - PAGE_SIZE); execlists_init_reg_state(regs, ce, engine, ce->ring, false); + ce->runtime.last = intel_context_get_runtime(ce); } static void reset_active(struct i915_request *rq, @@ -1195,17 +1207,6 @@ static void reset_active(struct i915_request *rq, ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; } -static u32 intel_context_get_runtime(const struct intel_context *ce) -{ - /* - * We can use either ppHWSP[16] which is recorded before the context - * switch (and so excludes the cost of context switches) or use the - * value from the context image itself, which is saved/restored earlier - * and so includes the cost of the save. - */ - return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]); -} - static void st_update_runtime_underflow(struct intel_context *ce, s32 dt) { #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) @@ -1415,6 +1416,23 @@ static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc } } +static __maybe_unused char * +dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq) +{ + if (!rq) + return ""; + + snprintf(buf, buflen, "%s%llx:%lld%s prio %d", + prefix, + rq->fence.context, rq->fence.seqno, + i915_request_completed(rq) ? "!" : + i915_request_started(rq) ? 
"*" : + "", + rq_prio(rq)); + + return buf; +} + static __maybe_unused void trace_ports(const struct intel_engine_execlists *execlists, const char *msg, @@ -1422,18 +1440,14 @@ trace_ports(const struct intel_engine_execlists *execlists, { const struct intel_engine_cs *engine = container_of(execlists, typeof(*engine), execlists); + char __maybe_unused p0[40], p1[40]; if (!ports[0]) return; - ENGINE_TRACE(engine, "%s { %llx:%lld%s, %llx:%lld }\n", msg, - ports[0]->fence.context, - ports[0]->fence.seqno, - i915_request_completed(ports[0]) ? "!" : - i915_request_started(ports[0]) ? "*" : - "", - ports[1] ? ports[1]->fence.context : 0, - ports[1] ? ports[1]->fence.seqno : 0); + ENGINE_TRACE(engine, "%s { %s%s }\n", msg, + dump_port(p0, sizeof(p0), "", ports[0]), + dump_port(p1, sizeof(p1), ", ", ports[1])); } static inline bool @@ -1754,7 +1768,8 @@ static void defer_active(struct intel_engine_cs *engine) } static bool -need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq) +need_timeslice(const struct intel_engine_cs *engine, + const struct i915_request *rq) { int hint; @@ -1768,6 +1783,32 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq) return hint >= effective_prio(rq); } +static bool +timeslice_yield(const struct intel_engine_execlists *el, + const struct i915_request *rq) +{ + /* + * Once bitten, forever smitten! + * + * If the active context ever busy-waited on a semaphore, + * it will be treated as a hog until the end of its timeslice (i.e. + * until it is scheduled out and replaced by a new submission, + * possibly even its own lite-restore). The HW only sends an interrupt + * on the first miss, and we do know if that semaphore has been + * signaled, or even if it is now stuck on another semaphore. Play + * safe, yield if it might be stuck -- it will be given a fresh + * timeslice in the near future. 
+ */ + return upper_32_bits(rq->context->lrc_desc) == READ_ONCE(el->yield); +} + +static bool +timeslice_expired(const struct intel_engine_execlists *el, + const struct i915_request *rq) +{ + return timer_expired(&el->timer) || timeslice_yield(el, rq); +} + static int switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq) { @@ -1783,8 +1824,7 @@ timeslice(const struct intel_engine_cs *engine) return READ_ONCE(engine->props.timeslice_duration_ms); } -static unsigned long -active_timeslice(const struct intel_engine_cs *engine) +static unsigned long active_timeslice(const struct intel_engine_cs *engine) { const struct intel_engine_execlists *execlists = &engine->execlists; const struct i915_request *rq = *execlists->active; @@ -1800,16 +1840,25 @@ active_timeslice(const struct intel_engine_cs *engine) static void set_timeslice(struct intel_engine_cs *engine) { + unsigned long duration; + if (!intel_engine_has_timeslices(engine)) return; - set_timer_ms(&engine->execlists.timer, active_timeslice(engine)); + duration = active_timeslice(engine); + ENGINE_TRACE(engine, "bump timeslicing, interval:%lu", duration); + + set_timer_ms(&engine->execlists.timer, duration); } static void start_timeslice(struct intel_engine_cs *engine) { struct intel_engine_execlists *execlists = &engine->execlists; - int prio = queue_prio(execlists); + const int prio = queue_prio(execlists); + unsigned long duration; + + if (!intel_engine_has_timeslices(engine)) + return; WRITE_ONCE(execlists->switch_priority_hint, prio); if (prio == INT_MIN) @@ -1818,7 +1867,12 @@ static void start_timeslice(struct intel_engine_cs *engine) if (timer_pending(&execlists->timer)) return; - set_timer_ms(&execlists->timer, timeslice(engine)); + duration = timeslice(engine); + ENGINE_TRACE(engine, + "start timeslicing, prio:%d, interval:%lu", + prio, duration); + + set_timer_ms(&execlists->timer, duration); } static void record_preemption(struct intel_engine_execlists *execlists) @@ -1915,11 +1969,26 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * of trouble. */ active = READ_ONCE(execlists->active); - while ((last = *active) && i915_request_completed(last)) - active++; - if (last) { + /* + * In theory we can skip over completed contexts that have not + * yet been processed by events (as those events are in flight): + * + * while ((last = *active) && i915_request_completed(last)) + * active++; + * + * However, the GPU cannot handle this as it will ultimately + * find itself trying to jump back into a context it has just + * completed and barf. 
+ */ + + if ((last = *active)) { if (need_preempt(engine, last, rb)) { + if (i915_request_completed(last)) { + tasklet_hi_schedule(&execlists->tasklet); + return; + } + ENGINE_TRACE(engine, "preempting last=%llx:%lld, prio=%d, hint=%d\n", last->fence.context, @@ -1946,13 +2015,19 @@ static void execlists_dequeue(struct intel_engine_cs *engine) last = NULL; } else if (need_timeslice(engine, last) && - timer_expired(&engine->execlists.timer)) { + timeslice_expired(execlists, last)) { + if (i915_request_completed(last)) { + tasklet_hi_schedule(&execlists->tasklet); + return; + } + ENGINE_TRACE(engine, - "expired last=%llx:%lld, prio=%d, hint=%d\n", + "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n", last->fence.context, last->fence.seqno, last->sched.attr.priority, - execlists->queue_priority_hint); + execlists->queue_priority_hint, + yesno(timeslice_yield(execlists, last))); ring_set_paused(engine, 1); defer_active(engine); @@ -2213,6 +2288,7 @@ done: } clear_ports(port + 1, last_port - port); + WRITE_ONCE(execlists->yield, -1); execlists_submit_ports(engine); set_preempt_timeout(engine, *active); } else { @@ -2308,6 +2384,13 @@ gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) return *csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED); } +static inline void flush_hwsp(const struct i915_request *rq) +{ + mb(); + clflush((void *)READ_ONCE(rq->hwsp_seqno)); + mb(); +} + static void process_csb(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; @@ -2384,8 +2467,6 @@ static void process_csb(struct intel_engine_cs *engine) if (promote) { struct i915_request * const *old = execlists->active; - GEM_BUG_ON(!assert_pending_valid(execlists, "promote")); - ring_set_paused(engine, 0); /* Point active to the new ELSP; prevent overwriting */ @@ -2398,6 +2479,7 @@ static void process_csb(struct intel_engine_cs *engine) execlists_schedule_out(*old++); /* switch pending to inflight */ + GEM_BUG_ON(!assert_pending_valid(execlists, "promote")); memcpy(execlists->inflight, execlists->pending, execlists_num_ports(execlists) * @@ -2419,13 +2501,24 @@ static void process_csb(struct intel_engine_cs *engine) * user interrupt and CSB is processed. */ if (GEM_SHOW_DEBUG() && - !i915_request_completed(*execlists->active) && - !reset_in_progress(execlists)) { - struct i915_request *rq __maybe_unused = - *execlists->active; + !i915_request_completed(*execlists->active)) { + struct i915_request *rq = *execlists->active; const u32 *regs __maybe_unused = rq->context->lrc_reg_state; + /* + * Flush the breadcrumb before crying foul. + * + * Since we have hit this on icl and seen the + * breadcrumb advance as we print out the debug + * info (so the problem corrected itself without + * lasting damage), and we know that icl suffers + * from missing global observation points in + * execlists, presume that affects even more + * coherency. + */ + flush_hwsp(rq); + ENGINE_TRACE(engine, "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n", ENGINE_READ(engine, RING_START), @@ -2446,7 +2539,10 @@ static void process_csb(struct intel_engine_cs *engine) regs[CTX_RING_HEAD], regs[CTX_RING_TAIL]); - GEM_BUG_ON("context completed before request"); + /* Still? Declare it caput! 
*/ + if (!i915_request_completed(rq) && + !reset_in_progress(execlists)) + GEM_BUG_ON("context completed before request"); } execlists_schedule_out(*execlists->active++); @@ -2736,6 +2832,45 @@ err_cap: return NULL; } +static struct i915_request * +active_context(struct intel_engine_cs *engine, u32 ccid) +{ + const struct intel_engine_execlists * const el = &engine->execlists; + struct i915_request * const *port, *rq; + + /* + * Use the most recent result from process_csb(), but just in case + * we trigger an error (via interrupt) before the first CS event has + * been written, peek at the next submission. + */ + + for (port = el->active; (rq = *port); port++) { + if (upper_32_bits(rq->context->lrc_desc) == ccid) { + ENGINE_TRACE(engine, + "ccid found at active:%zd\n", + port - el->active); + return rq; + } + } + + for (port = el->pending; (rq = *port); port++) { + if (upper_32_bits(rq->context->lrc_desc) == ccid) { + ENGINE_TRACE(engine, + "ccid found at pending:%zd\n", + port - el->pending); + return rq; + } + } + + ENGINE_TRACE(engine, "ccid:%x not found\n", ccid); + return NULL; +} + +static u32 active_ccid(struct intel_engine_cs *engine) +{ + return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI); +} + static bool execlists_capture(struct intel_engine_cs *engine) { struct execlists_capture *cap; @@ -2753,7 +2888,7 @@ static bool execlists_capture(struct intel_engine_cs *engine) return true; spin_lock_irq(&engine->active.lock); - cap->rq = execlists_active(&engine->execlists); + cap->rq = active_context(engine, active_ccid(engine)); if (cap->rq) { cap->rq = active_request(cap->rq->context->timeline, cap->rq); cap->rq = i915_request_get_rcu(cap->rq); @@ -2901,10 +3036,14 @@ static void __submit_queue_imm(struct intel_engine_cs *engine) if (reset_in_progress(execlists)) return; /* defer until we restart the engine following reset */ - if (execlists->tasklet.func == execlists_submission_tasklet) - __execlists_submission_tasklet(engine); - else - tasklet_hi_schedule(&execlists->tasklet); + /* Hopefully we clear execlists->pending[] to let us through */ + if (READ_ONCE(execlists->pending[0]) && + tasklet_trylock(&execlists->tasklet)) { + process_csb(engine); + tasklet_unlock(&execlists->tasklet); + } + + __execlists_submission_tasklet(engine); } static void submit_queue(struct intel_engine_cs *engine, @@ -2990,7 +3129,7 @@ check_redzone(const void *vaddr, const struct intel_engine_cs *engine) vaddr += engine->context_size; if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE)) - dev_err_once(engine->i915->drm.dev, + drm_err_once(&engine->i915->drm, "%s context redzone overwritten!\n", engine->name); } @@ -3442,7 +3581,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) ret = lrc_setup_wa_ctx(engine); if (ret) { - DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret); + drm_dbg(&engine->i915->drm, + "Failed to setup context WA page: %d\n", ret); return ret; } @@ -3485,7 +3625,7 @@ static void enable_error_interrupt(struct intel_engine_cs *engine) status = ENGINE_READ(engine, RING_ESR); if (unlikely(status)) { - dev_err(engine->i915->drm.dev, + drm_err(&engine->i915->drm, "engine '%s' resumed still in error: %08x\n", engine->name, status); __intel_gt_reset(engine->gt, engine->mask); @@ -3549,7 +3689,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine) bool unexpected = false; if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) { - DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n"); + drm_dbg(&engine->i915->drm, + "STOP_RING 
still set in RING_MI_MODE\n"); unexpected = true; } @@ -3609,6 +3750,7 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine) * * FIXME: Wa for more modern gens needs to be validated */ + ring_set_paused(engine, 1); intel_engine_stop_cs(engine); } @@ -4449,6 +4591,7 @@ logical_ring_default_irqs(struct intel_engine_cs *engine) engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift; + engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift; } static void rcs_submission_override(struct intel_engine_cs *engine) @@ -4493,7 +4636,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine) * because we only expect rare glitches but nothing * critical to prevent us from using GPU */ - DRM_ERROR("WA batch buffer initialization failed\n"); + drm_err(&i915->drm, "WA batch buffer initialization failed\n"); if (HAS_LOGICAL_RING_ELSQ(i915)) { execlists->submit_reg = uncore->regs + @@ -4575,6 +4718,7 @@ static void init_common_reg_state(u32 * const regs, regs[CTX_CONTEXT_CONTROL] = ctl; regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID; + regs[CTX_TIMESTAMP] = 0; } static void init_wa_bb_reg_state(u32 * const regs, @@ -4668,7 +4812,8 @@ populate_lr_context(struct intel_context *ce, vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB); if (IS_ERR(vaddr)) { ret = PTR_ERR(vaddr); - DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret); + drm_dbg(&engine->i915->drm, + "Could not map object pages! (%d)\n", ret); return ret; } @@ -4761,7 +4906,8 @@ static int __execlists_context_alloc(struct intel_context *ce, ret = populate_lr_context(ce, ctx_obj, engine, ring); if (ret) { - DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret); + drm_dbg(&engine->i915->drm, + "Failed to populate LRC: %d\n", ret); goto error_ring_free; } @@ -4814,6 +4960,8 @@ static void virtual_context_destroy(struct kref *kref) __execlists_context_fini(&ve->context); intel_context_fini(&ve->context); + intel_engine_free_request_pool(&ve->base); + kfree(ve->bonds); kfree(ve); } @@ -4994,10 +5142,8 @@ static void virtual_submission_tasklet(unsigned long data) submit_engine: GEM_BUG_ON(RB_EMPTY_NODE(&node->rb)); node->prio = prio; - if (first && prio > sibling->execlists.queue_priority_hint) { - sibling->execlists.queue_priority_hint = prio; + if (first && prio > sibling->execlists.queue_priority_hint) tasklet_hi_schedule(&sibling->execlists.tasklet); - } spin_unlock(&sibling->active.lock); } diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index 3847ee44b181..1c1923ec8be7 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -246,16 +246,18 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6) ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL); if (IS_GEN(i915, 6) && ret) { - DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); + drm_dbg(&i915->drm, "Couldn't check for BIOS workaround\n"); } else if (IS_GEN(i915, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { - DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", - GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); + drm_dbg(&i915->drm, + "You should update your BIOS. 
Correcting minimum rc6 voltage (%dmV->%dmV)\n", + GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); rc6vids &= 0xffff00; rc6vids |= GEN6_ENCODE_RC6_VID(450); ret = sandybridge_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); if (ret) - DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); + drm_err(&i915->drm, + "Couldn't fix incorrect rc6 voltage\n"); } } @@ -263,14 +265,15 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6) static int chv_rc6_init(struct intel_rc6 *rc6) { struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct drm_i915_private *i915 = rc6_to_i915(rc6); resource_size_t pctx_paddr, paddr; resource_size_t pctx_size = 32 * SZ_1K; u32 pcbr; pcbr = intel_uncore_read(uncore, VLV_PCBR); if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) { - DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); - paddr = rc6_to_i915(rc6)->dsm.end + 1 - pctx_size; + drm_dbg(&i915->drm, "BIOS didn't set up PCBR, fixing up\n"); + paddr = i915->dsm.end + 1 - pctx_size; GEM_BUG_ON(paddr > U32_MAX); pctx_paddr = (paddr & ~4095); @@ -304,7 +307,7 @@ static int vlv_rc6_init(struct intel_rc6 *rc6) goto out; } - DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); + drm_dbg(&i915->drm, "BIOS didn't set up PCBR, fixing up\n"); /* * From the Gunit register HAS: @@ -316,7 +319,8 @@ static int vlv_rc6_init(struct intel_rc6 *rc6) */ pctx = i915_gem_object_create_stolen(i915, pctx_size); if (IS_ERR(pctx)) { - DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); + drm_dbg(&i915->drm, + "not enough stolen space for PCTX, disabling\n"); return PTR_ERR(pctx); } @@ -398,14 +402,14 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6) rc_sw_target = intel_uncore_read(uncore, GEN6_RC_STATE); rc_sw_target &= RC_SW_TARGET_STATE_MASK; rc_sw_target >>= RC_SW_TARGET_STATE_SHIFT; - DRM_DEBUG_DRIVER("BIOS enabled RC states: " + drm_dbg(&i915->drm, "BIOS enabled RC states: " "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n", onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE), onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE), rc_sw_target); if (!(intel_uncore_read(uncore, RC6_LOCATION) & RC6_CTX_IN_DRAM)) { - DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n"); + drm_dbg(&i915->drm, "RC6 Base location not set properly.\n"); enable_rc6 = false; } @@ -417,7 +421,7 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6) intel_uncore_read(uncore, RC6_CTX_BASE) & RC6_CTX_BASE_MASK; if (!(rc6_ctx_base >= i915->dsm_reserved.start && rc6_ctx_base + PAGE_SIZE < i915->dsm_reserved.end)) { - DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n"); + drm_dbg(&i915->drm, "RC6 Base address not as expected.\n"); enable_rc6 = false; } @@ -425,24 +429,25 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6) (intel_uncore_read(uncore, PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1 && (intel_uncore_read(uncore, PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1 && (intel_uncore_read(uncore, PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1)) { - DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n"); + drm_dbg(&i915->drm, + "Engine Idle wait time not set properly.\n"); enable_rc6 = false; } if (!intel_uncore_read(uncore, GEN8_PUSHBUS_CONTROL) || !intel_uncore_read(uncore, GEN8_PUSHBUS_ENABLE) || !intel_uncore_read(uncore, GEN8_PUSHBUS_SHIFT)) { - DRM_DEBUG_DRIVER("Pushbus not setup properly.\n"); + drm_dbg(&i915->drm, "Pushbus not setup properly.\n"); enable_rc6 = false; } if (!intel_uncore_read(uncore, GEN6_GFXPAUSE)) { - DRM_DEBUG_DRIVER("GFX pause not setup properly.\n"); + drm_dbg(&i915->drm, "GFX pause not setup properly.\n"); enable_rc6 
= false; } if (!intel_uncore_read(uncore, GEN8_MISC_CTRL0)) { - DRM_DEBUG_DRIVER("GPM control not setup properly.\n"); + drm_dbg(&i915->drm, "GPM control not setup properly.\n"); enable_rc6 = false; } @@ -463,7 +468,7 @@ static bool rc6_supported(struct intel_rc6 *rc6) return false; if (IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(rc6)) { - dev_notice(i915->drm.dev, + drm_notice(&i915->drm, "RC6 and powersaving disabled by BIOS\n"); return false; } @@ -495,7 +500,7 @@ static bool pctx_corrupted(struct intel_rc6 *rc6) if (intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO)) return false; - dev_notice(i915->drm.dev, + drm_notice(&i915->drm, "RC6 context corruption, disabling runtime power management\n"); return true; } diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c index 5954ecc3207f..26e78db33675 100644 --- a/drivers/gpu/drm/i915/gt/intel_renderstate.c +++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c @@ -102,7 +102,7 @@ static int render_state_setup(struct intel_renderstate *so, } if (rodata->reloc[reloc_index] != -1) { - DRM_ERROR("only %d relocs resolved\n", reloc_index); + drm_err(&i915->drm, "only %d relocs resolved\n", reloc_index); goto err; } diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 80db3c9d785e..39070b514e65 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -109,7 +109,7 @@ static bool mark_guilty(struct i915_request *rq) goto out; } - dev_notice(ctx->i915->drm.dev, + drm_notice(&ctx->i915->drm, "%s context reset due to GPU hang\n", ctx->name); @@ -755,7 +755,7 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask) for_each_engine(engine, gt, id) __intel_engine_reset(engine, stalled_mask & engine->mask); - i915_gem_restore_fences(gt->ggtt); + intel_ggtt_restore_fences(gt->ggtt); return err; } @@ -1031,7 +1031,7 @@ void intel_gt_reset(struct intel_gt *gt, goto unlock; if (reason) - dev_notice(gt->i915->drm.dev, + drm_notice(&gt->i915->drm, "Resetting chip for %s\n", reason); atomic_inc(&gt->i915->gpu_error.reset_count); @@ -1039,7 +1039,7 @@ void intel_gt_reset(struct intel_gt *gt, if (!intel_has_gpu_reset(gt)) { if (i915_modparams.reset) - dev_err(gt->i915->drm.dev, "GPU reset not supported\n"); + drm_err(&gt->i915->drm, "GPU reset not supported\n"); else drm_dbg(&gt->i915->drm, "GPU reset disabled\n"); goto error; @@ -1049,7 +1049,7 @@ void intel_gt_reset(struct intel_gt *gt, intel_runtime_pm_disable_interrupts(gt->i915); if (do_reset(gt, stalled_mask)) { - dev_err(gt->i915->drm.dev, "Failed to reset chip\n"); + drm_err(&gt->i915->drm, "Failed to reset chip\n"); goto taint; } @@ -1111,7 +1111,7 @@ static inline int intel_gt_reset_engine(struct intel_engine_cs *engine) /** * intel_engine_reset - reset GPU engine to recover from a hang * @engine: engine to reset - * @msg: reason for GPU reset; or NULL for no dev_notice() + * @msg: reason for GPU reset; or NULL for no drm_notice() * * Reset a specific GPU engine. Useful if a hang is detected. Returns zero on successful reset or otherwise an error code. 
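The intel_reset.c hunks above and below switch dev_err()/dev_notice() over to the drm_err()/drm_notice() helpers, which take a struct drm_device pointer rather than a struct device. A minimal sketch of that pattern, assuming the usual i915 headers; report_chip_reset() is an illustrative name, not a function from this series:

	#include "i915_drv.h"
	#include "gt/intel_gt.h"

	/* Sketch only: mirrors the dev_*() -> drm_*() logging conversion. */
	static void report_chip_reset(struct intel_gt *gt, const char *reason)
	{
		if (reason)
			drm_notice(&gt->i915->drm, "Resetting chip for %s\n", reason);
		else
			drm_dbg(&gt->i915->drm, "Resetting chip\n");
	}

The drm_* macros prefix messages with the DRM device name, which is why the series passes &gt->i915->drm (or &i915->drm) instead of i915->drm.dev.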
@@ -1136,7 +1136,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg) reset_prepare_engine(engine); if (msg) - dev_notice(engine->i915->drm.dev, + drm_notice(&engine->i915->drm, "Resetting %s for %s\n", engine->name, msg); atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]); @@ -1381,7 +1381,7 @@ static void intel_wedge_me(struct work_struct *work) { struct intel_wedge_me *w = container_of(work, typeof(*w), work.work); - dev_err(w->gt->i915->drm.dev, + drm_err(&w->gt->i915->drm, "%s timed out, cancelling all in-flight rendering.\n", w->name); intel_gt_set_wedged(w->gt); diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h index 5bdce24994aa..cc0ebca65167 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring.h +++ b/drivers/gpu/drm/i915/gt/intel_ring.h @@ -88,6 +88,8 @@ static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr) static inline void assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail) { + unsigned int head = READ_ONCE(ring->head); + GEM_BUG_ON(!intel_ring_offset_valid(ring, tail)); /* @@ -105,8 +107,7 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail) * into the same cacheline as ring->head. */ #define cacheline(a) round_down(a, CACHELINE_BYTES) - GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) && - tail < ring->head); + GEM_BUG_ON(cacheline(tail) == cacheline(head) && tail < head); #undef cacheline } diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index fdc3f10e12aa..d015f7b8b28e 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -577,8 +577,9 @@ static void flush_cs_tlb(struct intel_engine_cs *engine) RING_INSTPM(engine->mmio_base), INSTPM_SYNC_FLUSH, 0, 1000)) - DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", - engine->name); + drm_err(&dev_priv->drm, + "%s: wait for SyncFlush to complete for TLB invalidation timed out\n", + engine->name); } static void ring_setup_status_page(struct intel_engine_cs *engine) @@ -601,8 +602,9 @@ static bool stop_ring(struct intel_engine_cs *engine) MODE_IDLE, MODE_IDLE, 1000)) { - DRM_ERROR("%s : timed out trying to stop ring\n", - engine->name); + drm_err(&dev_priv->drm, + "%s : timed out trying to stop ring\n", + engine->name); /* * Sometimes we observe that the idle flag is not @@ -661,22 +663,23 @@ static int xcs_resume(struct intel_engine_cs *engine) /* WaClearRingBufHeadRegAtInit:ctg,elk */ if (!stop_ring(engine)) { /* G45 ring initialization often fails to reset head to zero */ - DRM_DEBUG_DRIVER("%s head not reset to zero " + drm_dbg(&dev_priv->drm, "%s head not reset to zero " + "ctl %08x head %08x tail %08x start %08x\n", + engine->name, + ENGINE_READ(engine, RING_CTL), + ENGINE_READ(engine, RING_HEAD), + ENGINE_READ(engine, RING_TAIL), + ENGINE_READ(engine, RING_START)); + + if (!stop_ring(engine)) { + drm_err(&dev_priv->drm, + "failed to set %s head to zero " "ctl %08x head %08x tail %08x start %08x\n", engine->name, ENGINE_READ(engine, RING_CTL), ENGINE_READ(engine, RING_HEAD), ENGINE_READ(engine, RING_TAIL), ENGINE_READ(engine, RING_START)); - - if (!stop_ring(engine)) { - DRM_ERROR("failed to set %s head to zero " - "ctl %08x head %08x tail %08x start %08x\n", - engine->name, - ENGINE_READ(engine, RING_CTL), - ENGINE_READ(engine, RING_HEAD), - ENGINE_READ(engine, RING_TAIL), - ENGINE_READ(engine, RING_START)); ret = -EIO; 
goto out; } @@ -719,7 +722,7 @@ static int xcs_resume(struct intel_engine_cs *engine) RING_CTL(engine->mmio_base), RING_VALID, RING_VALID, 50)) { - DRM_ERROR("%s initialization failed " + drm_err(&dev_priv->drm, "%s initialization failed " "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n", engine->name, ENGINE_READ(engine, RING_CTL), diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index cfaf141bac4d..4dcfae16a7ce 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -81,13 +81,14 @@ static void rps_enable_interrupts(struct intel_rps *rps) events = (GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT); - WRITE_ONCE(rps->pm_events, events); + spin_lock_irq(&gt->irq_lock); gen6_gt_pm_enable_irq(gt, rps->pm_events); spin_unlock_irq(&gt->irq_lock); - set(gt->uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, rps->cur_freq)); + intel_uncore_write(gt->uncore, + GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq)); } static void gen6_rps_reset_interrupts(struct intel_rps *rps) @@ -120,7 +121,9 @@ static void rps_disable_interrupts(struct intel_rps *rps) struct intel_gt *gt = rps_to_gt(rps); WRITE_ONCE(rps->pm_events, 0); - set(gt->uncore, GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u)); + + intel_uncore_write(gt->uncore, + GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u)); spin_lock_irq(&gt->irq_lock); gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS); @@ -183,14 +186,12 @@ static void gen5_rps_init(struct intel_rps *rps) fmin = (rgvmodectl & MEMMODE_FMIN_MASK); fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT; - DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", - fmax, fmin, fstart); + drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n", + fmax, fmin, fstart); rps->min_freq = fmax; + rps->efficient_freq = fstart; rps->max_freq = fmin; - - rps->idle_freq = rps->min_freq; - rps->cur_freq = rps->idle_freq; } static unsigned long @@ -453,7 +454,8 @@ static bool gen5_rps_enable(struct intel_rps *rps) if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) - DRM_ERROR("stuck trying to change perf mode\n"); + drm_err(&uncore->i915->drm, + "stuck trying to change perf mode\n"); mdelay(1); gen5_rps_set(rps, rps->cur_freq); @@ -712,8 +714,6 @@ static int rps_set(struct intel_rps *rps, u8 val, bool update) void intel_rps_unpark(struct intel_rps *rps) { - u8 freq; - if (!rps->enabled) return; @@ -725,9 +725,10 @@ void intel_rps_unpark(struct intel_rps *rps) WRITE_ONCE(rps->active, true); - freq = max(rps->cur_freq, rps->efficient_freq), - freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit); - intel_rps_set(rps, freq); + intel_rps_set(rps, + clamp(rps->cur_freq, + rps->min_freq_softlimit, + rps->max_freq_softlimit)); rps->last_adj = 0; @@ -893,12 +894,13 @@ static void gen6_rps_init(struct intel_rps *rps) static bool rps_reset(struct intel_rps *rps) { + struct drm_i915_private *i915 = rps_to_i915(rps); /* force a reset */ rps->power.mode = -1; rps->last_freq = -1; if (rps_set(rps, rps->min_freq, true)) { - DRM_ERROR("Failed to reset RPS to initial values\n"); + drm_err(&i915->drm, "Failed to reset RPS to initial values\n"); return false; } @@ -1049,8 +1051,8 @@ static bool chv_rps_enable(struct intel_rps *rps) drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0, "GPLL not enabled\n"); - DRM_DEBUG_DRIVER("GPLL enabled? 
%s\n", yesno(val & GPLLENABLE)); - DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); + drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE)); + drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val); return rps_reset(rps); } @@ -1147,8 +1149,8 @@ static bool vlv_rps_enable(struct intel_rps *rps) drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0, "GPLL not enabled\n"); - DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE)); - DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); + drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE)); + drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val); return rps_reset(rps); } @@ -1305,7 +1307,8 @@ static void vlv_init_gpll_ref_freq(struct intel_rps *rps) CCK_GPLL_CLOCK_CONTROL, i915->czclk_freq); - DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", rps->gpll_ref_freq); + drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n", + rps->gpll_ref_freq); } static void vlv_rps_init(struct intel_rps *rps) @@ -1333,28 +1336,24 @@ static void vlv_rps_init(struct intel_rps *rps) i915->mem_freq = 1333; break; } - DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq); + drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq); rps->max_freq = vlv_rps_max_freq(rps); rps->rp0_freq = rps->max_freq; - DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", - intel_gpu_freq(rps, rps->max_freq), - rps->max_freq); + drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->max_freq), rps->max_freq); rps->efficient_freq = vlv_rps_rpe_freq(rps); - DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", - intel_gpu_freq(rps, rps->efficient_freq), - rps->efficient_freq); + drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq); rps->rp1_freq = vlv_rps_guar_freq(rps); - DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n", - intel_gpu_freq(rps, rps->rp1_freq), - rps->rp1_freq); + drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq); rps->min_freq = vlv_rps_min_freq(rps); - DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", - intel_gpu_freq(rps, rps->min_freq), - rps->min_freq); + drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->min_freq), rps->min_freq); vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT) | @@ -1384,28 +1383,24 @@ static void chv_rps_init(struct intel_rps *rps) i915->mem_freq = 1600; break; } - DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq); + drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq); rps->max_freq = chv_rps_max_freq(rps); rps->rp0_freq = rps->max_freq; - DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", - intel_gpu_freq(rps, rps->max_freq), - rps->max_freq); + drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->max_freq), rps->max_freq); rps->efficient_freq = chv_rps_rpe_freq(rps); - DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", - intel_gpu_freq(rps, rps->efficient_freq), - rps->efficient_freq); + drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq); rps->rp1_freq = chv_rps_guar_freq(rps); - DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n", - intel_gpu_freq(rps, rps->rp1_freq), - rps->rp1_freq); + drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq); rps->min_freq = chv_rps_min_freq(rps); - DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", - intel_gpu_freq(rps, rps->min_freq), - rps->min_freq); + drm_dbg(&i915->drm, "min 
GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->min_freq), rps->min_freq); vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT) | @@ -1468,6 +1463,7 @@ static void rps_work(struct work_struct *work) { struct intel_rps *rps = container_of(work, typeof(*rps), work); struct intel_gt *gt = rps_to_gt(rps); + struct drm_i915_private *i915 = rps_to_i915(rps); bool client_boost = false; int new_freq, adj, min, max; u32 pm_iir = 0; @@ -1543,7 +1539,7 @@ static void rps_work(struct work_struct *work) new_freq = clamp_t(int, new_freq, min, max); if (intel_rps_set(rps, new_freq)) { - DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n"); + drm_dbg(&i915->drm, "Failed to set new GPU frequency\n"); rps->last_adj = 0; } @@ -1665,9 +1661,10 @@ void intel_rps_init(struct intel_rps *rps) sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS, ¶ms, NULL); if (params & BIT(31)) { /* OC supported */ - DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n", - (rps->max_freq & 0xff) * 50, - (params & 0xff) * 50); + drm_dbg(&i915->drm, + "Overclocking supported, max: %dMHz, overclock: %dMHz\n", + (rps->max_freq & 0xff) * 50, + (params & 0xff) * 50); rps->max_freq = params & 0xff; } } @@ -1675,7 +1672,9 @@ void intel_rps_init(struct intel_rps *rps) /* Finally allow us to boost to max by default */ rps->boost_freq = rps->max_freq; rps->idle_freq = rps->min_freq; - rps->cur_freq = rps->idle_freq; + + /* Start in the middle, from here we will autotune based on workload */ + rps->cur_freq = rps->efficient_freq; rps->pm_intrmsk_mbz = 0; @@ -1927,3 +1926,7 @@ bool i915_gpu_turbo_disable(void) return ret; } EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_rps.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index 74f793423231..d173271c7397 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -65,7 +65,6 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, { const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu; bool subslice_pg = sseu->has_subslice_pg; - struct intel_sseu ctx_sseu; u8 slices, subslices; u32 rpcs = 0; @@ -78,31 +77,13 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, /* * If i915/perf is active, we want a stable powergating configuration - * on the system. - * - * We could choose full enablement, but on ICL we know there are use - * cases which disable slices for functional, apart for performance - * reasons. So in this case we select a known stable subset. + * on the system. Use the configuration pinned by i915/perf. */ - if (!i915->perf.exclusive_stream) { - ctx_sseu = *req_sseu; - } else { - ctx_sseu = intel_sseu_from_device_info(sseu); - - if (IS_GEN(i915, 11)) { - /* - * We only need subslice count so it doesn't matter - * which ones we select - just turn off low bits in the - * amount of half of all available subslices per slice. 
- */ - ctx_sseu.subslice_mask = - ~(~0 << (hweight8(ctx_sseu.subslice_mask) / 2)); - ctx_sseu.slice_mask = 0x1; - } - } + if (i915->perf.exclusive_stream) + req_sseu = &i915->perf.sseu; - slices = hweight8(ctx_sseu.slice_mask); - subslices = hweight8(ctx_sseu.subslice_mask); + slices = hweight8(req_sseu->slice_mask); + subslices = hweight8(req_sseu->subslice_mask); /* * Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits @@ -175,13 +156,13 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, if (sseu->has_eu_pg) { u32 val; - val = ctx_sseu.min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT; + val = req_sseu->min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT; GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK); val &= GEN8_RPCS_EU_MIN_MASK; rpcs |= val; - val = ctx_sseu.max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT; + val = req_sseu->max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT; GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK); val &= GEN8_RPCS_EU_MAX_MASK; diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 91debbc97c9a..3779c2ae0d65 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -119,6 +119,15 @@ static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline) spin_unlock_irqrestore(&gt->hwsp_lock, flags); } +static void __rcu_cacheline_free(struct rcu_head *rcu) +{ + struct intel_timeline_cacheline *cl = + container_of(rcu, typeof(*cl), rcu); + + i915_active_fini(&cl->active); + kfree(cl); +} + static void __idle_cacheline_free(struct intel_timeline_cacheline *cl) { GEM_BUG_ON(!i915_active_is_idle(&cl->active)); @@ -127,8 +136,7 @@ static void __idle_cacheline_free(struct intel_timeline_cacheline *cl) i915_vma_put(cl->hwsp->vma); __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS)); - i915_active_fini(&cl->active); - kfree_rcu(cl, rcu); + call_rcu(&cl->rcu, __rcu_cacheline_free); } __i915_active_call diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 5176ad1a3976..adddc5c93b48 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -837,7 +837,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) & GEN10_L3BANK_MASK; - DRM_DEBUG_DRIVER("L3 fuse = %x\n", l3_fuse); + drm_dbg(&i915->drm, "L3 fuse = %x\n", l3_fuse); l3_en = ~(l3_fuse << GEN10_L3BANK_PAIR_COUNT | l3_fuse); } else { l3_en = ~0; @@ -846,7 +846,8 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) slice = fls(sseu->slice_mask) - 1; subslice = fls(l3_en & intel_sseu_get_subslices(sseu, slice)); if (!subslice) { - DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n", + drm_warn(&i915->drm, + "No common index found between subslice mask %x and L3 bank mask %x!\n", intel_sseu_get_subslices(sseu, slice), l3_en); subslice = fls(l3_en); drm_WARN_ON(&i915->drm, !subslice); @@ -861,7 +862,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK; } - DRM_DEBUG_DRIVER("MCR slice/subslice = %x\n", mcr); + drm_dbg(&i915->drm, "MCR slice/subslice = %x\n", mcr); wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr); } @@ -942,6 +943,8 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) static void tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list 
*wal) { + wa_init_mcr(i915, wal); + /* Wa_1409420604:tgl */ if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) wa_write_or(wal, @@ -1379,12 +1382,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN7_FF_THREAD_MODE, GEN12_FF_TESSELATION_DOP_GATE_DISABLE); - /* - * Wa_1409085225:tgl - * Wa_14010229206:tgl - */ - wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH); - /* Wa_1408615072:tgl */ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL); @@ -1402,6 +1399,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) wa_masked_en(wal, GEN9_CS_DEBUG_MODE1, FF_DOP_CLOCK_GATE_DISABLE); + + /* + * Wa_1409085225:tgl + * Wa_14010229206:tgl + */ + wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH); } if (IS_GEN(i915, 11)) { diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c index 09ff8e4f88af..c50bb502fe03 100644 --- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c @@ -7,6 +7,7 @@ #include "selftest_llc.h" #include "selftest_rc6.h" +#include "selftest_rps.h" static int live_gt_resume(void *arg) { @@ -52,6 +53,7 @@ int intel_gt_pm_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(live_rc6_manual), + SUBTEST(live_rps_interrupt), SUBTEST(live_gt_resume), }; diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 6f06ba750a0a..6f5e35afe1b2 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -68,26 +68,41 @@ static void engine_heartbeat_enable(struct intel_engine_cs *engine, engine->props.heartbeat_interval_ms = saved; } +static bool is_active(struct i915_request *rq) +{ + if (i915_request_is_active(rq)) + return true; + + if (i915_request_on_hold(rq)) + return true; + + if (i915_request_started(rq)) + return true; + + return false; +} + static int wait_for_submit(struct intel_engine_cs *engine, struct i915_request *rq, unsigned long timeout) { timeout += jiffies; do { - cond_resched(); - intel_engine_flush_submission(engine); - - if (READ_ONCE(engine->execlists.pending[0])) - continue; + bool done = time_after(jiffies, timeout); - if (i915_request_is_active(rq)) + if (i915_request_completed(rq)) /* that was quick! */ return 0; - if (i915_request_started(rq)) /* that was quick! */ + /* Wait until the HW has acknowleged the submission (or err) */ + intel_engine_flush_submission(engine); + if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq)) return 0; - } while (time_before(jiffies, timeout)); - return -ETIME; + if (done) + return -ETIME; + + cond_resched(); + } while (1); } static int wait_for_reset(struct intel_engine_cs *engine, @@ -634,9 +649,9 @@ static int live_error_interrupt(void *arg) error_repr(p->error[i])); if (!i915_request_started(client[i])) { - pr_debug("%s: %s request not stated!\n", - engine->name, - error_repr(p->error[i])); + pr_err("%s: %s request not started!\n", + engine->name, + error_repr(p->error[i])); err = -ETIME; goto out; } @@ -644,9 +659,10 @@ static int live_error_interrupt(void *arg) /* Kick the tasklet to process the error */ intel_engine_flush_submission(engine); if (client[i]->fence.error != p->error[i]) { - pr_err("%s: %s request completed with wrong error code: %d\n", + pr_err("%s: %s request (%s) with wrong error code: %d\n", engine->name, error_repr(p->error[i]), + i915_request_completed(client[i]) ? 
"completed" : "running", client[i]->fence.error); err = -EINVAL; goto out; @@ -929,7 +945,7 @@ create_rewinder(struct intel_context *ce, goto err; } - cs = intel_ring_begin(rq, 10); + cs = intel_ring_begin(rq, 14); if (IS_ERR(cs)) { err = PTR_ERR(cs); goto err; @@ -941,8 +957,8 @@ create_rewinder(struct intel_context *ce, *cs++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT | MI_SEMAPHORE_POLL | - MI_SEMAPHORE_SAD_NEQ_SDD; - *cs++ = 0; + MI_SEMAPHORE_SAD_GTE_SDD; + *cs++ = idx; *cs++ = offset; *cs++ = 0; @@ -951,6 +967,11 @@ create_rewinder(struct intel_context *ce, *cs++ = offset + idx * sizeof(u32); *cs++ = 0; + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = offset; + *cs++ = 0; + *cs++ = idx + 1; + intel_ring_advance(rq, cs); rq->sched.attr.priority = I915_PRIORITY_MASK; @@ -984,7 +1005,7 @@ static int live_timeslice_rewind(void *arg) for_each_engine(engine, gt, id) { enum { A1, A2, B1 }; - enum { X = 1, Y, Z }; + enum { X = 1, Z, Y }; struct i915_request *rq[3] = {}; struct intel_context *ce; unsigned long heartbeat; @@ -1017,13 +1038,13 @@ static int live_timeslice_rewind(void *arg) goto err; } - rq[0] = create_rewinder(ce, NULL, slot, 1); + rq[0] = create_rewinder(ce, NULL, slot, X); if (IS_ERR(rq[0])) { intel_context_put(ce); goto err; } - rq[1] = create_rewinder(ce, NULL, slot, 2); + rq[1] = create_rewinder(ce, NULL, slot, Y); intel_context_put(ce); if (IS_ERR(rq[1])) goto err; @@ -1041,7 +1062,7 @@ static int live_timeslice_rewind(void *arg) goto err; } - rq[2] = create_rewinder(ce, rq[0], slot, 3); + rq[2] = create_rewinder(ce, rq[0], slot, Z); intel_context_put(ce); if (IS_ERR(rq[2])) goto err; @@ -1052,18 +1073,14 @@ static int live_timeslice_rewind(void *arg) engine->name); goto err; } - GEM_BUG_ON(!timer_pending(&engine->execlists.timer)); /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */ - GEM_BUG_ON(!i915_request_is_active(rq[A1])); - GEM_BUG_ON(!i915_request_is_active(rq[A2])); - GEM_BUG_ON(!i915_request_is_active(rq[B1])); - - /* Wait for the timeslice to kick in */ - del_timer(&engine->execlists.timer); - tasklet_hi_schedule(&engine->execlists.tasklet); - intel_engine_flush_submission(engine); - + if (i915_request_is_active(rq[A2])) { /* semaphore yielded! 
*/ + /* Wait for the timeslice to kick in */ + del_timer(&engine->execlists.timer); + tasklet_hi_schedule(&engine->execlists.tasklet); + intel_engine_flush_submission(engine); + } /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */ GEM_BUG_ON(!i915_request_is_active(rq[A1])); GEM_BUG_ON(!i915_request_is_active(rq[B1])); @@ -1228,8 +1245,14 @@ static int live_timeslice_queue(void *arg) if (err) goto err_rq; - intel_engine_flush_submission(engine); + /* Wait until we ack the release_queue and start timeslicing */ + do { + cond_resched(); + intel_engine_flush_submission(engine); + } while (READ_ONCE(engine->execlists.pending[0])); + if (!READ_ONCE(engine->execlists.timer.expires) && + execlists_active(&engine->execlists) == rq && !i915_request_completed(rq)) { struct drm_printer p = drm_info_printer(gt->i915->drm.dev); @@ -2030,6 +2053,9 @@ static int __cancel_hostile(struct live_preempt_cancel *arg) if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) return 0; + if (!intel_has_reset_engine(arg->engine->gt)) + return 0; + GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); rq = spinner_create_request(&arg->a.spin, arg->a.ctx, arg->engine, @@ -2630,7 +2656,7 @@ static int create_gang(struct intel_engine_cs *engine, if (IS_ERR(rq)) goto err_obj; - rq->batch = vma; + rq->batch = i915_vma_get(vma); i915_request_get(rq); i915_vma_lock(vma); @@ -2654,6 +2680,7 @@ static int create_gang(struct intel_engine_cs *engine, return 0; err_rq: + i915_vma_put(rq->batch); i915_request_put(rq); err_obj: i915_gem_object_put(obj); @@ -2750,6 +2777,7 @@ static int live_preempt_gang(void *arg) err = -ETIME; } + i915_vma_put(rq->batch); i915_request_put(rq); rq = n; } @@ -5155,7 +5183,6 @@ static int compare_isolation(struct intel_engine_cs *engine, A[0][x], B[0][x], B[1][x], poison, lrc[dw + 1]); err = -EINVAL; - break; } } dw += 2; @@ -5294,6 +5321,7 @@ static int live_lrc_isolation(void *arg) 0xffffffff, 0xffff0000, }; + int err = 0; /* * Our goal is try and verify that per-context state cannot be @@ -5304,7 +5332,6 @@ static int live_lrc_isolation(void *arg) */ for_each_engine(engine, gt, id) { - int err = 0; int i; /* Just don't even ask */ @@ -5315,23 +5342,25 @@ static int live_lrc_isolation(void *arg) intel_engine_pm_get(engine); if (engine->pinned_default_state) { for (i = 0; i < ARRAY_SIZE(poison); i++) { - err = __lrc_isolation(engine, poison[i]); - if (err) - break; + int result; - err = __lrc_isolation(engine, ~poison[i]); - if (err) - break; + result = __lrc_isolation(engine, poison[i]); + if (result && !err) + err = result; + + result = __lrc_isolation(engine, ~poison[i]); + if (result && !err) + err = result; } } intel_engine_pm_put(engine); - if (igt_flush_test(gt->i915)) + if (igt_flush_test(gt->i915)) { err = -EIO; - if (err) - return err; + break; + } } - return 0; + return err; } static void garbage_reset(struct intel_engine_cs *engine, diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c index 95b165faeba7..08c3dbd41b12 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rc6.c +++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c @@ -12,6 +12,22 @@ #include "selftests/i915_random.h" +static u64 energy_uJ(struct intel_rc6 *rc6) +{ + unsigned long long power; + u32 units; + + if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) + return 0; + + units = (power & 0x1f00) >> 8; + + if (rdmsrl_safe(MSR_PP1_ENERGY_STATUS, &power)) + return 0; + + return (1000000 * power) >> units; /* convert to uJ */ +} + static u64 rc6_residency(struct intel_rc6 *rc6) { u64 result; @@ -31,7 +47,9 @@ int 
live_rc6_manual(void *arg) { struct intel_gt *gt = arg; struct intel_rc6 *rc6 = &gt->rc6; + u64 rc0_power, rc6_power; intel_wakeref_t wakeref; + ktime_t dt; u64 res[2]; int err = 0; @@ -54,7 +72,12 @@ int live_rc6_manual(void *arg) msleep(1); /* wakeup is not immediate, takes about 100us on icl */ res[0] = rc6_residency(rc6); + + dt = ktime_get(); + rc0_power = energy_uJ(rc6); msleep(250); + rc0_power = energy_uJ(rc6) - rc0_power; + dt = ktime_sub(ktime_get(), dt); res[1] = rc6_residency(rc6); if ((res[1] - res[0]) >> 10) { pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n", @@ -63,13 +86,24 @@ int live_rc6_manual(void *arg) goto out_unlock; } + rc0_power = div64_u64(NSEC_PER_SEC * rc0_power, ktime_to_ns(dt)); + if (!rc0_power) { + pr_err("No power measured while in RC0\n"); + err = -EINVAL; + goto out_unlock; + } + /* Manually enter RC6 */ intel_rc6_park(rc6); res[0] = rc6_residency(rc6); + intel_uncore_forcewake_flush(rc6_to_uncore(rc6), FORCEWAKE_ALL); + dt = ktime_get(); + rc6_power = energy_uJ(rc6); msleep(100); + rc6_power = energy_uJ(rc6) - rc6_power; + dt = ktime_sub(ktime_get(), dt); res[1] = rc6_residency(rc6); - if (res[1] == res[0]) { pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x, residency=%lld\n", intel_uncore_read_fw(gt->uncore, GEN6_RC_STATE), @@ -78,6 +112,15 @@ int live_rc6_manual(void *arg) err = -EINVAL; } + rc6_power = div64_u64(NSEC_PER_SEC * rc6_power, ktime_to_ns(dt)); + pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n", + rc0_power, rc6_power); + if (2 * rc6_power > rc0_power) { + pr_err("GPU leaked energy while in RC6!\n"); + err = -EINVAL; + goto out_unlock; + } + /* Restore what should have been the original state! */ intel_rc6_unpark(rc6); diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c new file mode 100644 index 000000000000..26aadc2ae3be --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -0,0 +1,223 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include "intel_engine_pm.h" +#include "intel_gt_pm.h" +#include "intel_rc6.h" +#include "selftest_rps.h" +#include "selftests/igt_flush_test.h" +#include "selftests/igt_spinner.h" + +static void dummy_rps_work(struct work_struct *wrk) +{ +} + +static int __rps_up_interrupt(struct intel_rps *rps, + struct intel_engine_cs *engine, + struct igt_spinner *spin) +{ + struct intel_uncore *uncore = engine->uncore; + struct i915_request *rq; + u32 timeout; + + if (!intel_engine_can_store_dword(engine)) + return 0; + + intel_gt_pm_wait_for_idle(engine->gt); + GEM_BUG_ON(rps->active); + + rps->pm_iir = 0; + rps->cur_freq = rps->min_freq; + + rq = igt_spinner_create_request(spin, engine->kernel_context, MI_NOOP); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + i915_request_get(rq); + i915_request_add(rq); + + if (!igt_wait_for_spinner(spin, rq)) { + pr_err("%s: RPS spinner did not start\n", + engine->name); + i915_request_put(rq); + intel_gt_set_wedged(engine->gt); + return -EIO; + } + + if (!rps->active) { + pr_err("%s: RPS not enabled on starting spinner\n", + engine->name); + igt_spinner_end(spin); + i915_request_put(rq); + return -EINVAL; + } + + if (!(rps->pm_events & GEN6_PM_RP_UP_THRESHOLD)) { + pr_err("%s: RPS did not register UP interrupt\n", + engine->name); + i915_request_put(rq); + return -EINVAL; + } + + if (rps->last_freq != rps->min_freq) { + pr_err("%s: RPS did not program min frequency\n", + engine->name); + i915_request_put(rq); + return -EINVAL; + } + + timeout = 
intel_uncore_read(uncore, GEN6_RP_UP_EI); + timeout = GT_PM_INTERVAL_TO_US(engine->i915, timeout); + + usleep_range(2 * timeout, 3 * timeout); + GEM_BUG_ON(i915_request_completed(rq)); + + igt_spinner_end(spin); + i915_request_put(rq); + + if (rps->cur_freq != rps->min_freq) { + pr_err("%s: Frequency unexpectedly changed [up], now %d!\n", + engine->name, intel_rps_read_actual_frequency(rps)); + return -EINVAL; + } + + if (!(rps->pm_iir & GEN6_PM_RP_UP_THRESHOLD)) { + pr_err("%s: UP interrupt not recorded for spinner, pm_iir:%x, prev_up:%x, up_threshold:%x, up_ei:%x\n", + engine->name, rps->pm_iir, + intel_uncore_read(uncore, GEN6_RP_PREV_UP), + intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD), + intel_uncore_read(uncore, GEN6_RP_UP_EI)); + return -EINVAL; + } + + intel_gt_pm_wait_for_idle(engine->gt); + return 0; +} + +static int __rps_down_interrupt(struct intel_rps *rps, + struct intel_engine_cs *engine) +{ + struct intel_uncore *uncore = engine->uncore; + u32 timeout; + + mutex_lock(&rps->lock); + GEM_BUG_ON(!rps->active); + intel_rps_set(rps, rps->max_freq); + mutex_unlock(&rps->lock); + + if (!(rps->pm_events & GEN6_PM_RP_DOWN_THRESHOLD)) { + pr_err("%s: RPS did not register DOWN interrupt\n", + engine->name); + return -EINVAL; + } + + if (rps->last_freq != rps->max_freq) { + pr_err("%s: RPS did not program max frequency\n", + engine->name); + return -EINVAL; + } + + timeout = intel_uncore_read(uncore, GEN6_RP_DOWN_EI); + timeout = GT_PM_INTERVAL_TO_US(engine->i915, timeout); + + /* Flush any previous EI */ + usleep_range(timeout, 2 * timeout); + + /* Reset the interrupt status */ + rps_disable_interrupts(rps); + GEM_BUG_ON(rps->pm_iir); + rps_enable_interrupts(rps); + + /* And then wait for the timeout, for real this time */ + usleep_range(2 * timeout, 3 * timeout); + + if (rps->cur_freq != rps->max_freq) { + pr_err("%s: Frequency unexpectedly changed [down], now %d!\n", + engine->name, + intel_rps_read_actual_frequency(rps)); + return -EINVAL; + } + + if (!(rps->pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT))) { + pr_err("%s: DOWN interrupt not recorded for idle, pm_iir:%x, prev_down:%x, down_threshold:%x, down_ei:%x [prev_up:%x, up_threshold:%x, up_ei:%x]\n", + engine->name, rps->pm_iir, + intel_uncore_read(uncore, GEN6_RP_PREV_DOWN), + intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD), + intel_uncore_read(uncore, GEN6_RP_DOWN_EI), + intel_uncore_read(uncore, GEN6_RP_PREV_UP), + intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD), + intel_uncore_read(uncore, GEN6_RP_UP_EI)); + return -EINVAL; + } + + return 0; +} + +int live_rps_interrupt(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_rps *rps = &gt->rps; + void (*saved_work)(struct work_struct *wrk); + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct igt_spinner spin; + u32 pm_events; + int err = 0; + + /* + * First, let's check whether or not we are receiving interrupts. + */ + + if (!rps->enabled || rps->max_freq <= rps->min_freq) + return 0; + + intel_gt_pm_get(gt); + pm_events = rps->pm_events; + intel_gt_pm_put(gt); + if (!pm_events) { + pr_err("No RPS PM events registered, but RPS is enabled?\n"); + return -ENODEV; + } + + if (igt_spinner_init(&spin, gt)) + return -ENOMEM; + + intel_gt_pm_wait_for_idle(gt); + saved_work = rps->work.func; + rps->work.func = dummy_rps_work; + + for_each_engine(engine, gt, id) { + /* Keep the engine busy with a spinner; expect an UP! 
*/ + if (pm_events & GEN6_PM_RP_UP_THRESHOLD) { + err = __rps_up_interrupt(rps, engine, &spin); + if (err) + goto out; + } + + /* Keep the engine awake but idle and check for DOWN */ + if (pm_events & GEN6_PM_RP_DOWN_THRESHOLD) { + intel_engine_pm_get(engine); + intel_rc6_disable(&gt->rc6); + + err = __rps_down_interrupt(rps, engine); + + intel_rc6_enable(&gt->rc6); + intel_engine_pm_put(engine); + if (err) + goto out; + } + } + +out: + if (igt_flush_test(gt->i915)) + err = -EIO; + + igt_spinner_fini(&spin); + + intel_gt_pm_wait_for_idle(gt); + rps->work.func = saved_work; + + return err; +} diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.h b/drivers/gpu/drm/i915/gt/selftest_rps.h new file mode 100644 index 000000000000..abba66420996 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_rps.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef SELFTEST_RPS_H +#define SELFTEST_RPS_H + +int live_rps_interrupt(void *arg); + +#endif /* SELFTEST_RPS_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 819f09ef51fc..861657897c0f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -169,7 +169,7 @@ void intel_guc_init_early(struct intel_guc *guc) { struct drm_i915_private *i915 = guc_to_gt(guc)->i915; - intel_guc_fw_init_early(guc); + intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC); intel_guc_ct_init_early(&guc->ct); intel_guc_log_init_early(&guc->log); intel_guc_submission_init_early(guc); @@ -723,3 +723,47 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size, return 0; } + +/** + * intel_guc_load_status - dump information about GuC load status + * @guc: the GuC + * @p: the &drm_printer + * + * Pretty printer for GuC load status. 
+ */ +void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p) +{ + struct intel_gt *gt = guc_to_gt(guc); + struct intel_uncore *uncore = gt->uncore; + intel_wakeref_t wakeref; + + if (!intel_guc_is_supported(guc)) { + drm_printf(p, "GuC not supported\n"); + return; + } + + if (!intel_guc_is_wanted(guc)) { + drm_printf(p, "GuC disabled\n"); + return; + } + + intel_uc_fw_dump(&guc->fw, p); + + with_intel_runtime_pm(uncore->rpm, wakeref) { + u32 status = intel_uncore_read(uncore, GUC_STATUS); + u32 i; + + drm_printf(p, "\nGuC status 0x%08x:\n", status); + drm_printf(p, "\tBootrom status = 0x%x\n", + (status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT); + drm_printf(p, "\tuKernel status = 0x%x\n", + (status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT); + drm_printf(p, "\tMIA Core status = 0x%x\n", + (status & GS_MIA_MASK) >> GS_MIA_SHIFT); + drm_puts(p, "\nScratch registers:\n"); + for (i = 0; i < 16; i++) { + drm_printf(p, "\t%2d: \t0x%x\n", + i, intel_uncore_read(uncore, SOFT_SCRATCH(i))); + } + } +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 4594ccbeaa34..e84ab67b317d 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -74,6 +74,11 @@ struct intel_guc { struct mutex send_mutex; }; +static inline struct intel_guc *log_to_guc(struct intel_guc_log *log) +{ + return container_of(log, struct intel_guc, log); +} + static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) { @@ -190,4 +195,6 @@ static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask) int intel_guc_reset_engine(struct intel_guc *guc, struct intel_engine_cs *engine); +void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p); + #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c new file mode 100644 index 000000000000..fe7cb7b29a1e --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include <drm/drm_print.h> + +#include "gt/debugfs_gt.h" +#include "intel_guc.h" +#include "intel_guc_debugfs.h" +#include "intel_guc_log_debugfs.h" + +static int guc_info_show(struct seq_file *m, void *data) +{ + struct intel_guc *guc = m->private; + struct drm_printer p = drm_seq_file_printer(m); + + if (!intel_guc_is_supported(guc)) + return -ENODEV; + + intel_guc_load_status(guc, &p); + drm_puts(&p, "\n"); + intel_guc_log_info(&guc->log, &p); + + /* Add more as required ... 
*/ + + return 0; +} +DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_info); + +void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root) +{ + static const struct debugfs_gt_file files[] = { + { "guc_info", &guc_info_fops, NULL }, + }; + + if (!intel_guc_is_supported(guc)) + return; + + intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), guc); + intel_guc_log_debugfs_register(&guc->log, root); +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h new file mode 100644 index 000000000000..424c26665cf1 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef DEBUGFS_GUC_H +#define DEBUGFS_GUC_H + +struct intel_guc; +struct dentry; + +void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root); + +#endif /* DEBUGFS_GUC_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index 3a1c47d600ea..d4a87f4c9421 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -13,20 +13,6 @@ #include "intel_guc_fw.h" #include "i915_drv.h" -/** - * intel_guc_fw_init_early() - initializes GuC firmware struct - * @guc: intel_guc struct - * - * On platforms with GuC selects firmware for uploading - */ -void intel_guc_fw_init_early(struct intel_guc *guc) -{ - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; - - intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, HAS_GT_UC(i915), - INTEL_INFO(i915)->platform, INTEL_REVID(i915)); -} - static void guc_prepare_xfer(struct intel_uncore *uncore) { u32 shim_flags = GUC_DISABLE_SRAM_INIT_TO_ZEROES | diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h index b5ab639d7259..0b4d2a9c9435 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h @@ -8,7 +8,6 @@ struct intel_guc; -void intel_guc_fw_init_early(struct intel_guc *guc); int intel_guc_fw_upload(struct intel_guc *guc); #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c index caed0d57e704..fb10f3597ea5 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c @@ -55,11 +55,6 @@ static int guc_action_control_log(struct intel_guc *guc, bool enable, return intel_guc_send(guc, action, ARRAY_SIZE(action)); } -static inline struct intel_guc *log_to_guc(struct intel_guc_log *log) -{ - return container_of(log, struct intel_guc, log); -} - static void guc_log_enable_flush_events(struct intel_guc_log *log) { intel_guc_enable_msg(log_to_guc(log), @@ -672,3 +667,95 @@ void intel_guc_log_handle_flush_event(struct intel_guc_log *log) { queue_work(system_highpri_wq, &log->relay.flush_work); } + +static const char * +stringify_guc_log_type(enum guc_log_buffer_type type) +{ + switch (type) { + case GUC_ISR_LOG_BUFFER: + return "ISR"; + case GUC_DPC_LOG_BUFFER: + return "DPC"; + case GUC_CRASH_DUMP_LOG_BUFFER: + return "CRASH"; + default: + MISSING_CASE(type); + } + + return ""; +} + +/** + * intel_guc_log_info - dump information about GuC log relay + * @log: the GuC log + * @p: the &drm_printer + * + * Pretty printer for GuC log info + */ +void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p) +{ + enum guc_log_buffer_type type; + + if (!intel_guc_log_relay_created(log)) { + drm_puts(p, "GuC log relay not 
created\n"); + return; + } + + drm_puts(p, "GuC logging stats:\n"); + + drm_printf(p, "\tRelay full count: %u\n", log->relay.full_count); + + for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) { + drm_printf(p, "\t%s:\tflush count %10u, overflow count %10u\n", + stringify_guc_log_type(type), + log->stats[type].flush, + log->stats[type].sampled_overflow); + } +} + +/** + * intel_guc_log_dump - dump the contents of the GuC log + * @log: the GuC log + * @p: the &drm_printer + * @dump_load_err: dump the log saved on GuC load error + * + * Pretty printer for the GuC log + */ +int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p, + bool dump_load_err) +{ + struct intel_guc *guc = log_to_guc(log); + struct intel_uc *uc = container_of(guc, struct intel_uc, guc); + struct drm_i915_gem_object *obj = NULL; + u32 *map; + int i = 0; + + if (!intel_guc_is_supported(guc)) + return -ENODEV; + + if (dump_load_err) + obj = uc->load_err_log; + else if (guc->log.vma) + obj = guc->log.vma->obj; + + if (!obj) + return 0; + + map = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(map)) { + DRM_DEBUG("Failed to pin object\n"); + drm_puts(p, "(log data unaccessible)\n"); + return PTR_ERR(map); + } + + for (i = 0; i < obj->base.size / sizeof(u32); i += 4) + drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n", + *(map + i), *(map + i + 1), + *(map + i + 2), *(map + i + 3)); + + drm_puts(p, "\n"); + + i915_gem_object_unpin_map(obj); + + return 0; +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h index c252c022c5fc..11fccd0b2294 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h @@ -79,4 +79,8 @@ static inline u32 intel_guc_log_get_level(struct intel_guc_log *log) return log->level; } +void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p); +int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p, + bool dump_load_err); + #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c new file mode 100644 index 000000000000..129e0cf7dfe2 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include <linux/fs.h> +#include <drm/drm_print.h> + +#include "gt/debugfs_gt.h" +#include "intel_guc.h" +#include "intel_guc_log.h" +#include "intel_guc_log_debugfs.h" + +static int guc_log_dump_show(struct seq_file *m, void *data) +{ + struct drm_printer p = drm_seq_file_printer(m); + + return intel_guc_log_dump(m->private, &p, false); +} +DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_log_dump); + +static int guc_load_err_log_dump_show(struct seq_file *m, void *data) +{ + struct drm_printer p = drm_seq_file_printer(m); + + return intel_guc_log_dump(m->private, &p, true); +} +DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_load_err_log_dump); + +static int guc_log_level_get(void *data, u64 *val) +{ + struct intel_guc_log *log = data; + + if (!intel_guc_is_used(log_to_guc(log))) + return -ENODEV; + + *val = intel_guc_log_get_level(log); + + return 0; +} + +static int guc_log_level_set(void *data, u64 val) +{ + struct intel_guc_log *log = data; + + if (!intel_guc_is_used(log_to_guc(log))) + return -ENODEV; + + return intel_guc_log_set_level(log, val); +} + +DEFINE_SIMPLE_ATTRIBUTE(guc_log_level_fops, + guc_log_level_get, guc_log_level_set, + "%lld\n"); + +static int guc_log_relay_open(struct inode *inode, struct 
file *file) +{ + struct intel_guc_log *log = inode->i_private; + + if (!intel_guc_is_ready(log_to_guc(log))) + return -ENODEV; + + file->private_data = log; + + return intel_guc_log_relay_open(log); +} + +static ssize_t +guc_log_relay_write(struct file *filp, + const char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + struct intel_guc_log *log = filp->private_data; + int val; + int ret; + + ret = kstrtoint_from_user(ubuf, cnt, 0, &val); + if (ret < 0) + return ret; + + /* + * Enable and start the guc log relay on value of 1. + * Flush log relay for any other value. + */ + if (val == 1) + ret = intel_guc_log_relay_start(log); + else + intel_guc_log_relay_flush(log); + + return ret ?: cnt; +} + +static int guc_log_relay_release(struct inode *inode, struct file *file) +{ + struct intel_guc_log *log = inode->i_private; + + intel_guc_log_relay_close(log); + return 0; +} + +static const struct file_operations guc_log_relay_fops = { + .owner = THIS_MODULE, + .open = guc_log_relay_open, + .write = guc_log_relay_write, + .release = guc_log_relay_release, +}; + +void intel_guc_log_debugfs_register(struct intel_guc_log *log, + struct dentry *root) +{ + static const struct debugfs_gt_file files[] = { + { "guc_log_dump", &guc_log_dump_fops, NULL }, + { "guc_load_err_log_dump", &guc_load_err_log_dump_fops, NULL }, + { "guc_log_level", &guc_log_level_fops, NULL }, + { "guc_log_relay", &guc_log_relay_fops, NULL }, + }; + + if (!intel_guc_is_supported(log_to_guc(log))) + return; + + intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), log); +} + diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h new file mode 100644 index 000000000000..e8900e3d74ea --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef DEBUGFS_GUC_LOG_H +#define DEBUGFS_GUC_LOG_H + +struct intel_guc_log; +struct dentry; + +void intel_guc_log_debugfs_register(struct intel_guc_log *log, + struct dentry *root); + +#endif /* DEBUGFS_GUC_LOG_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index a74b65694512..65eeb44b397d 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -41,7 +41,7 @@ void intel_huc_init_early(struct intel_huc *huc) { struct drm_i915_private *i915 = huc_to_gt(huc)->i915; - intel_huc_fw_init_early(huc); + intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC); if (INTEL_GEN(i915) >= 11) { huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO; @@ -200,9 +200,13 @@ fail: * This function reads status register to verify if HuC * firmware was successfully loaded. * - * Returns: 1 if HuC firmware is loaded and verified, - * 0 if HuC firmware is not loaded and -ENODEV if HuC - * is not present on this platform. + * Returns: + * * -ENODEV if HuC is not present on this platform, + * * -EOPNOTSUPP if HuC firmware is disabled, + * * -ENOPKG if HuC firmware was not installed, + * * -ENOEXEC if HuC firmware is invalid or mismatched, + * * 0 if HuC firmware is not running, + * * 1 if HuC firmware is authenticated and running. 
*/ int intel_huc_check_status(struct intel_huc *huc) { @@ -210,11 +214,50 @@ int intel_huc_check_status(struct intel_huc *huc) intel_wakeref_t wakeref; u32 status = 0; - if (!intel_huc_is_supported(huc)) + switch (__intel_uc_fw_status(&huc->fw)) { + case INTEL_UC_FIRMWARE_NOT_SUPPORTED: return -ENODEV; + case INTEL_UC_FIRMWARE_DISABLED: + return -EOPNOTSUPP; + case INTEL_UC_FIRMWARE_MISSING: + return -ENOPKG; + case INTEL_UC_FIRMWARE_ERROR: + return -ENOEXEC; + default: + break; + } with_intel_runtime_pm(gt->uncore->rpm, wakeref) status = intel_uncore_read(gt->uncore, huc->status.reg); return (status & huc->status.mask) == huc->status.value; } + +/** + * intel_huc_load_status - dump information about HuC load status + * @huc: the HuC + * @p: the &drm_printer + * + * Pretty printer for HuC load status. + */ +void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p) +{ + struct intel_gt *gt = huc_to_gt(huc); + intel_wakeref_t wakeref; + + if (!intel_huc_is_supported(huc)) { + drm_printf(p, "HuC not supported\n"); + return; + } + + if (!intel_huc_is_wanted(huc)) { + drm_printf(p, "HuC disabled\n"); + return; + } + + intel_uc_fw_dump(&huc->fw, p); + + with_intel_runtime_pm(gt->uncore->rpm, wakeref) + drm_printf(p, "HuC status: 0x%08x\n", + intel_uncore_read(gt->uncore, huc->status.reg)); +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h index a40b9cfc6c22..daee43b661d4 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h @@ -57,4 +57,6 @@ static inline bool intel_huc_is_authenticated(struct intel_huc *huc) return intel_uc_fw_is_running(&huc->fw); } +void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p); + #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c new file mode 100644 index 000000000000..5733c15fd123 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include <drm/drm_print.h> + +#include "gt/debugfs_gt.h" +#include "intel_huc.h" +#include "intel_huc_debugfs.h" + +static int huc_info_show(struct seq_file *m, void *data) +{ + struct intel_huc *huc = m->private; + struct drm_printer p = drm_seq_file_printer(m); + + if (!intel_huc_is_supported(huc)) + return -ENODEV; + + intel_huc_load_status(huc, &p); + + return 0; +} +DEFINE_GT_DEBUGFS_ATTRIBUTE(huc_info); + +void intel_huc_debugfs_register(struct intel_huc *huc, struct dentry *root) +{ + static const struct debugfs_gt_file files[] = { + { "huc_info", &huc_info_fops, NULL }, + }; + + if (!intel_huc_is_supported(huc)) + return; + + intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), huc); +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h new file mode 100644 index 000000000000..be79e992f976 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef DEBUGFS_HUC_H +#define DEBUGFS_HUC_H + +struct intel_huc; +struct dentry; + +void intel_huc_debugfs_register(struct intel_huc *huc, struct dentry *root); + +#endif /* DEBUGFS_HUC_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c index 9cdf4cbe691c..e5ef509c70e8 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c +++ 
b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c @@ -8,23 +8,6 @@ #include "i915_drv.h" /** - * intel_huc_fw_init_early() - initializes HuC firmware struct - * @huc: intel_huc struct - * - * On platforms with HuC selects firmware for uploading - */ -void intel_huc_fw_init_early(struct intel_huc *huc) -{ - struct intel_gt *gt = huc_to_gt(huc); - struct intel_uc *uc = &gt->uc; - struct drm_i915_private *i915 = gt->i915; - - intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC, - intel_uc_wants_guc(uc), - INTEL_INFO(i915)->platform, INTEL_REVID(i915)); -} - -/** * intel_huc_fw_upload() - load HuC uCode to device * @huc: intel_huc structure * diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h index b791269ce923..12f264ee3e0b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h @@ -8,7 +8,6 @@ struct intel_huc; -void intel_huc_fw_init_early(struct intel_huc *huc); int intel_huc_fw_upload(struct intel_huc *huc); #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index a4cbe06e06bd..f518fe05c6f9 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -45,12 +45,12 @@ static void __confirm_options(struct intel_uc *uc) { struct drm_i915_private *i915 = uc_to_gt(uc)->i915; - DRM_DEV_DEBUG_DRIVER(i915->drm.dev, - "enable_guc=%d (guc:%s submission:%s huc:%s)\n", - i915_modparams.enable_guc, - yesno(intel_uc_wants_guc(uc)), - yesno(intel_uc_wants_guc_submission(uc)), - yesno(intel_uc_wants_huc(uc))); + drm_dbg(&i915->drm, + "enable_guc=%d (guc:%s submission:%s huc:%s)\n", + i915_modparams.enable_guc, + yesno(intel_uc_wants_guc(uc)), + yesno(intel_uc_wants_guc_submission(uc)), + yesno(intel_uc_wants_huc(uc))); if (i915_modparams.enable_guc == -1) return; @@ -63,25 +63,25 @@ static void __confirm_options(struct intel_uc *uc) } if (!intel_uc_supports_guc(uc)) - dev_info(i915->drm.dev, + drm_info(&i915->drm, "Incompatible option enable_guc=%d - %s\n", i915_modparams.enable_guc, "GuC is not supported!"); if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC && !intel_uc_supports_huc(uc)) - dev_info(i915->drm.dev, + drm_info(&i915->drm, "Incompatible option enable_guc=%d - %s\n", i915_modparams.enable_guc, "HuC is not supported!"); if (i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION && !intel_uc_supports_guc_submission(uc)) - dev_info(i915->drm.dev, + drm_info(&i915->drm, "Incompatible option enable_guc=%d - %s\n", i915_modparams.enable_guc, "GuC submission is N/A"); if (i915_modparams.enable_guc & ~(ENABLE_GUC_SUBMISSION | ENABLE_GUC_LOAD_HUC)) - dev_info(i915->drm.dev, + drm_info(&i915->drm, "Incompatible option enable_guc=%d - %s\n", i915_modparams.enable_guc, "undocumented flag"); } @@ -131,6 +131,13 @@ static void __uc_free_load_err_log(struct intel_uc *uc) i915_gem_object_put(log); } +void intel_uc_driver_remove(struct intel_uc *uc) +{ + intel_uc_fini_hw(uc); + intel_uc_fini(uc); + __uc_free_load_err_log(uc); +} + static inline bool guc_communication_enabled(struct intel_guc *guc) { return intel_guc_ct_enabled(&guc->ct); @@ -311,8 +318,6 @@ static void __uc_fini(struct intel_uc *uc) { intel_huc_fini(&uc->huc); intel_guc_fini(&uc->guc); - - __uc_free_load_err_log(uc); } static int __uc_sanitize(struct intel_uc *uc) @@ -475,14 +480,14 @@ static int __uc_init_hw(struct intel_uc *uc) if (intel_uc_uses_guc_submission(uc)) intel_guc_submission_enable(guc); - dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n", + 
drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n", intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path, guc->fw.major_ver_found, guc->fw.minor_ver_found, "submission", enableddisabled(intel_uc_uses_guc_submission(uc))); if (intel_uc_uses_huc(uc)) { - dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n", + drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n", intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC), huc->fw.path, huc->fw.major_ver_found, huc->fw.minor_ver_found, @@ -503,7 +508,7 @@ err_out: __uc_sanitize(uc); if (!ret) { - dev_notice(i915->drm.dev, "GuC is uninitialized\n"); + drm_notice(&i915->drm, "GuC is uninitialized\n"); /* We want to run without GuC submission */ return 0; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h index 5ae7b50b7dc1..9c954c589edf 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h @@ -34,6 +34,7 @@ struct intel_uc { void intel_uc_init_early(struct intel_uc *uc); void intel_uc_driver_late_release(struct intel_uc *uc); +void intel_uc_driver_remove(struct intel_uc *uc); void intel_uc_init_mmio(struct intel_uc *uc); void intel_uc_reset_prepare(struct intel_uc *uc); void intel_uc_suspend(struct intel_uc *uc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c new file mode 100644 index 000000000000..9d16b784aa0d --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include <linux/debugfs.h> + +#include "intel_guc_debugfs.h" +#include "intel_huc_debugfs.h" +#include "intel_uc.h" +#include "intel_uc_debugfs.h" + +void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root) +{ + struct dentry *root; + + if (!gt_root) + return; + + /* GuC and HuC go always in pair, no need to check both */ + if (!intel_uc_supports_guc(uc)) + return; + + root = debugfs_create_dir("uc", gt_root); + if (IS_ERR(root)) + return; + + intel_guc_debugfs_register(&uc->guc, root); + intel_huc_debugfs_register(&uc->huc, root); +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h new file mode 100644 index 000000000000..010ce250d223 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef DEBUGFS_UC_H +#define DEBUGFS_UC_H + +struct intel_uc; +struct dentry; + +void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root); + +#endif /* DEBUGFS_UC_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 18c755203688..e1caae93996d 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -11,26 +11,32 @@ #include "intel_uc_fw_abi.h" #include "i915_drv.h" -static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw) +static inline struct intel_gt * +____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type) { - GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED); - if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) + if (type == INTEL_UC_FW_TYPE_GUC) return container_of(uc_fw, struct intel_gt, uc.guc.fw); - GEM_BUG_ON(uc_fw->type != INTEL_UC_FW_TYPE_HUC); + GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC); return container_of(uc_fw, struct intel_gt, uc.huc.fw); } +static inline struct intel_gt 
*__uc_fw_to_gt(struct intel_uc_fw *uc_fw) +{ + GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED); + return ____uc_fw_to_gt(uc_fw, uc_fw->type); +} + #ifdef CONFIG_DRM_I915_DEBUG_GUC void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, enum intel_uc_fw_status status) { uc_fw->__status = status; - DRM_DEV_DEBUG_DRIVER(__uc_fw_to_gt(uc_fw)->i915->drm.dev, - "%s firmware -> %s\n", - intel_uc_fw_type_repr(uc_fw->type), - status == INTEL_UC_FIRMWARE_SELECTED ? - uc_fw->path : intel_uc_fw_status_repr(status)); + drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm, + "%s firmware -> %s\n", + intel_uc_fw_type_repr(uc_fw->type), + status == INTEL_UC_FIRMWARE_SELECTED ? + uc_fw->path : intel_uc_fw_status_repr(status)); } #endif @@ -187,17 +193,15 @@ static void __uc_fw_user_override(struct intel_uc_fw *uc_fw) * intel_uc_fw_init_early - initialize the uC object and select the firmware * @uc_fw: uC firmware * @type: type of uC - * @supported: is uC support possible - * @platform: platform identifier - * @rev: hardware revision * * Initialize the state of our uC object and relevant tracking and select the * firmware to fetch and load. */ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, - enum intel_uc_fw_type type, bool supported, - enum intel_platform platform, u8 rev) + enum intel_uc_fw_type type) { + struct drm_i915_private *i915 = ____uc_fw_to_gt(uc_fw, type)->i915; + /* * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status * before we're looked at the HW caps to see if we have uc support @@ -208,8 +212,10 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, uc_fw->type = type; - if (supported) { - __uc_fw_auto_select(uc_fw, platform, rev); + if (HAS_GT_UC(i915)) { + __uc_fw_auto_select(uc_fw, + INTEL_INFO(i915)->platform, + INTEL_REVID(i915)); __uc_fw_user_override(uc_fw); } @@ -290,7 +296,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) /* Check the size of the blob before examining buffer contents */ if (unlikely(fw->size < sizeof(struct uc_css_header))) { - dev_warn(dev, "%s firmware %s: invalid size: %zu < %zu\n", + drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n", intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, fw->size, sizeof(struct uc_css_header)); err = -ENODATA; @@ -303,7 +309,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw - css->exponent_size_dw) * sizeof(u32); if (unlikely(size != sizeof(struct uc_css_header))) { - dev_warn(dev, + drm_warn(&i915->drm, "%s firmware %s: unexpected header size: %zu != %zu\n", intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, fw->size, sizeof(struct uc_css_header)); @@ -316,7 +322,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) /* now RSA */ if (unlikely(css->key_size_dw != UOS_RSA_SCRATCH_COUNT)) { - dev_warn(dev, "%s firmware %s: unexpected key size: %u != %u\n", + drm_warn(&i915->drm, "%s firmware %s: unexpected key size: %u != %u\n", intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, css->key_size_dw, UOS_RSA_SCRATCH_COUNT); err = -EPROTO; @@ -327,7 +333,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) /* At least, it should have header, uCode and RSA. Size of all three. 
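After the refactor above, intel_uc_fw_init_early() only needs the firmware struct and its type; the platform, revision and GuC/HuC availability are looked up internally via ____uc_fw_to_gt() and HAS_GT_UC(). A hedged sketch of what a caller now looks like (the real GuC/HuC init paths are not part of this hunk, so treat the wrapper as illustrative):

/* Illustrative caller only; not taken from this diff. */
static void example_huc_init_early(struct intel_huc *huc)
{
	/* no more supported/platform/rev arguments; the helper derives them
	 * from the owning intel_gt
	 */
	intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC);
}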
*/ size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size; if (unlikely(fw->size < size)) { - dev_warn(dev, "%s firmware %s: invalid size: %zu < %zu\n", + drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n", intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, fw->size, size); err = -ENOEXEC; @@ -337,7 +343,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) /* Sanity check whether this fw is not larger than whole WOPCM memory */ size = __intel_uc_fw_get_upload_size(uc_fw); if (unlikely(size >= i915->wopcm.size)) { - dev_warn(dev, "%s firmware %s: invalid size: %zu > %zu\n", + drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n", intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, size, (size_t)i915->wopcm.size); err = -E2BIG; @@ -352,7 +358,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) if (uc_fw->major_ver_found != uc_fw->major_ver_wanted || uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) { - dev_notice(dev, "%s firmware %s: unexpected version: %u.%u != %u.%u\n", + drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n", intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, uc_fw->major_ver_found, uc_fw->minor_ver_found, uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted); @@ -380,9 +386,9 @@ fail: INTEL_UC_FIRMWARE_MISSING : INTEL_UC_FIRMWARE_ERROR); - dev_notice(dev, "%s firmware %s: fetch failed with error %d\n", + drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n", intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err); - dev_info(dev, "%s firmware(s) can be downloaded from %s\n", + drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n", intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL); release_firmware(fw); /* OK even if fw is NULL */ @@ -467,7 +473,7 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags) /* Wait for DMA to finish */ ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100); if (ret) - dev_err(gt->i915->drm.dev, "DMA for %s fw failed, DMA_CTRL=%u\n", + drm_err(>->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n", intel_uc_fw_type_repr(uc_fw->type), intel_uncore_read_fw(uncore, DMA_CTRL)); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h index 888ff0de0244..23d3a423ac0f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -239,8 +239,7 @@ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw) } void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, - enum intel_uc_fw_type type, bool supported, - enum intel_platform platform, u8 rev); + enum intel_uc_fw_type type); int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw); void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw); int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 offset, u32 dma_flags); diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 8b13f091cee2..0d6d59871308 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -35,7 +35,7 @@ */ #include "i915_drv.h" -#include "i915_gem_fence_reg.h" +#include "gt/intel_ggtt_fencing.h" #include "gvt.h" static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 9e065ad0658f..a3cc080a46c6 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -164,6 +164,7 @@ struct decode_info { #define 
OP_STATE_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x01) #define OP_STATE_SIP OP_3D_MEDIA(0x0, 0x1, 0x02) #define OP_3D_MEDIA_0_1_4 OP_3D_MEDIA(0x0, 0x1, 0x04) +#define OP_SWTESS_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x03) #define OP_3DSTATE_VF_STATISTICS_GM45 OP_3D_MEDIA(0x1, 0x0, 0x0B) @@ -967,18 +968,6 @@ static int cmd_handler_lri(struct parser_exec_state *s) { int i, ret = 0; int cmd_len = cmd_length(s); - u32 valid_len = CMD_LEN(1); - - /* - * Official intel docs are somewhat sloppy , check the definition of - * MI_LOAD_REGISTER_IMM. - */ - #define MAX_VALID_LEN 127 - if ((cmd_len < valid_len) || (cmd_len > MAX_VALID_LEN)) { - gvt_err("len is not valid: len=%u valid_len=%u\n", - cmd_len, valid_len); - return -EFAULT; - } for (i = 1; i < cmd_len; i += 2) { if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) { @@ -2485,6 +2474,9 @@ static const struct cmd_info cmd_info[] = { {"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_1(1), 8, NULL}, + {"OP_SWTESS_BASE_ADDRESS", OP_SWTESS_BASE_ADDRESS, + F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_2(1, 2), 3, NULL}, + {"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 6e5c9885d9fe..a83df2f84eb9 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -221,7 +221,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= - (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI | + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_B << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { @@ -241,7 +241,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= - (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI | + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_C << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { @@ -261,7 +261,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= - (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI | + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_D << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 0182e2a5acff..2faf50e1b051 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -462,11 +462,14 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; } -/* ascendingly sorted */ +/* sorted in ascending order */ static i915_reg_t force_nonpriv_white_list[] = { + _MMIO(0xd80), GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec) GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248) - PS_INVOCATION_COUNT,//_MMIO(0x2348) + CL_PRIMITIVES_COUNT, //_MMIO(0x2340) + PS_INVOCATION_COUNT, //_MMIO(0x2348) + PS_DEPTH_COUNT, //_MMIO(0x2350) GEN8_CS_CHICKEN1,//_MMIO(0x2580) _MMIO(0x2690), _MMIO(0x2694), @@ -491,6 +494,7 @@ static i915_reg_t force_nonpriv_white_list[] = { _MMIO(0xe18c), _MMIO(0xe48c), _MMIO(0xe5f4), + _MMIO(0x64844), }; /* a simple bsearch */ diff --git 
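The force_nonpriv_white_list[] above is kept sorted by ascending register offset precisely so the "simple bsearch" mentioned at the end of the hunk can test membership in O(log n). A standalone sketch of that lookup over plain u32 offsets (assumed layout; the real table stores i915_reg_t values):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A subset of the whitelist offsets from the hunk, already sorted ascending. */
static const uint32_t whitelist[] = {
	0x0d80, 0x20ec, 0x2248, 0x2340, 0x2348, 0x2350, 0x2580,
	0x2690, 0x2694, 0xe18c, 0xe48c, 0xe5f4, 0x64844,
};

static bool in_whitelist(uint32_t offset)
{
	size_t lo = 0, hi = sizeof(whitelist) / sizeof(whitelist[0]);

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (whitelist[mid] == offset)
			return true;
		if (whitelist[mid] < offset)
			lo = mid + 1;
		else
			hi = mid;
	}
	return false;
}

int main(void)
{
	printf("0x2348: %d\n", in_whitelist(0x2348)); /* 1, PS_INVOCATION_COUNT */
	printf("0x2344: %d\n", in_whitelist(0x2344)); /* 0, not whitelisted */
	return 0;
}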
a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 074c4efb58eb..eee530453aa6 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -131,6 +131,7 @@ struct kvmgt_vdev { struct work_struct release_work; atomic_t released; struct vfio_device *vfio_device; + struct vfio_group *vfio_group; }; static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu) @@ -151,6 +152,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); int total_pages; int npage; int ret; @@ -160,7 +162,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, for (npage = 0; npage < total_pages; npage++) { unsigned long cur_gfn = gfn + npage; - ret = vfio_unpin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1); + ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1); drm_WARN_ON(&i915->drm, ret != 1); } } @@ -169,6 +171,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, struct page **page) { + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); unsigned long base_pfn = 0; int total_pages; int npage; @@ -183,8 +186,8 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long cur_gfn = gfn + npage; unsigned long pfn; - ret = vfio_pin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1, - IOMMU_READ | IOMMU_WRITE, &pfn); + ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1, + IOMMU_READ | IOMMU_WRITE, &pfn); if (ret != 1) { gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n", cur_gfn, ret); @@ -792,6 +795,7 @@ static int intel_vgpu_open(struct mdev_device *mdev) struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); unsigned long events; int ret; + struct vfio_group *vfio_group; vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier; vdev->group_notifier.notifier_call = intel_vgpu_group_notifier; @@ -814,6 +818,14 @@ static int intel_vgpu_open(struct mdev_device *mdev) goto undo_iommu; } + vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev)); + if (IS_ERR_OR_NULL(vfio_group)) { + ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group); + gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n"); + goto undo_register; + } + vdev->vfio_group = vfio_group; + /* Take a module reference as mdev core doesn't take * a reference for vendor driver. 
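The kvmgt changes above move pinning from the mdev-device based vfio_pin_pages()/vfio_unpin_pages() to the vfio_group_* variants, which means the vGPU must hold an external-user reference on its VFIO group for as long as it is open (the vfio_dma_rw() based kvmgt_rw_gpa() in the next hunk reuses the same handle). A hedged outline of the pairing, using only calls that appear in this diff, with error handling and locking omitted:

/* Sketch of the reference pairing only; not a complete open/release path. */
static void example_vfio_group_usage(struct kvmgt_vdev *vdev,
				     struct mdev_device *mdev)
{
	unsigned long gfn = 0, pfn;

	/* on open: take the external-user reference from the mdev's device */
	vdev->vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev));

	/* while open: pin/unpin guest pages through the group handle */
	vfio_group_pin_pages(vdev->vfio_group, &gfn, 1,
			     IOMMU_READ | IOMMU_WRITE, &pfn);
	vfio_group_unpin_pages(vdev->vfio_group, &gfn, 1);

	/* on release (or on an open error path): drop the reference */
	vfio_group_put_external_user(vdev->vfio_group);
	vdev->vfio_group = NULL;
}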
*/ @@ -830,6 +842,10 @@ static int intel_vgpu_open(struct mdev_device *mdev) return ret; undo_group: + vfio_group_put_external_user(vdev->vfio_group); + vdev->vfio_group = NULL; + +undo_register: vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &vdev->group_notifier); @@ -884,6 +900,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) kvmgt_guest_exit(info); intel_vgpu_release_msi_eventfd_ctx(vgpu); + vfio_group_put_external_user(vdev->vfio_group); vdev->kvm = NULL; vgpu->handle = 0; @@ -2035,33 +2052,14 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, void *buf, unsigned long len, bool write) { struct kvmgt_guest_info *info; - struct kvm *kvm; - int idx, ret; - bool kthread = current->mm == NULL; if (!handle_valid(handle)) return -ESRCH; info = (struct kvmgt_guest_info *)handle; - kvm = info->kvm; - - if (kthread) { - if (!mmget_not_zero(kvm->mm)) - return -EFAULT; - use_mm(kvm->mm); - } - - idx = srcu_read_lock(&kvm->srcu); - ret = write ? kvm_write_guest(kvm, gpa, buf, len) : - kvm_read_guest(kvm, gpa, buf, len); - srcu_read_unlock(&kvm->srcu, idx); - - if (kthread) { - unuse_mm(kvm->mm); - mmput(kvm->mm); - } - return ret; + return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group, + gpa, buf, len, write); } static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa, diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 1c95bf8cbed0..cb11c3184085 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -296,8 +296,8 @@ shadow_context_descriptor_update(struct intel_context *ce, * Update bits 0-11 of the context descriptor which includes flags * like GEN8_CTX_* cached in desc_template */ - desc &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT); - desc |= workload->ctx_desc.addressing_mode << + desc &= ~(0x3ull << GEN8_CTX_ADDRESSING_MODE_SHIFT); + desc |= (u64)workload->ctx_desc.addressing_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT; ce->lrc_desc = desc; diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index c4048628188a..d960d0be5bd2 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -496,7 +496,7 @@ static int flush_lazy_signals(struct i915_active *ref) return err; } -int i915_active_wait(struct i915_active *ref) +int __i915_active_wait(struct i915_active *ref, int state) { int err; @@ -511,7 +511,9 @@ int i915_active_wait(struct i915_active *ref) if (err) return err; - if (wait_var_event_interruptible(ref, i915_active_is_idle(ref))) + if (!i915_active_is_idle(ref) && + ___wait_var_event(ref, i915_active_is_idle(ref), + state, 0, 0, schedule())) return -EINTR; flush_work(&ref->work); @@ -540,34 +542,88 @@ static int __await_active(struct i915_active_fence *active, return 0; } +struct wait_barrier { + struct wait_queue_entry base; + struct i915_active *ref; +}; + +static int +barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key) +{ + struct wait_barrier *wb = container_of(wq, typeof(*wb), base); + + if (i915_active_is_idle(wb->ref)) { + list_del(&wq->entry); + i915_sw_fence_complete(wq->private); + kfree(wq); + } + + return 0; +} + +static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence) +{ + struct wait_barrier *wb; + + wb = kmalloc(sizeof(*wb), GFP_KERNEL); + if (unlikely(!wb)) + return -ENOMEM; + + GEM_BUG_ON(i915_active_is_idle(ref)); + if (!i915_sw_fence_await(fence)) { + kfree(wb); + return -EINVAL; + } + + wb->base.flags = 0; + wb->base.func = 
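The shadow_context_descriptor_update() fix above rebuilds the addressing-mode mask with 0x3ull and casts the mode to u64 before shifting, so the whole descriptor update happens in 64-bit arithmetic. As a general illustration of why the explicit constant width matters when masking a 64-bit value (a generic, standalone C demo, not a claim about the exact failure the patch addressed):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t desc = 0xaabbccdd11223344ull;
	int shift = 3;	/* stand-in for GEN8_CTX_ADDRESSING_MODE_SHIFT */

	/* 32-bit unsigned constant: the complement is computed at 32 bits and
	 * then zero-extended, so the AND silently wipes the top 32 bits.
	 */
	printf("0x%016" PRIx64 "\n", desc & ~(0x3u << shift));

	/* 64-bit constant: only the two targeted bits are cleared. */
	printf("0x%016" PRIx64 "\n", desc & ~(0x3ull << shift));
	return 0;
}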
barrier_wake; + wb->base.private = fence; + wb->ref = ref; + + add_wait_queue(__var_waitqueue(ref), &wb->base); + return 0; +} + static int await_active(struct i915_active *ref, unsigned int flags, int (*fn)(void *arg, struct dma_fence *fence), - void *arg) + void *arg, struct i915_sw_fence *barrier) { int err = 0; - /* We must always wait for the exclusive fence! */ - if (rcu_access_pointer(ref->excl.fence)) { + if (!i915_active_acquire_if_busy(ref)) + return 0; + + if (flags & I915_ACTIVE_AWAIT_EXCL && + rcu_access_pointer(ref->excl.fence)) { err = __await_active(&ref->excl, fn, arg); if (err) - return err; + goto out; } - if (flags & I915_ACTIVE_AWAIT_ALL && i915_active_acquire_if_busy(ref)) { + if (flags & I915_ACTIVE_AWAIT_ACTIVE) { struct active_node *it, *n; rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { err = __await_active(&it->base, fn, arg); if (err) - break; + goto out; } - i915_active_release(ref); + } + + if (flags & I915_ACTIVE_AWAIT_BARRIER) { + err = flush_lazy_signals(ref); if (err) - return err; + goto out; + + err = __await_barrier(ref, barrier); + if (err) + goto out; } - return 0; +out: + i915_active_release(ref); + return err; } static int rq_await_fence(void *arg, struct dma_fence *fence) @@ -579,7 +635,7 @@ int i915_request_await_active(struct i915_request *rq, struct i915_active *ref, unsigned int flags) { - return await_active(ref, flags, rq_await_fence, rq); + return await_active(ref, flags, rq_await_fence, rq, &rq->submit); } static int sw_await_fence(void *arg, struct dma_fence *fence) @@ -592,7 +648,7 @@ int i915_sw_fence_await_active(struct i915_sw_fence *fence, struct i915_active *ref, unsigned int flags) { - return await_active(ref, flags, sw_await_fence, fence); + return await_active(ref, flags, sw_await_fence, fence, fence); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) @@ -818,7 +874,7 @@ void i915_active_acquire_barrier(struct i915_active *ref) GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); llist_add(barrier_to_ll(node), &engine->barrier_tasks); - intel_engine_pm_put(engine); + intel_engine_pm_put_delay(engine, 1); } } @@ -937,6 +993,59 @@ void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb) active_fence_cb(fence, cb); } +struct auto_active { + struct i915_active base; + struct kref ref; +}; + +struct i915_active *i915_active_get(struct i915_active *ref) +{ + struct auto_active *aa = container_of(ref, typeof(*aa), base); + + kref_get(&aa->ref); + return &aa->base; +} + +static void auto_release(struct kref *ref) +{ + struct auto_active *aa = container_of(ref, typeof(*aa), ref); + + i915_active_fini(&aa->base); + kfree(aa); +} + +void i915_active_put(struct i915_active *ref) +{ + struct auto_active *aa = container_of(ref, typeof(*aa), base); + + kref_put(&aa->ref, auto_release); +} + +static int auto_active(struct i915_active *ref) +{ + i915_active_get(ref); + return 0; +} + +static void auto_retire(struct i915_active *ref) +{ + i915_active_put(ref); +} + +struct i915_active *i915_active_create(void) +{ + struct auto_active *aa; + + aa = kmalloc(sizeof(*aa), GFP_KERNEL); + if (!aa) + return NULL; + + kref_init(&aa->ref); + i915_active_init(&aa->base, auto_active, auto_retire); + + return &aa->base; +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/i915_active.c" #endif diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h index b3282ae7913c..cf4058150966 100644 --- a/drivers/gpu/drm/i915/i915_active.h +++ b/drivers/gpu/drm/i915/i915_active.h @@ -181,7 +181,11 @@ static 
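The auto_active helpers added above wrap an i915_active in a kref so several owners can share one tracker; the last i915_active_put() runs i915_active_fini() and frees the allocation through auto_release(). A hedged usage sketch (the consumer shown here is hypothetical):

/* Illustrative consumer of i915_active_create()/get()/put(). */
static int example_shared_tracker(void)
{
	struct i915_active *ref;

	ref = i915_active_create();	/* kref starts at 1 */
	if (!ref)
		return -ENOMEM;

	i915_active_get(ref);		/* a second owner takes its own reference */
	/* ... both owners track activity through 'ref' ... */
	i915_active_put(ref);		/* second owner done */

	i915_active_put(ref);		/* final put: i915_active_fini() + kfree() */
	return 0;
}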
inline bool i915_active_has_exclusive(struct i915_active *ref) return rcu_access_pointer(ref->excl.fence); } -int i915_active_wait(struct i915_active *ref); +int __i915_active_wait(struct i915_active *ref, int state); +static inline int i915_active_wait(struct i915_active *ref) +{ + return __i915_active_wait(ref, TASK_INTERRUPTIBLE); +} int i915_sw_fence_await_active(struct i915_sw_fence *fence, struct i915_active *ref, @@ -189,7 +193,9 @@ int i915_sw_fence_await_active(struct i915_sw_fence *fence, int i915_request_await_active(struct i915_request *rq, struct i915_active *ref, unsigned int flags); -#define I915_ACTIVE_AWAIT_ALL BIT(0) +#define I915_ACTIVE_AWAIT_EXCL BIT(0) +#define I915_ACTIVE_AWAIT_ACTIVE BIT(1) +#define I915_ACTIVE_AWAIT_BARRIER BIT(2) int i915_active_acquire(struct i915_active *ref); bool i915_active_acquire_if_busy(struct i915_active *ref); @@ -221,4 +227,8 @@ void i915_request_add_active_barriers(struct i915_request *rq); void i915_active_print(struct i915_active *ref, struct drm_printer *m); void i915_active_unlock_wait(struct i915_active *ref); +struct i915_active *i915_active_create(void); +struct i915_active *i915_active_get(struct i915_active *ref); +void i915_active_put(struct i915_active *ref); + #endif /* _I915_ACTIVE_H_ */ diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 6ca797128aa1..aa35a59f1c7d 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -37,7 +37,6 @@ #include "gt/intel_reset.h" #include "gt/intel_rc6.h" #include "gt/intel_rps.h" -#include "gt/uc/intel_guc_submission.h" #include "i915_debugfs.h" #include "i915_debugfs_params.h" @@ -218,7 +217,7 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) struct file_stats { struct i915_address_space *vm; unsigned long count; - u64 total, unbound; + u64 total; u64 active, inactive; u64 closed; }; @@ -234,8 +233,6 @@ static int per_file_stats(int id, void *ptr, void *data) stats->count++; stats->total += obj->base.size; - if (!atomic_read(&obj->bind_count)) - stats->unbound += obj->base.size; spin_lock(&obj->vma.lock); if (!stats->vm) { @@ -285,13 +282,12 @@ static int per_file_stats(int id, void *ptr, void *data) #define print_file_stats(m, name, stats) do { \ if (stats.count) \ - seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \ + seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu closed)\n", \ name, \ stats.count, \ stats.total, \ stats.active, \ stats.inactive, \ - stats.unbound, \ stats.closed); \ } while (0) @@ -745,7 +741,7 @@ i915_error_state_write(struct file *filp, if (!error) return 0; - DRM_DEBUG_DRIVER("Resetting error state\n"); + drm_dbg(&error->i915->drm, "Resetting error state\n"); i915_reset_error_state(error->i915); return cnt; @@ -1251,286 +1247,6 @@ static int i915_llc(struct seq_file *m, void *data) return 0; } -static int i915_huc_load_status_info(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - intel_wakeref_t wakeref; - struct drm_printer p; - - if (!HAS_GT_UC(dev_priv)) - return -ENODEV; - - p = drm_seq_file_printer(m); - intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p); - - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) - seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2)); - - return 0; -} - -static int i915_guc_load_status_info(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = 
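Two header-level notes follow from the hunk above: i915_active_wait() is now just a TASK_INTERRUPTIBLE wrapper around __i915_active_wait(), and the old I915_ACTIVE_AWAIT_ALL flag is replaced by separate EXCL/ACTIVE/BARRIER bits that callers combine explicitly. A hedged fragment (these call sites are not part of this diff):

/* Illustrative fragments only. */

/* wait without allowing signals to interrupt the sleep */
err = __i915_active_wait(ref, TASK_UNINTERRUPTIBLE);

/* make a request wait on the exclusive fence plus all shared trackers */
err = i915_request_await_active(rq, ref,
				I915_ACTIVE_AWAIT_EXCL |
				I915_ACTIVE_AWAIT_ACTIVE);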
node_to_i915(m->private); - intel_wakeref_t wakeref; - struct drm_printer p; - - if (!HAS_GT_UC(dev_priv)) - return -ENODEV; - - p = drm_seq_file_printer(m); - intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p); - - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { - u32 tmp = I915_READ(GUC_STATUS); - u32 i; - - seq_printf(m, "\nGuC status 0x%08x:\n", tmp); - seq_printf(m, "\tBootrom status = 0x%x\n", - (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT); - seq_printf(m, "\tuKernel status = 0x%x\n", - (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT); - seq_printf(m, "\tMIA Core status = 0x%x\n", - (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT); - seq_puts(m, "\nScratch registers:\n"); - for (i = 0; i < 16; i++) { - seq_printf(m, "\t%2d: \t0x%x\n", - i, I915_READ(SOFT_SCRATCH(i))); - } - } - - return 0; -} - -static const char * -stringify_guc_log_type(enum guc_log_buffer_type type) -{ - switch (type) { - case GUC_ISR_LOG_BUFFER: - return "ISR"; - case GUC_DPC_LOG_BUFFER: - return "DPC"; - case GUC_CRASH_DUMP_LOG_BUFFER: - return "CRASH"; - default: - MISSING_CASE(type); - } - - return ""; -} - -static void i915_guc_log_info(struct seq_file *m, struct intel_guc_log *log) -{ - enum guc_log_buffer_type type; - - if (!intel_guc_log_relay_created(log)) { - seq_puts(m, "GuC log relay not created\n"); - return; - } - - seq_puts(m, "GuC logging stats:\n"); - - seq_printf(m, "\tRelay full count: %u\n", - log->relay.full_count); - - for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) { - seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n", - stringify_guc_log_type(type), - log->stats[type].flush, - log->stats[type].sampled_overflow); - } -} - -static int i915_guc_info(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct intel_uc *uc = &dev_priv->gt.uc; - - if (!intel_uc_uses_guc(uc)) - return -ENODEV; - - i915_guc_log_info(m, &uc->guc.log); - - /* Add more as required ... 
*/ - - return 0; -} - -static int i915_guc_stage_pool(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct intel_uc *uc = &dev_priv->gt.uc; - struct guc_stage_desc *desc = uc->guc.stage_desc_pool_vaddr; - int index; - - if (!intel_uc_uses_guc_submission(uc)) - return -ENODEV; - - for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) { - struct intel_engine_cs *engine; - - if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE)) - continue; - - seq_printf(m, "GuC stage descriptor %u:\n", index); - seq_printf(m, "\tIndex: %u\n", desc->stage_id); - seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute); - seq_printf(m, "\tPriority: %d\n", desc->priority); - seq_printf(m, "\tDoorbell id: %d\n", desc->db_id); - seq_printf(m, "\tEngines used: 0x%x\n", - desc->engines_used); - seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n", - desc->db_trigger_phy, - desc->db_trigger_cpu, - desc->db_trigger_uk); - seq_printf(m, "\tProcess descriptor: 0x%x\n", - desc->process_desc); - seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n", - desc->wq_addr, desc->wq_size); - seq_putc(m, '\n'); - - for_each_uabi_engine(engine, dev_priv) { - u32 guc_engine_id = engine->guc_id; - struct guc_execlist_context *lrc = - &desc->lrc[guc_engine_id]; - - seq_printf(m, "\t%s LRC:\n", engine->name); - seq_printf(m, "\t\tContext desc: 0x%x\n", - lrc->context_desc); - seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id); - seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca); - seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin); - seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end); - seq_putc(m, '\n'); - } - } - - return 0; -} - -static int i915_guc_log_dump(struct seq_file *m, void *data) -{ - struct drm_info_node *node = m->private; - struct drm_i915_private *dev_priv = node_to_i915(node); - bool dump_load_err = !!node->info_ent->data; - struct drm_i915_gem_object *obj = NULL; - u32 *log; - int i = 0; - - if (!HAS_GT_UC(dev_priv)) - return -ENODEV; - - if (dump_load_err) - obj = dev_priv->gt.uc.load_err_log; - else if (dev_priv->gt.uc.guc.log.vma) - obj = dev_priv->gt.uc.guc.log.vma->obj; - - if (!obj) - return 0; - - log = i915_gem_object_pin_map(obj, I915_MAP_WC); - if (IS_ERR(log)) { - DRM_DEBUG("Failed to pin object\n"); - seq_puts(m, "(log data unaccessible)\n"); - return PTR_ERR(log); - } - - for (i = 0; i < obj->base.size / sizeof(u32); i += 4) - seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n", - *(log + i), *(log + i + 1), - *(log + i + 2), *(log + i + 3)); - - seq_putc(m, '\n'); - - i915_gem_object_unpin_map(obj); - - return 0; -} - -static int i915_guc_log_level_get(void *data, u64 *val) -{ - struct drm_i915_private *dev_priv = data; - struct intel_uc *uc = &dev_priv->gt.uc; - - if (!intel_uc_uses_guc(uc)) - return -ENODEV; - - *val = intel_guc_log_get_level(&uc->guc.log); - - return 0; -} - -static int i915_guc_log_level_set(void *data, u64 val) -{ - struct drm_i915_private *dev_priv = data; - struct intel_uc *uc = &dev_priv->gt.uc; - - if (!intel_uc_uses_guc(uc)) - return -ENODEV; - - return intel_guc_log_set_level(&uc->guc.log, val); -} - -DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops, - i915_guc_log_level_get, i915_guc_log_level_set, - "%lld\n"); - -static int i915_guc_log_relay_open(struct inode *inode, struct file *file) -{ - struct drm_i915_private *i915 = inode->i_private; - struct intel_guc *guc = &i915->gt.uc.guc; - struct intel_guc_log *log = &guc->log; - - if (!intel_guc_is_ready(guc)) - return -ENODEV; - - 
file->private_data = log; - - return intel_guc_log_relay_open(log); -} - -static ssize_t -i915_guc_log_relay_write(struct file *filp, - const char __user *ubuf, - size_t cnt, - loff_t *ppos) -{ - struct intel_guc_log *log = filp->private_data; - int val; - int ret; - - ret = kstrtoint_from_user(ubuf, cnt, 0, &val); - if (ret < 0) - return ret; - - /* - * Enable and start the guc log relay on value of 1. - * Flush log relay for any other value. - */ - if (val == 1) - ret = intel_guc_log_relay_start(log); - else - intel_guc_log_relay_flush(log); - - return ret ?: cnt; -} - -static int i915_guc_log_relay_release(struct inode *inode, struct file *file) -{ - struct drm_i915_private *i915 = inode->i_private; - struct intel_guc *guc = &i915->gt.uc.guc; - - intel_guc_log_relay_close(&guc->log); - return 0; -} - -static const struct file_operations i915_guc_log_relay_fops = { - .owner = THIS_MODULE, - .open = i915_guc_log_relay_open, - .write = i915_guc_log_relay_write, - .release = i915_guc_log_relay_release, -}; - static int i915_runtime_pm_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -2139,12 +1855,6 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_gem_objects", i915_gem_object_info, 0}, {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, {"i915_gem_interrupt", i915_interrupt_info, 0}, - {"i915_guc_info", i915_guc_info, 0}, - {"i915_guc_load_status", i915_guc_load_status_info, 0}, - {"i915_guc_log_dump", i915_guc_log_dump, 0}, - {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1}, - {"i915_guc_stage_pool", i915_guc_stage_pool, 0}, - {"i915_huc_load_status", i915_huc_load_status_info, 0}, {"i915_frequency_info", i915_frequency_info, 0}, {"i915_ring_freq_table", i915_ring_freq_table, 0}, {"i915_context_status", i915_context_status, 0}, @@ -2172,11 +1882,9 @@ static const struct i915_debugfs_files { {"i915_error_state", &i915_error_state_fops}, {"i915_gpu_info", &i915_gpu_info_fops}, #endif - {"i915_guc_log_level", &i915_guc_log_level_fops}, - {"i915_guc_log_relay", &i915_guc_log_relay_fops}, }; -int i915_debugfs_register(struct drm_i915_private *dev_priv) +void i915_debugfs_register(struct drm_i915_private *dev_priv) { struct drm_minor *minor = dev_priv->drm.primary; int i; @@ -2193,7 +1901,7 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv) i915_debugfs_files[i].fops); } - return drm_debugfs_create_files(i915_debugfs_list, - I915_DEBUGFS_ENTRIES, - minor->debugfs_root, minor); + drm_debugfs_create_files(i915_debugfs_list, + I915_DEBUGFS_ENTRIES, + minor->debugfs_root, minor); } diff --git a/drivers/gpu/drm/i915/i915_debugfs.h b/drivers/gpu/drm/i915/i915_debugfs.h index 6da39c76ab5e..1de2736f1248 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.h +++ b/drivers/gpu/drm/i915/i915_debugfs.h @@ -12,10 +12,10 @@ struct drm_i915_private; struct seq_file; #ifdef CONFIG_DEBUG_FS -int i915_debugfs_register(struct drm_i915_private *dev_priv); +void i915_debugfs_register(struct drm_i915_private *dev_priv); void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj); #else -static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) { return 0; } +static inline void i915_debugfs_register(struct drm_i915_private *dev_priv) {} static inline void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) {} #endif diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 82d9df15b22b..641f5e03b661 100644 --- 
a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -43,6 +43,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_irq.h> +#include <drm/drm_managed.h> #include <drm/drm_probe_helper.h> #include "display/intel_acpi.h" @@ -431,8 +432,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) mutex_init(&dev_priv->backlight_lock); mutex_init(&dev_priv->sb_lock); - pm_qos_add_request(&dev_priv->sb_qos, - PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); + cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE); mutex_init(&dev_priv->av_mutex); mutex_init(&dev_priv->wm.wm_mutex); @@ -496,7 +496,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv) vlv_suspend_cleanup(dev_priv); i915_workqueues_cleanup(dev_priv); - pm_qos_remove_request(&dev_priv->sb_qos); + cpu_latency_qos_remove_request(&dev_priv->sb_qos); mutex_destroy(&dev_priv->sb_lock); } @@ -673,8 +673,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) } } - pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); + cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); intel_gt_init_workarounds(dev_priv); @@ -720,7 +719,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) err_msi: if (pdev->msi_enabled) pci_disable_msi(pdev); - pm_qos_remove_request(&dev_priv->pm_qos); + cpu_latency_qos_remove_request(&dev_priv->pm_qos); err_mem_regions: intel_memory_regions_driver_release(dev_priv); err_ggtt: @@ -743,7 +742,7 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) if (pdev->msi_enabled) pci_disable_msi(pdev); - pm_qos_remove_request(&dev_priv->pm_qos); + cpu_latency_qos_remove_request(&dev_priv->pm_qos); } /** @@ -890,6 +889,8 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) return ERR_PTR(err); } + drmm_add_final_kfree(&i915->drm, i915); + i915->drm.pdev = pdev; pci_set_drvdata(pdev, i915); @@ -903,17 +904,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) return i915; } -static void i915_driver_destroy(struct drm_i915_private *i915) -{ - struct pci_dev *pdev = i915->drm.pdev; - - drm_dev_fini(&i915->drm); - kfree(i915); - - /* And make sure we never chase our dangling pointer from pci_dev */ - pci_set_drvdata(pdev, NULL); -} - /** * i915_driver_probe - setup chip and create an initial config * @pdev: PCI device @@ -995,6 +985,8 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i915_welcome_messages(i915); + i915->do_release = true; + return 0; out_cleanup_irq: @@ -1014,7 +1006,7 @@ out_pci_disable: pci_disable_device(pdev); out_fini: i915_probe_error(i915, "Device initialization failed (%d)\n", ret); - i915_driver_destroy(i915); + drm_dev_put(&i915->drm); return ret; } @@ -1054,6 +1046,9 @@ static void i915_driver_release(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; + if (!dev_priv->do_release) + return; + disable_rpm_wakeref_asserts(rpm); i915_gem_driver_release(dev_priv); @@ -1067,7 +1062,6 @@ static void i915_driver_release(struct drm_device *dev) intel_runtime_pm_driver_release(rpm); i915_driver_late_release(dev_priv); - i915_driver_destroy(dev_priv); } static int i915_driver_open(struct drm_device *dev, struct drm_file *file) @@ -1288,7 +1282,6 @@ static int i915_drm_resume(struct drm_device *dev) drm_err(&dev_priv->drm, "failed to re-enable GGTT\n"); 
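The i915_drv.c hunks above follow the core PM QoS rename: the open-coded pm_qos_add_request(..., PM_QOS_CPU_DMA_LATENCY, ...) becomes cpu_latency_qos_add_request(), paired with cpu_latency_qos_remove_request() on the teardown and error paths. A minimal sketch of the pairing (the request would normally live in a long-lived driver structure, as dev_priv->pm_qos does here):

/* Hedged sketch of the renamed CPU latency QoS API. */
static struct pm_qos_request example_qos;

static void example_qos_usage(void)
{
	/* probe/init: no QoS class argument any more, just the target value */
	cpu_latency_qos_add_request(&example_qos, PM_QOS_DEFAULT_VALUE);

	/* ... latency-sensitive lifetime of the device ... */

	/* teardown/error path */
	cpu_latency_qos_remove_request(&example_qos);
}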
i915_ggtt_resume(&dev_priv->ggtt); - i915_gem_restore_fences(&dev_priv->ggtt); intel_csr_ucode_resume(dev_priv); @@ -1606,8 +1599,6 @@ static int intel_runtime_suspend(struct device *kdev) intel_gt_runtime_resume(&dev_priv->gt); - i915_gem_restore_fences(&dev_priv->ggtt); - enable_rpm_wakeref_asserts(rpm); return ret; @@ -1687,7 +1678,6 @@ static int intel_runtime_resume(struct device *kdev) * we can do is to hope that things will still work (and disable RPM). */ intel_gt_runtime_resume(&dev_priv->gt); - i915_gem_restore_fences(&dev_priv->ggtt); /* * On VLV/CHV display interrupts are part of the display diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1f5b9a584f71..b00f0845cbc3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -92,7 +92,6 @@ #include "intel_wopcm.h" #include "i915_gem.h" -#include "i915_gem_fence_reg.h" #include "i915_gem_gtt.h" #include "i915_gpu_error.h" #include "i915_perf_types.h" @@ -109,8 +108,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20200313" -#define DRIVER_TIMESTAMP 1584144591 +#define DRIVER_DATE "20200417" +#define DRIVER_TIMESTAMP 1587105300 struct drm_i915_gem_object; @@ -417,6 +416,7 @@ struct intel_fbc { struct { const struct drm_format_info *format; unsigned int stride; + u64 modifier; } fb; u16 gen9_wa_cfb_stride; s8 fence_id; @@ -540,7 +540,6 @@ struct i915_suspend_saved_registers { u32 saveSWF0[16]; u32 saveSWF1[16]; u32 saveSWF3[3]; - u64 saveFENCE[I915_MAX_NUM_FENCES]; u32 savePCH_PORT_HOTPLUG; u16 saveGCDGMBUS; }; @@ -823,6 +822,9 @@ struct i915_selftest_stash { struct drm_i915_private { struct drm_device drm; + /* FIXME: Device release actions should all be moved to drmm_ */ + bool do_release; + const struct intel_device_info __info; /* Use INTEL_INFO() to access. */ struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. 
*/ struct intel_driver_caps caps; @@ -885,7 +887,6 @@ struct drm_i915_private { struct pci_dev *bridge_dev; - struct intel_engine_cs *engine[I915_NUM_ENGINES]; struct rb_root uabi_engines; struct resource mch_res; @@ -1507,6 +1508,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, (IS_ICELAKE(p) && IS_REVID(p, since, until)) #define TGL_REVID_A0 0x0 +#define TGL_REVID_B0 0x1 +#define TGL_REVID_C0 0x2 #define IS_TGL_REVID(p, since, until) \ (IS_TIGERLAKE(p) && IS_REVID(p, since, until)) @@ -1604,7 +1607,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi) #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg) #define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr) -#define HAS_TRANSCODER_EDP(dev_priv) (INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_EDP] != 0) +#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0) #define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6) #define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p) @@ -1738,6 +1741,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj, unsigned long flags); #define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0) #define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1) +#define I915_GEM_OBJECT_UNBIND_TEST BIT(2) void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ca5420012a22..0cbcb9f54e7d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -118,7 +118,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj, struct i915_vma *vma; int ret; - if (!atomic_read(&obj->bind_count)) + if (list_empty(&obj->vma.list)) return 0; /* @@ -141,6 +141,11 @@ try_again: if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) continue; + if (flags & I915_GEM_OBJECT_UNBIND_TEST) { + ret = -EBUSY; + break; + } + ret = -EAGAIN; if (!i915_vm_tryopen(vm)) break; @@ -993,18 +998,16 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, return ERR_PTR(ret); } + ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); + if (ret) + return ERR_PTR(ret); + if (vma->fence && !i915_gem_object_is_tiled(obj)) { mutex_lock(&ggtt->vm.mutex); - ret = i915_vma_revoke_fence(vma); + i915_vma_revoke_fence(vma); mutex_unlock(&ggtt->vm.mutex); - if (ret) - return ERR_PTR(ret); } - ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); - if (ret) - return ERR_PTR(ret); - ret = i915_vma_wait_for_bind(vma); if (ret) { i915_vma_unpin(vma); @@ -1156,7 +1159,6 @@ err_unlock: /* Minimal basic recovery for KMS */ ret = i915_ggtt_enable_hw(dev_priv); i915_ggtt_resume(&dev_priv->ggtt); - i915_gem_restore_fences(&dev_priv->ggtt); intel_init_clock_gating(dev_priv); } diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 4518b9b35c3d..0ba7b1e881c0 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -228,7 +228,12 @@ found: while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) { vma = container_of(node, struct i915_vma, node); - ret = __i915_vma_unbind(vma); + + /* If we find any non-objects (!vma), we cannot evict them */ + if (vma->node.color != I915_COLOR_UNEVICTABLE) + ret = __i915_vma_unbind(vma); + else + ret = -ENOSPC; /* XXX search failed, try again? 
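The new I915_GEM_OBJECT_UNBIND_TEST bit above turns i915_gem_object_unbind() into a probe: the moment a bound VMA would actually need to be unbound, the loop returns -EBUSY without touching it. A hedged sketch of a caller using it as a cheap "could this be unbound?" check (the caller itself is hypothetical):

/* Hypothetical probe-only use of the new flag. */
int err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_TEST);

if (err == -EBUSY) {
	/* at least one vma is still bound; nothing was unbound */
} else if (err == 0) {
	/* no bound vma stood in the way */
}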
*/ } return ret; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 2a4cd0ba5464..424ad975a360 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1858,7 +1858,7 @@ void i915_error_state_store(struct i915_gpu_coredump *error) return; i915 = error->i915; - dev_info(i915->drm.dev, "%s\n", error_msg(error)); + drm_info(&i915->drm, "%s\n", error_msg(error)); if (error->simulated || cmpxchg(&i915->gpu_error.first_error, NULL, error)) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 9f0653cf0510..1502ab44f1a5 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3658,7 +3658,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir); if (iir & I915_USER_INTERRUPT) - intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]); + intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]); if (iir & I915_MASTER_ERROR_INTERRUPT) i8xx_error_irq_handler(dev_priv, eir, eir_stuck); @@ -3763,7 +3763,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) I915_WRITE(GEN2_IIR, iir); if (iir & I915_USER_INTERRUPT) - intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]); + intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]); if (iir & I915_MASTER_ERROR_INTERRUPT) i9xx_error_irq_handler(dev_priv, eir, eir_stuck); @@ -3905,10 +3905,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) I915_WRITE(GEN2_IIR, iir); if (iir & I915_USER_INTERRUPT) - intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]); + intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]); if (iir & I915_BSD_USER_INTERRUPT) - intel_engine_signal_breadcrumbs(dev_priv->engine[VCS0]); + intel_engine_signal_breadcrumbs(dev_priv->gt.engine[VCS0]); if (iir & I915_MASTER_ERROR_INTERRUPT) i9xx_error_irq_handler(dev_priv, eir, eir_stuck); diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c index fdd550405fd3..7b3b83bd5ab8 100644 --- a/drivers/gpu/drm/i915/i915_memcpy.c +++ b/drivers/gpu/drm/i915/i915_memcpy.c @@ -35,7 +35,6 @@ static DEFINE_STATIC_KEY_FALSE(has_movntdqa); -#ifdef CONFIG_AS_MOVNTDQA static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len) { kernel_fpu_begin(); @@ -93,10 +92,6 @@ static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len) kernel_fpu_end(); } -#else -static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len) {} -static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len) {} -#endif /** * i915_memcpy_from_wc: perform an accelerated *aligned* read from WC diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 2c80a0194c80..66738f2c4f28 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -160,6 +160,7 @@ GEN(2), \ .is_mobile = 1, \ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_overlay = 1, \ .display.cursor_needs_physical = 1, \ .display.overlay_needs_physical = 1, \ @@ -179,6 +180,7 @@ #define I845_FEATURES \ GEN(2), \ .pipe_mask = BIT(PIPE_A), \ + .cpu_transcoder_mask = BIT(TRANSCODER_A), \ .display.has_overlay = 1, \ .display.overlay_needs_physical = 1, \ .display.has_gmch = 1, \ @@ -218,6 +220,7 @@ static const struct intel_device_info i865g_info = { #define GEN3_FEATURES \ GEN(3), \ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .cpu_transcoder_mask = 
BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ .engine_mask = BIT(RCS0), \ @@ -303,6 +306,7 @@ static const struct intel_device_info pnv_m_info = { #define GEN4_FEATURES \ GEN(4), \ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ @@ -354,6 +358,7 @@ static const struct intel_device_info gm45_info = { #define GEN5_FEATURES \ GEN(5), \ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ .engine_mask = BIT(RCS0) | BIT(VCS0), \ .has_snoop = true, \ @@ -381,6 +386,7 @@ static const struct intel_device_info ilk_m_info = { #define GEN6_FEATURES \ GEN(6), \ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ .display.has_fbc = 1, \ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ @@ -430,6 +436,7 @@ static const struct intel_device_info snb_m_gt2_info = { #define GEN7_FEATURES \ GEN(7), \ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ + .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \ .display.has_hotplug = 1, \ .display.has_fbc = 1, \ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ @@ -482,6 +489,7 @@ static const struct intel_device_info ivb_q_info = { PLATFORM(INTEL_IVYBRIDGE), .gt = 2, .pipe_mask = 0, /* legal, last one wins */ + .cpu_transcoder_mask = 0, .has_l3_dpf = 1, }; @@ -490,6 +498,7 @@ static const struct intel_device_info vlv_info = { GEN(7), .is_lp = 1, .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), + .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), .has_runtime_pm = 1, .has_rc6 = 1, .has_rps = true, @@ -511,6 +520,8 @@ static const struct intel_device_info vlv_info = { #define G75_FEATURES \ GEN7_FEATURES, \ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ + .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \ .display.has_ddi = 1, \ .has_fpga_dbg = 1, \ .display.has_psr = 1, \ @@ -581,6 +592,7 @@ static const struct intel_device_info chv_info = { PLATFORM(INTEL_CHERRYVIEW), GEN(8), .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), .display.has_hotplug = 1, .is_lp = 1, .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), @@ -656,6 +668,9 @@ static const struct intel_device_info skl_gt4_info = { .display.has_hotplug = 1, \ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ + .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ + BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \ .has_64bit_reloc = 1, \ .display.has_ddi = 1, \ .has_fpga_dbg = 1, \ @@ -759,6 +774,9 @@ static const struct intel_device_info cnl_info = { #define GEN11_FEATURES \ GEN10_FEATURES, \ GEN11_DEFAULT_PAGE_SIZES, \ + .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ + BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \ .pipe_offsets = { \ [TRANSCODER_A] = PIPE_A_OFFSET, \ [TRANSCODER_B] = PIPE_B_OFFSET, \ @@ -799,6 +817,10 @@ static const struct intel_device_info ehl_info = { #define GEN12_FEATURES \ GEN11_FEATURES, \ GEN(12), \ + 
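All of the .cpu_transcoder_mask additions above feed the new HAS_TRANSCODER(dev_priv, trans) macro, which replaces the EDP-specific HAS_TRANSCODER_EDP() check with a per-platform bitmask test. A standalone illustration of the bitmask idea (the transcoder indices below are illustrative, not the driver's real enum values; the mask mirrors the G75-style A/B/C/EDP set from the hunk):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Illustrative indices only. */
enum { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_D, TRANSCODER_EDP };

static bool has_transcoder(uint32_t cpu_transcoder_mask, int trans)
{
	return (cpu_transcoder_mask & BIT(trans)) != 0;
}

int main(void)
{
	uint32_t mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
			BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP);

	printf("EDP: %d, D: %d\n",
	       has_transcoder(mask, TRANSCODER_EDP),	/* 1 */
	       has_transcoder(mask, TRANSCODER_D));	/* 0 */
	return 0;
}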
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ + .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \ + BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \ .pipe_offsets = { \ [TRANSCODER_A] = PIPE_A_OFFSET, \ [TRANSCODER_B] = PIPE_B_OFFSET, \ @@ -822,7 +844,6 @@ static const struct intel_device_info ehl_info = { static const struct intel_device_info tgl_info = { GEN12_FEATURES, PLATFORM(INTEL_TIGERLAKE), - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), .display.has_modular_fia = 1, .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 551be589d6f4..5cde3e4e7be6 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -204,21 +204,6 @@ #include "i915_drv.h" #include "i915_perf.h" -#include "oa/i915_oa_hsw.h" -#include "oa/i915_oa_bdw.h" -#include "oa/i915_oa_chv.h" -#include "oa/i915_oa_sklgt2.h" -#include "oa/i915_oa_sklgt3.h" -#include "oa/i915_oa_sklgt4.h" -#include "oa/i915_oa_bxt.h" -#include "oa/i915_oa_kblgt2.h" -#include "oa/i915_oa_kblgt3.h" -#include "oa/i915_oa_glk.h" -#include "oa/i915_oa_cflgt2.h" -#include "oa/i915_oa_cflgt3.h" -#include "oa/i915_oa_cnl.h" -#include "oa/i915_oa_icl.h" -#include "oa/i915_oa_tgl.h" /* HW requires this to be a power of two, between 128k and 16M, though driver * is currently generally designed assuming the largest 16M size is used such @@ -238,26 +223,17 @@ * * Although this can be observed explicitly while copying reports to userspace * by checking for a zeroed report-id field in tail reports, we want to account - * for this earlier, as part of the oa_buffer_check to avoid lots of redundant - * read() attempts. - * - * In effect we define a tail pointer for reading that lags the real tail - * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough - * time for the corresponding reports to become visible to the CPU. - * - * To manage this we actually track two tail pointers: - * 1) An 'aging' tail with an associated timestamp that is tracked until we - * can trust the corresponding data is visible to the CPU; at which point - * it is considered 'aged'. - * 2) An 'aged' tail that can be used for read()ing. - * - * The two separate pointers let us decouple read()s from tail pointer aging. - * - * The tail pointers are checked and updated at a limited rate within a hrtimer - * callback (the same callback that is used for delivering EPOLLIN events) - * - * Initially the tails are marked invalid with %INVALID_TAIL_PTR which - * indicates that an updated tail pointer is needed. + * for this earlier, as part of the oa_buffer_check_unlocked to avoid lots of + * redundant read() attempts. + * + * We workaround this issue in oa_buffer_check_unlocked() by reading the reports + * in the OA buffer, starting from the tail reported by the HW until we find a + * report with its first 2 dwords not 0 meaning its previous report is + * completely in memory and ready to be read. Those dwords are also set to 0 + * once read and the whole buffer is cleared upon OA buffer initialization. The + * first dword is the reason for this report while the second is the timestamp, + * making the chances of having those 2 fields at 0 fairly unlikely. A more + * detailed explanation is available in oa_buffer_check_unlocked(). 
* * Most of the implementation details for this workaround are in * oa_buffer_check_unlocked() and _append_oa_reports() @@ -272,11 +248,11 @@ #define OA_TAIL_MARGIN_NSEC 100000ULL #define INVALID_TAIL_PTR 0xffffffff -/* frequency for checking whether the OA unit has written new reports to the - * circular OA buffer... +/* The default frequency for checking whether the OA unit has written new + * reports to the circular OA buffer... */ -#define POLL_FREQUENCY 200 -#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY) +#define DEFAULT_POLL_FREQUENCY_HZ 200 +#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ) /* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */ static u32 i915_perf_stream_paranoid = true; @@ -359,6 +335,12 @@ static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = { * @oa_periodic: Whether to enable periodic OA unit sampling * @oa_period_exponent: The OA unit sampling period is derived from this * @engine: The engine (typically rcs0) being monitored by the OA unit + * @has_sseu: Whether @sseu was specified by userspace + * @sseu: internal SSEU configuration computed either from the userspace + * specified configuration in the opening parameters or a default value + * (see get_default_sseu_config()) + * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA + * data availability * * As read_properties_unlocked() enumerates and validates the properties given * to open a stream of metrics the configuration is built up in the structure @@ -378,6 +360,11 @@ struct perf_open_properties { int oa_period_exponent; struct intel_engine_cs *engine; + + bool has_sseu; + struct intel_sseu sseu; + + u64 poll_oa_period; }; struct i915_oa_config_bo { @@ -409,10 +396,7 @@ i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set) struct i915_oa_config *oa_config; rcu_read_lock(); - if (metrics_set == 1) - oa_config = &perf->test_config; - else - oa_config = idr_find(&perf->metrics_idr, metrics_set); + oa_config = idr_find(&perf->metrics_idr, metrics_set); if (oa_config) oa_config = i915_oa_config_get(oa_config); rcu_read_unlock(); @@ -465,8 +449,8 @@ static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream) * (See description of OA_TAIL_MARGIN_NSEC above for further details.) * * Besides returning true when there is data available to read() this function - * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp - * and .aged_tail_idx state used for reading. + * also updates the tail, aging_tail and aging_timestamp in the oa_buffer + * object. * * Note: It's safe to read OA config state here unlocked, assuming that this is * only called while the stream is enabled, while the global OA configuration @@ -476,28 +460,19 @@ static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream) */ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream) { + u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); int report_size = stream->oa_buffer.format_size; unsigned long flags; - unsigned int aged_idx; - u32 head, hw_tail, aged_tail, aging_tail; + bool pollin; + u32 hw_tail; u64 now; /* We have to consider the (unlikely) possibility that read() errors - * could result in an OA buffer reset which might reset the head, - * tails[] and aged_tail state. + * could result in an OA buffer reset which might reset the head and + * tail state. 
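For reference, the default cadence defined above works out to NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ = 1,000,000,000 / 200 = 5,000,000 ns, i.e. the poll hrtimer checks the OA tail every 5 ms unless the new poll_oa_period open property documented above supplies a different period.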
*/ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); - /* NB: The head we observe here might effectively be a little out of - * date (between head and tails[aged_idx].offset if there is currently - * a read() in progress. - */ - head = stream->oa_buffer.head; - - aged_idx = stream->oa_buffer.aged_tail_idx; - aged_tail = stream->oa_buffer.tails[aged_idx].offset; - aging_tail = stream->oa_buffer.tails[!aged_idx].offset; - hw_tail = stream->perf->ops.oa_hw_tail_read(stream); /* The tail pointer increases in 64 byte increments, @@ -507,64 +482,63 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream) now = ktime_get_mono_fast_ns(); - /* Update the aged tail - * - * Flip the tail pointer available for read()s once the aging tail is - * old enough to trust that the corresponding data will be visible to - * the CPU... - * - * Do this before updating the aging pointer in case we may be able to - * immediately start aging a new pointer too (if new data has become - * available) without needing to wait for a later hrtimer callback. - */ - if (aging_tail != INVALID_TAIL_PTR && - ((now - stream->oa_buffer.aging_timestamp) > - OA_TAIL_MARGIN_NSEC)) { - - aged_idx ^= 1; - stream->oa_buffer.aged_tail_idx = aged_idx; + if (hw_tail == stream->oa_buffer.aging_tail && + (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) { + /* If the HW tail hasn't move since the last check and the HW + * tail has been aging for long enough, declare it the new + * tail. + */ + stream->oa_buffer.tail = stream->oa_buffer.aging_tail; + } else { + u32 head, tail, aged_tail; - aged_tail = aging_tail; + /* NB: The head we observe here might effectively be a little + * out of date. If a read() is in progress, the head could be + * anywhere between this head and stream->oa_buffer.tail. + */ + head = stream->oa_buffer.head - gtt_offset; + aged_tail = stream->oa_buffer.tail - gtt_offset; + + hw_tail -= gtt_offset; + tail = hw_tail; + + /* Walk the stream backward until we find a report with dword 0 + * & 1 not at 0. Since the circular buffer pointers progress by + * increments of 64 bytes and that reports can be up to 256 + * bytes long, we can't tell whether a report has fully landed + * in memory before the first 2 dwords of the following report + * have effectively landed. + * + * This is assuming that the writes of the OA unit land in + * memory in the order they were written to. + * If not : (╯°□°)╯︵ ┻━┻ + */ + while (OA_TAKEN(tail, aged_tail) >= report_size) { + u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail); - /* Mark that we need a new pointer to start aging... */ - stream->oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR; - aging_tail = INVALID_TAIL_PTR; - } + if (report32[0] != 0 || report32[1] != 0) + break; - /* Update the aging tail - * - * We throttle aging tail updates until we have a new tail that - * represents >= one report more data than is already available for - * reading. This ensures there will be enough data for a successful - * read once this new pointer has aged and ensures we will give the new - * pointer time to age. - */ - if (aging_tail == INVALID_TAIL_PTR && - (aged_tail == INVALID_TAIL_PTR || - OA_TAKEN(hw_tail, aged_tail) >= report_size)) { - struct i915_vma *vma = stream->oa_buffer.vma; - u32 gtt_offset = i915_ggtt_offset(vma); - - /* Be paranoid and do a bounds check on the pointer read back - * from hardware, just in case some spurious hardware condition - * could put the tail out of bounds... 
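The rewritten tail tracking above drops the two aging tail slots and instead scans backwards from the HW tail, in report-size steps, until it hits a report whose first two dwords are non-zero (reports are zeroed at buffer init and again after copy-out), so everything below that offset is known to have landed. A standalone sketch of the scan over a power-of-two ring, with made-up sizes and no GGTT offset handling:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE	4096u	/* stand-in for OA_BUFFER_SIZE, power of two */
#define REPORT_SIZE	64u	/* stand-in for the selected OA format size */

/* modular distance from 'head' to 'tail' in the ring, OA_TAKEN-style */
static uint32_t taken(uint32_t tail, uint32_t head)
{
	return (tail - head) & (BUF_SIZE - 1);
}

/* Walk back from the HW tail towards the previously known tail until the
 * first report whose leading two dwords are non-zero; data below the
 * returned offset has fully landed in memory.
 */
static uint32_t landed_tail(const uint8_t *buf, uint32_t hw_tail, uint32_t old_tail)
{
	uint32_t tail = hw_tail;

	while (taken(tail, old_tail) >= REPORT_SIZE) {
		const uint32_t *report32 = (const uint32_t *)(buf + tail);

		if (report32[0] != 0 || report32[1] != 0)
			break;

		tail = (tail - REPORT_SIZE) & (BUF_SIZE - 1);
	}
	return tail;
}

int main(void)
{
	static uint32_t ring[BUF_SIZE / sizeof(uint32_t)];	/* zeroed, aligned */
	const uint8_t *buf = (const uint8_t *)ring;
	uint32_t head = 0, old_tail = 0, hw_tail = 4 * REPORT_SIZE;
	uint32_t tail;
	int i;

	/* reports 0..2 have their leading dwords written; report 3 is still zero */
	for (i = 0; i < 3; i++)
		memset((uint8_t *)ring + i * REPORT_SIZE, 0xff, 8);

	tail = landed_tail(buf, hw_tail, old_tail);
	printf("tail=0x%x, data ready: %d\n", tail,
	       taken(tail, head) >= REPORT_SIZE);	/* tail=0x80, ready: 1 */
	return 0;
}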
- */ - if (hw_tail >= gtt_offset && - hw_tail < (gtt_offset + OA_BUFFER_SIZE)) { - stream->oa_buffer.tails[!aged_idx].offset = - aging_tail = hw_tail; - stream->oa_buffer.aging_timestamp = now; - } else { - drm_err(&stream->perf->i915->drm, - "Ignoring spurious out of range OA buffer tail pointer = %x\n", - hw_tail); + tail = (tail - report_size) & (OA_BUFFER_SIZE - 1); } + + if (OA_TAKEN(hw_tail, tail) > report_size && + __ratelimit(&stream->perf->tail_pointer_race)) + DRM_NOTE("unlanded report(s) head=0x%x " + "tail=0x%x hw_tail=0x%x\n", + head, tail, hw_tail); + + stream->oa_buffer.tail = gtt_offset + tail; + stream->oa_buffer.aging_tail = gtt_offset + hw_tail; + stream->oa_buffer.aging_timestamp = now; } + pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset, + stream->oa_buffer.head - gtt_offset) >= report_size; + spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); - return aged_tail == INVALID_TAIL_PTR ? - false : OA_TAKEN(aged_tail, head) >= report_size; + return pollin; } /** @@ -682,7 +656,6 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, u32 mask = (OA_BUFFER_SIZE - 1); size_t start_offset = *offset; unsigned long flags; - unsigned int aged_tail_idx; u32 head, tail; u32 taken; int ret = 0; @@ -693,19 +666,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); head = stream->oa_buffer.head; - aged_tail_idx = stream->oa_buffer.aged_tail_idx; - tail = stream->oa_buffer.tails[aged_tail_idx].offset; + tail = stream->oa_buffer.tail; spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); /* - * An invalid tail pointer here means we're still waiting for the poll - * hrtimer callback to give us a pointer - */ - if (tail == INVALID_TAIL_PTR) - return -EAGAIN; - - /* * NB: oa_buffer.head/tail include the gtt_offset which we don't want * while indexing relative to oa_buf_base. */ @@ -838,13 +803,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, } /* - * The above reason field sanity check is based on - * the assumption that the OA buffer is initially - * zeroed and we reset the field after copying so the - * check is still meaningful once old reports start - * being overwritten. + * Clear out the first 2 dword as a mean to detect unlanded + * reports. */ report32[0] = 0; + report32[1] = 0; } if (start_offset != *offset) { @@ -985,7 +948,6 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream, u32 mask = (OA_BUFFER_SIZE - 1); size_t start_offset = *offset; unsigned long flags; - unsigned int aged_tail_idx; u32 head, tail; u32 taken; int ret = 0; @@ -996,17 +958,10 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream, spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); head = stream->oa_buffer.head; - aged_tail_idx = stream->oa_buffer.aged_tail_idx; - tail = stream->oa_buffer.tails[aged_tail_idx].offset; + tail = stream->oa_buffer.tail; spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); - /* An invalid tail pointer here means we're still waiting for the poll - * hrtimer callback to give us a pointer - */ - if (tail == INVALID_TAIL_PTR) - return -EAGAIN; - /* NB: oa_buffer.head/tail include the gtt_offset which we don't want * while indexing relative to oa_buf_base. 
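The two-dword clearing added here is what the reworked oa_buffer_check_unlocked() relies on: the OA buffer starts out zeroed, and read() re-zeroes dwords 0 and 1 of every report it copies, so a report whose first two dwords are still zero has not landed in memory yet. A self-contained C sketch of that consume loop over a power-of-two ring buffer, mirroring the OA_TAKEN() modular arithmetic; the buffer size, report size and demo data are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define BUF_SIZE    1024u		/* power of two, stand-in for OA_BUFFER_SIZE */
#define REPORT_SIZE 64u
#define MASK        (BUF_SIZE - 1)
/* Circular distance from head to tail; valid because BUF_SIZE is a power of two. */
#define TAKEN(tail, head) (((tail) - (head)) & MASK)

/* Copy out every complete report in [head, tail) and zero its first two
 * dwords so a later check can tell stale data from a freshly landed report. */
static uint32_t consume_reports(uint8_t *buf, uint32_t head, uint32_t tail)
{
	while (TAKEN(tail, head) >= REPORT_SIZE) {
		uint32_t *report32 = (uint32_t *)(buf + head);

		printf("report at 0x%03x, id=0x%08x\n", head, report32[0]);

		report32[0] = 0;	/* mark as consumed / not landed */
		report32[1] = 0;
		head = (head + REPORT_SIZE) & MASK;
	}
	return head;
}

int main(void)
{
	static uint32_t storage[BUF_SIZE / sizeof(uint32_t)];	/* zeroed, like the OA buffer */
	uint8_t *buf = (uint8_t *)storage;
	uint32_t head = 0, tail = 3 * REPORT_SIZE;

	/* Pretend three reports landed with non-zero report ids. */
	for (uint32_t i = 0; i < 3; i++)
		((uint32_t *)(buf + i * REPORT_SIZE))[0] = 0xdead0000u | i;

	head = consume_reports(buf, head, tail);
	printf("new head = 0x%03x\n", head);
	return 0;
}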
*/ @@ -1064,13 +1019,11 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream, if (ret) break; - /* The above report-id field sanity check is based on - * the assumption that the OA buffer is initially - * zeroed and we reset the field after copying so the - * check is still meaningful once old reports start - * being overwritten. + /* Clear out the first 2 dwords as a mean to detect unlanded + * reports. */ report32[0] = 0; + report32[1] = 0; } if (start_offset != *offset) { @@ -1449,8 +1402,8 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream) gtt_offset | OABUFFER_SIZE_16M); /* Mark that we need updated tail pointers to read from... */ - stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR; - stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR; + stream->oa_buffer.aging_tail = INVALID_TAIL_PTR; + stream->oa_buffer.tail = gtt_offset; spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); @@ -1472,8 +1425,6 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream) * memory... */ memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE); - - stream->pollin = false; } static void gen8_init_oa_buffer(struct i915_perf_stream *stream) @@ -1503,8 +1454,8 @@ static void gen8_init_oa_buffer(struct i915_perf_stream *stream) intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK); /* Mark that we need updated tail pointers to read from... */ - stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR; - stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR; + stream->oa_buffer.aging_tail = INVALID_TAIL_PTR; + stream->oa_buffer.tail = gtt_offset; /* * Reset state used to recognise context switches, affecting which @@ -1528,8 +1479,6 @@ static void gen8_init_oa_buffer(struct i915_perf_stream *stream) * memory... */ memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE); - - stream->pollin = false; } static void gen12_init_oa_buffer(struct i915_perf_stream *stream) @@ -1559,8 +1508,8 @@ static void gen12_init_oa_buffer(struct i915_perf_stream *stream) gtt_offset & GEN12_OAG_OATAILPTR_MASK); /* Mark that we need updated tail pointers to read from... 
*/ - stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR; - stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR; + stream->oa_buffer.aging_tail = INVALID_TAIL_PTR; + stream->oa_buffer.tail = gtt_offset; /* * Reset state used to recognise context switches, affecting which @@ -1585,8 +1534,6 @@ static void gen12_init_oa_buffer(struct i915_perf_stream *stream) */ memset(stream->oa_buffer.vaddr, 0, stream->oa_buffer.vma->size); - - stream->pollin = false; } static int alloc_oa_buffer(struct i915_perf_stream *stream) @@ -1972,10 +1919,11 @@ out: return i915_vma_get(oa_bo->vma); } -static struct i915_request * +static int emit_oa_config(struct i915_perf_stream *stream, struct i915_oa_config *oa_config, - struct intel_context *ce) + struct intel_context *ce, + struct i915_active *active) { struct i915_request *rq; struct i915_vma *vma; @@ -1983,7 +1931,7 @@ emit_oa_config(struct i915_perf_stream *stream, vma = get_oa_vma(stream, oa_config); if (IS_ERR(vma)) - return ERR_CAST(vma); + return PTR_ERR(vma); err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); if (err) @@ -1997,6 +1945,18 @@ emit_oa_config(struct i915_perf_stream *stream, goto err_vma_unpin; } + if (!IS_ERR_OR_NULL(active)) { + /* After all individual context modifications */ + err = i915_request_await_active(rq, active, + I915_ACTIVE_AWAIT_ACTIVE); + if (err) + goto err_add_request; + + err = i915_active_add_request(active, rq); + if (err) + goto err_add_request; + } + i915_vma_lock(vma); err = i915_request_await_object(rq, vma->obj, 0); if (!err) @@ -2011,14 +1971,13 @@ emit_oa_config(struct i915_perf_stream *stream, if (err) goto err_add_request; - i915_request_get(rq); err_add_request: i915_request_add(rq); err_vma_unpin: i915_vma_unpin(vma); err_vma_put: i915_vma_put(vma); - return err ? 
ERR_PTR(err) : rq; + return err; } static struct intel_context *oa_context(struct i915_perf_stream *stream) @@ -2026,8 +1985,9 @@ static struct intel_context *oa_context(struct i915_perf_stream *stream) return stream->pinned_ctx ?: stream->engine->kernel_context; } -static struct i915_request * -hsw_enable_metric_set(struct i915_perf_stream *stream) +static int +hsw_enable_metric_set(struct i915_perf_stream *stream, + struct i915_active *active) { struct intel_uncore *uncore = stream->uncore; @@ -2046,7 +2006,9 @@ hsw_enable_metric_set(struct i915_perf_stream *stream) intel_uncore_rmw(uncore, GEN6_UCGCTL1, 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE); - return emit_oa_config(stream, stream->oa_config, oa_context(stream)); + return emit_oa_config(stream, + stream->oa_config, oa_context(stream), + active); } static void hsw_disable_metric_set(struct i915_perf_stream *stream) @@ -2116,9 +2078,6 @@ gen8_update_reg_state_unlocked(const struct intel_context *ce, for (i = 0; i < ARRAY_SIZE(flex_regs); i++) reg_state[ctx_flexeu0 + i * 2 + 1] = oa_config_flex_reg(stream->oa_config, flex_regs[i]); - - reg_state[CTX_R_PWR_CLK_STATE] = - intel_sseu_make_rpcs(ce->engine->i915, &ce->sseu); } struct flex { @@ -2196,8 +2155,10 @@ static int gen8_modify_context(struct intel_context *ce, return err; } -static int gen8_modify_self(struct intel_context *ce, - const struct flex *flex, unsigned int count) +static int +gen8_modify_self(struct intel_context *ce, + const struct flex *flex, unsigned int count, + struct i915_active *active) { struct i915_request *rq; int err; @@ -2208,8 +2169,17 @@ static int gen8_modify_self(struct intel_context *ce, if (IS_ERR(rq)) return PTR_ERR(rq); + if (!IS_ERR_OR_NULL(active)) { + err = i915_active_add_request(active, rq); + if (err) + goto err_add_request; + } + err = gen8_load_flex(rq, ce, flex, count); + if (err) + goto err_add_request; +err_add_request: i915_request_add(rq); return err; } @@ -2243,7 +2213,8 @@ static int gen8_configure_context(struct i915_gem_context *ctx, return err; } -static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool enable) +static int gen12_configure_oar_context(struct i915_perf_stream *stream, + struct i915_active *active) { int err; struct intel_context *ce = stream->pinned_ctx; @@ -2252,7 +2223,7 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena { GEN8_OACTXCONTROL, stream->perf->ctx_oactxctrl_offset + 1, - enable ? GEN8_OA_COUNTER_RESUME : 0, + active ? GEN8_OA_COUNTER_RESUME : 0, }, }; /* Offsets in regs_lri are not used since this configuration is only @@ -2264,13 +2235,13 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena GEN12_OAR_OACONTROL, GEN12_OAR_OACONTROL_OFFSET + 1, (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) | - (enable ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0) + (active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0) }, { RING_CONTEXT_CONTROL(ce->engine->mmio_base), CTX_CONTEXT_CONTROL, _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE, - enable ? + active ? 
GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE : 0) }, @@ -2287,7 +2258,7 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena return err; /* Apply regs_lri using LRI with pinned context */ - return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri)); + return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active); } /* @@ -2315,9 +2286,11 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena * Note: it's only the RCS/Render context that has any OA state. * Note: the first flex register passed must always be R_PWR_CLK_STATE */ -static int oa_configure_all_contexts(struct i915_perf_stream *stream, - struct flex *regs, - size_t num_regs) +static int +oa_configure_all_contexts(struct i915_perf_stream *stream, + struct flex *regs, + size_t num_regs, + struct i915_active *active) { struct drm_i915_private *i915 = stream->perf->i915; struct intel_engine_cs *engine; @@ -2374,7 +2347,7 @@ static int oa_configure_all_contexts(struct i915_perf_stream *stream, regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu); - err = gen8_modify_self(ce, regs, num_regs); + err = gen8_modify_self(ce, regs, num_regs, active); if (err) return err; } @@ -2382,8 +2355,10 @@ static int oa_configure_all_contexts(struct i915_perf_stream *stream, return 0; } -static int gen12_configure_all_contexts(struct i915_perf_stream *stream, - const struct i915_oa_config *oa_config) +static int +gen12_configure_all_contexts(struct i915_perf_stream *stream, + const struct i915_oa_config *oa_config, + struct i915_active *active) { struct flex regs[] = { { @@ -2392,11 +2367,15 @@ static int gen12_configure_all_contexts(struct i915_perf_stream *stream, }, }; - return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs)); + return oa_configure_all_contexts(stream, + regs, ARRAY_SIZE(regs), + active); } -static int lrc_configure_all_contexts(struct i915_perf_stream *stream, - const struct i915_oa_config *oa_config) +static int +lrc_configure_all_contexts(struct i915_perf_stream *stream, + const struct i915_oa_config *oa_config, + struct i915_active *active) { /* The MMIO offsets for Flex EU registers aren't contiguous */ const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; @@ -2429,11 +2408,14 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream, for (i = 2; i < ARRAY_SIZE(regs); i++) regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg); - return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs)); + return oa_configure_all_contexts(stream, + regs, ARRAY_SIZE(regs), + active); } -static struct i915_request * -gen8_enable_metric_set(struct i915_perf_stream *stream) +static int +gen8_enable_metric_set(struct i915_perf_stream *stream, + struct i915_active *active) { struct intel_uncore *uncore = stream->uncore; struct i915_oa_config *oa_config = stream->oa_config; @@ -2473,11 +2455,13 @@ gen8_enable_metric_set(struct i915_perf_stream *stream) * to make sure all slices/subslices are ON before writing to NOA * registers. 
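A recurring shape in these hunks is the switch from returning a struct i915_request * (errors encoded with ERR_PTR()) to returning a plain int once callers no longer need the request and instead track completion through an i915_active. The pointer-encoded-errno convention itself is standard kernel fare; a standalone sketch of it follows, with simplified re-implementations of the helpers rather than the kernel's, to show what each call site sheds:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095UL

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Old style: hand back the object, encode failure in the pointer. */
static void *get_thing(int fail)
{
	static int thing;
	return fail ? ERR_PTR(-ENOMEM) : &thing;
}

/* New style: the caller only needs success/failure, so return an int and
 * keep the object internal; every call site drops its IS_ERR() dance. */
static int enable_thing(int fail)
{
	void *thing = get_thing(fail);

	if (IS_ERR(thing))
		return (int)PTR_ERR(thing);
	/* ... program the hardware using thing ... */
	return 0;
}

int main(void)
{
	printf("ok path  : %d\n", enable_thing(0));
	printf("fail path: %d (-ENOMEM is %d)\n", enable_thing(1), -ENOMEM);
	return 0;
}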
*/ - ret = lrc_configure_all_contexts(stream, oa_config); + ret = lrc_configure_all_contexts(stream, oa_config, active); if (ret) - return ERR_PTR(ret); + return ret; - return emit_oa_config(stream, oa_config, oa_context(stream)); + return emit_oa_config(stream, + stream->oa_config, oa_context(stream), + active); } static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream) @@ -2487,8 +2471,9 @@ static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream) 0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS); } -static struct i915_request * -gen12_enable_metric_set(struct i915_perf_stream *stream) +static int +gen12_enable_metric_set(struct i915_perf_stream *stream, + struct i915_active *active) { struct intel_uncore *uncore = stream->uncore; struct i915_oa_config *oa_config = stream->oa_config; @@ -2517,9 +2502,9 @@ gen12_enable_metric_set(struct i915_perf_stream *stream) * to make sure all slices/subslices are ON before writing to NOA * registers. */ - ret = gen12_configure_all_contexts(stream, oa_config); + ret = gen12_configure_all_contexts(stream, oa_config, active); if (ret) - return ERR_PTR(ret); + return ret; /* * For Gen12, performance counters are context @@ -2527,12 +2512,14 @@ gen12_enable_metric_set(struct i915_perf_stream *stream) * requested this. */ if (stream->ctx) { - ret = gen12_configure_oar_context(stream, true); + ret = gen12_configure_oar_context(stream, active); if (ret) - return ERR_PTR(ret); + return ret; } - return emit_oa_config(stream, oa_config, oa_context(stream)); + return emit_oa_config(stream, + stream->oa_config, oa_context(stream), + active); } static void gen8_disable_metric_set(struct i915_perf_stream *stream) @@ -2540,7 +2527,7 @@ static void gen8_disable_metric_set(struct i915_perf_stream *stream) struct intel_uncore *uncore = stream->uncore; /* Reset all contexts' slices/subslices configurations. */ - lrc_configure_all_contexts(stream, NULL); + lrc_configure_all_contexts(stream, NULL, NULL); intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0); } @@ -2550,7 +2537,7 @@ static void gen10_disable_metric_set(struct i915_perf_stream *stream) struct intel_uncore *uncore = stream->uncore; /* Reset all contexts' slices/subslices configurations. */ - lrc_configure_all_contexts(stream, NULL); + lrc_configure_all_contexts(stream, NULL, NULL); /* Make sure we disable noa to save power. */ intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0); @@ -2561,11 +2548,11 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream) struct intel_uncore *uncore = stream->uncore; /* Reset all contexts' slices/subslices configurations. */ - gen12_configure_all_contexts(stream, NULL); + gen12_configure_all_contexts(stream, NULL, NULL); /* disable the context save/restore or OAR counters */ if (stream->ctx) - gen12_configure_oar_context(stream, false); + gen12_configure_oar_context(stream, NULL); /* Make sure we disable noa to save power. 
*/ intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0); @@ -2657,11 +2644,13 @@ static void gen12_oa_enable(struct i915_perf_stream *stream) */ static void i915_oa_stream_enable(struct i915_perf_stream *stream) { + stream->pollin = false; + stream->perf->ops.oa_enable(stream); if (stream->periodic) hrtimer_start(&stream->poll_check_timer, - ns_to_ktime(POLL_PERIOD), + ns_to_ktime(stream->poll_oa_period), HRTIMER_MODE_REL_PINNED); } @@ -2737,16 +2726,52 @@ static const struct i915_perf_stream_ops i915_oa_stream_ops = { static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream) { - struct i915_request *rq; + struct i915_active *active; + int err; - rq = stream->perf->ops.enable_metric_set(stream); - if (IS_ERR(rq)) - return PTR_ERR(rq); + active = i915_active_create(); + if (!active) + return -ENOMEM; - i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); - i915_request_put(rq); + err = stream->perf->ops.enable_metric_set(stream, active); + if (err == 0) + __i915_active_wait(active, TASK_UNINTERRUPTIBLE); - return 0; + i915_active_put(active); + return err; +} + +static void +get_default_sseu_config(struct intel_sseu *out_sseu, + struct intel_engine_cs *engine) +{ + const struct sseu_dev_info *devinfo_sseu = + &RUNTIME_INFO(engine->i915)->sseu; + + *out_sseu = intel_sseu_from_device_info(devinfo_sseu); + + if (IS_GEN(engine->i915, 11)) { + /* + * We only need subslice count so it doesn't matter which ones + * we select - just turn off low bits in the amount of half of + * all available subslices per slice. + */ + out_sseu->subslice_mask = + ~(~0 << (hweight8(out_sseu->subslice_mask) / 2)); + out_sseu->slice_mask = 0x1; + } +} + +static int +get_sseu_config(struct intel_sseu *out_sseu, + struct intel_engine_cs *engine, + const struct drm_i915_gem_context_param_sseu *drm_sseu) +{ + if (drm_sseu->engine.engine_class != engine->uabi_class || + drm_sseu->engine.engine_instance != engine->uabi_instance) + return -EINVAL; + + return i915_gem_user_to_context_sseu(engine->i915, drm_sseu, out_sseu); } /** @@ -2881,6 +2906,8 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, goto err_oa_buf_alloc; stream->ops = &i915_oa_stream_ops; + + perf->sseu = props->sseu; WRITE_ONCE(perf->exclusive_stream, stream); ret = i915_perf_stream_enable_sync(stream); @@ -2932,58 +2959,11 @@ void i915_oa_init_reg_state(const struct intel_context *ce, /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */ stream = READ_ONCE(engine->i915->perf.exclusive_stream); - /* - * For gen12, only CTX_R_PWR_CLK_STATE needs update, but the caller - * is already doing that, so nothing to be done for gen12 here. - */ if (stream && INTEL_GEN(stream->perf->i915) < 12) gen8_update_reg_state_unlocked(ce, stream); } /** - * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation - * @stream: An i915 perf stream - * @file: An i915 perf stream file - * @buf: destination buffer given by userspace - * @count: the number of bytes userspace wants to read - * @ppos: (inout) file seek position (unused) - * - * Besides wrapping &i915_perf_stream_ops->read this provides a common place to - * ensure that if we've successfully copied any data then reporting that takes - * precedence over any internal error status, so the data isn't lost. 
- * - * For example ret will be -ENOSPC whenever there is more buffered data than - * can be copied to userspace, but that's only interesting if we weren't able - * to copy some data because it implies the userspace buffer is too small to - * receive a single record (and we never split records). - * - * Another case with ret == -EFAULT is more of a grey area since it would seem - * like bad form for userspace to ask us to overrun its buffer, but the user - * knows best: - * - * http://yarchive.net/comp/linux/partial_reads_writes.html - * - * Returns: The number of bytes copied or a negative error code on failure. - */ -static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream, - struct file *file, - char __user *buf, - size_t count, - loff_t *ppos) -{ - /* Note we keep the offset (aka bytes read) separate from any - * error status so that the final check for whether we return - * the bytes read with a higher precedence than any error (see - * comment below) doesn't need to be handled/duplicated in - * stream->ops->read() implementations. - */ - size_t offset = 0; - int ret = stream->ops->read(stream, buf, count, &offset); - - return offset ?: (ret ?: -EAGAIN); -} - -/** * i915_perf_read - handles read() FOP for i915 perf stream FDs * @file: An i915 perf stream file * @buf: destination buffer given by userspace @@ -3008,7 +2988,8 @@ static ssize_t i915_perf_read(struct file *file, { struct i915_perf_stream *stream = file->private_data; struct i915_perf *perf = stream->perf; - ssize_t ret; + size_t offset = 0; + int ret; /* To ensure it's handled consistently we simply treat all reads of a * disabled stream as an error. In particular it might otherwise lead @@ -3031,13 +3012,12 @@ static ssize_t i915_perf_read(struct file *file, return ret; mutex_lock(&perf->lock); - ret = i915_perf_read_locked(stream, file, - buf, count, ppos); + ret = stream->ops->read(stream, buf, count, &offset); mutex_unlock(&perf->lock); - } while (ret == -EAGAIN); + } while (!offset && !ret); } else { mutex_lock(&perf->lock); - ret = i915_perf_read_locked(stream, file, buf, count, ppos); + ret = stream->ops->read(stream, buf, count, &offset); mutex_unlock(&perf->lock); } @@ -3048,15 +3028,15 @@ static ssize_t i915_perf_read(struct file *file, * and read() returning -EAGAIN. Clearing the oa.pollin state here * effectively ensures we back off until the next hrtimer callback * before reporting another EPOLLIN event. + * The exception to this is if ops->read() returned -ENOSPC which means + * that more OA data is available than could fit in the user provided + * buffer. In this case we want the next poll() call to not block. */ - if (ret >= 0 || ret == -EAGAIN) { - /* Maybe make ->pollin per-stream state if we support multiple - * concurrent streams in the future. - */ + if (ret != -ENOSPC) stream->pollin = false; - } - return ret; + /* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... 
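The new return path folds the old i915_perf_read_locked() semantics into one expression: bytes copied take precedence over any error, a zero/zero outcome becomes -EAGAIN, and only -ENOSPC leaves stream->pollin set so the next poll() call will not block. A tiny C sketch of that precedence rule; the function name and harness are illustrative:

#include <stdio.h>
#include <errno.h>
#include <sys/types.h>

/* Mirrors "return offset ?: (ret ?: -EAGAIN);": data copied wins over an
 * error, and "nothing copied, no error" is reported as -EAGAIN. */
static ssize_t normalize_read(size_t offset, int ret)
{
	if (offset)
		return (ssize_t)offset;
	return ret ? ret : -EAGAIN;
}

int main(void)
{
	printf("%zd\n", normalize_read(4096, -ENOSPC));	/* partial data: 4096 */
	printf("%zd\n", normalize_read(0, -EFAULT));	/* real error: -EFAULT */
	printf("%zd\n", normalize_read(0, 0));		/* nothing yet: -EAGAIN */
	return 0;
}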
*/ + return offset ?: (ret ?: -EAGAIN); } static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer) @@ -3069,7 +3049,8 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer) wake_up(&stream->poll_wq); } - hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD)); + hrtimer_forward_now(hrtimer, + ns_to_ktime(stream->poll_oa_period)); return HRTIMER_RESTART; } @@ -3200,7 +3181,7 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream, return -EINVAL; if (config != stream->oa_config) { - struct i915_request *rq; + int err; /* * If OA is bound to a specific context, emit the @@ -3211,13 +3192,11 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream, * When set globally, we use a low priority kernel context, * so it will effectively take effect when idle. */ - rq = emit_oa_config(stream, config, oa_context(stream)); - if (!IS_ERR(rq)) { + err = emit_oa_config(stream, config, oa_context(stream), NULL); + if (!err) config = xchg(&stream->oa_config, config); - i915_request_put(rq); - } else { - ret = PTR_ERR(rq); - } + else + ret = err; } i915_oa_config_put(config); @@ -3430,6 +3409,14 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf, privileged_op = true; } + /* + * Asking for SSEU configuration is a priviliged operation. + */ + if (props->has_sseu) + privileged_op = true; + else + get_default_sseu_config(&props->sseu, props->engine); + /* Similar to perf's kernel.perf_paranoid_cpu sysctl option * we check a dev.i915.perf_stream_paranoid sysctl option * to determine if it's ok to access system wide OA counters @@ -3450,6 +3437,7 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf, stream->perf = perf; stream->ctx = specific_ctx; + stream->poll_oa_period = props->poll_oa_period; ret = i915_oa_stream_init(stream, param, props); if (ret) @@ -3525,8 +3513,10 @@ static int read_properties_unlocked(struct i915_perf *perf, { u64 __user *uprop = uprops; u32 i; + int ret; memset(props, 0, sizeof(struct perf_open_properties)); + props->poll_oa_period = DEFAULT_POLL_PERIOD_NS; if (!n_props) { DRM_DEBUG("No i915 perf properties given\n"); @@ -3556,7 +3546,6 @@ static int read_properties_unlocked(struct i915_perf *perf, for (i = 0; i < n_props; i++) { u64 oa_period, oa_freq_hz; u64 id, value; - int ret; ret = get_user(id, uprop); if (ret) @@ -3642,6 +3631,32 @@ static int read_properties_unlocked(struct i915_perf *perf, case DRM_I915_PERF_PROP_HOLD_PREEMPTION: props->hold_preemption = !!value; break; + case DRM_I915_PERF_PROP_GLOBAL_SSEU: { + struct drm_i915_gem_context_param_sseu user_sseu; + + if (copy_from_user(&user_sseu, + u64_to_user_ptr(value), + sizeof(user_sseu))) { + DRM_DEBUG("Unable to copy global sseu parameter\n"); + return -EFAULT; + } + + ret = get_sseu_config(&props->sseu, props->engine, &user_sseu); + if (ret) { + DRM_DEBUG("Invalid SSEU configuration\n"); + return ret; + } + props->has_sseu = true; + break; + } + case DRM_I915_PERF_PROP_POLL_OA_PERIOD: + if (value < 100000 /* 100us */) { + DRM_DEBUG("OA availability timer too small (%lluns < 100us)\n", + value); + return -EINVAL; + } + props->poll_oa_period = value; + break; case DRM_I915_PERF_PROP_MAX: MISSING_CASE(id); return -EINVAL; @@ -3724,7 +3739,6 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data, void i915_perf_register(struct drm_i915_private *i915) { struct i915_perf *perf = &i915->perf; - int ret; if (!perf->i915) return; @@ -3738,64 +3752,7 @@ void i915_perf_register(struct drm_i915_private *i915) perf->metrics_kobj = 
kobject_create_and_add("metrics", &i915->drm.primary->kdev->kobj); - if (!perf->metrics_kobj) - goto exit; - - sysfs_attr_init(&perf->test_config.sysfs_metric_id.attr); - - if (IS_TIGERLAKE(i915)) { - i915_perf_load_test_config_tgl(i915); - } else if (INTEL_GEN(i915) >= 11) { - i915_perf_load_test_config_icl(i915); - } else if (IS_CANNONLAKE(i915)) { - i915_perf_load_test_config_cnl(i915); - } else if (IS_COFFEELAKE(i915)) { - if (IS_CFL_GT2(i915)) - i915_perf_load_test_config_cflgt2(i915); - if (IS_CFL_GT3(i915)) - i915_perf_load_test_config_cflgt3(i915); - } else if (IS_GEMINILAKE(i915)) { - i915_perf_load_test_config_glk(i915); - } else if (IS_KABYLAKE(i915)) { - if (IS_KBL_GT2(i915)) - i915_perf_load_test_config_kblgt2(i915); - else if (IS_KBL_GT3(i915)) - i915_perf_load_test_config_kblgt3(i915); - } else if (IS_BROXTON(i915)) { - i915_perf_load_test_config_bxt(i915); - } else if (IS_SKYLAKE(i915)) { - if (IS_SKL_GT2(i915)) - i915_perf_load_test_config_sklgt2(i915); - else if (IS_SKL_GT3(i915)) - i915_perf_load_test_config_sklgt3(i915); - else if (IS_SKL_GT4(i915)) - i915_perf_load_test_config_sklgt4(i915); - } else if (IS_CHERRYVIEW(i915)) { - i915_perf_load_test_config_chv(i915); - } else if (IS_BROADWELL(i915)) { - i915_perf_load_test_config_bdw(i915); - } else if (IS_HASWELL(i915)) { - i915_perf_load_test_config_hsw(i915); - } - - if (perf->test_config.id == 0) - goto sysfs_error; - - ret = sysfs_create_group(perf->metrics_kobj, - &perf->test_config.sysfs_metric); - if (ret) - goto sysfs_error; - - perf->test_config.perf = perf; - kref_init(&perf->test_config.ref); - goto exit; - -sysfs_error: - kobject_put(perf->metrics_kobj); - perf->metrics_kobj = NULL; - -exit: mutex_unlock(&perf->lock); } @@ -3815,9 +3772,6 @@ void i915_perf_unregister(struct drm_i915_private *i915) if (!perf->metrics_kobj) return; - sysfs_remove_group(perf->metrics_kobj, - &perf->test_config.sysfs_metric); - kobject_put(perf->metrics_kobj); perf->metrics_kobj = NULL; } @@ -4416,6 +4370,11 @@ void i915_perf_init(struct drm_i915_private *i915) ratelimit_set_flags(&perf->spurious_report_rs, RATELIMIT_MSG_ON_RELEASE); + ratelimit_state_init(&perf->tail_pointer_race, + 5 * HZ, 10); + ratelimit_set_flags(&perf->tail_pointer_race, + RATELIMIT_MSG_ON_RELEASE); + atomic64_set(&perf->noa_programming_delay, 500 * 1000 /* 500us */); @@ -4476,8 +4435,15 @@ int i915_perf_ioctl_version(void) * preemption on a particular context so that performance data is * accessible from a delta of MI_RPC reports without looking at the * OA buffer. + * + * 4: Add DRM_I915_PERF_PROP_ALLOWED_SSEU to limit what contexts can + * be run for the duration of the performance recording based on + * their SSEU configuration. + * + * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the + * interval for the hrtimer used to check for OA data. 
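From userspace, the additions show up as new key/value pairs in the property list handed to DRM_IOCTL_I915_PERF_OPEN. Below is a sketch of how an OA stream might be opened with the new DRM_I915_PERF_PROP_POLL_OA_PERIOD property; the device path, metric set id and exponent are placeholders, and the structure and field names should be checked against the i915_drm.h shipped with this series:

/* Build against uAPI headers that include this series, e.g.
 *   gcc -I/usr/include/libdrm -o oa_open oa_open.c
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* placeholder node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	uint64_t properties[] = {
		DRM_I915_PERF_PROP_SAMPLE_OA,      1,
		DRM_I915_PERF_PROP_OA_METRICS_SET, 1,	/* placeholder metric set id */
		DRM_I915_PERF_PROP_OA_FORMAT,      I915_OA_FORMAT_A32u40_A4u32_B8_C8,
		DRM_I915_PERF_PROP_OA_EXPONENT,    16,	/* placeholder period exponent */
		/* New in v5 of the interface: how often the kernel checks for data. */
		DRM_I915_PERF_PROP_POLL_OA_PERIOD, 500000,	/* 500us, above the 100us floor */
	};

	struct drm_i915_perf_open_param param = {
		.flags = I915_PERF_FLAG_FD_CLOEXEC | I915_PERF_FLAG_FD_NONBLOCK,
		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
		.properties_ptr = (uintptr_t)properties,
	};

	int stream_fd = ioctl(fd, DRM_IOCTL_I915_PERF_OPEN, &param);
	if (stream_fd < 0)
		perror("DRM_IOCTL_I915_PERF_OPEN");
	else
		close(stream_fd);

	close(fd);
	return 0;
}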
*/ - return 3; + return 5; } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h index a0e22f00f6cf..a36a455ae336 100644 --- a/drivers/gpu/drm/i915/i915_perf_types.h +++ b/drivers/gpu/drm/i915/i915_perf_types.h @@ -16,11 +16,13 @@ #include <linux/uuid.h> #include <linux/wait.h> +#include "gt/intel_sseu.h" #include "i915_reg.h" #include "intel_wakeref.h" struct drm_i915_private; struct file; +struct i915_active; struct i915_gem_context; struct i915_perf; struct i915_vma; @@ -272,21 +274,10 @@ struct i915_perf_stream { spinlock_t ptr_lock; /** - * @tails: One 'aging' tail pointer and one 'aged' tail pointer ready to - * used for reading. - * - * Initial values of 0xffffffff are invalid and imply that an - * update is required (and should be ignored by an attempted - * read) - */ - struct { - u32 offset; - } tails[2]; - - /** - * @aged_tail_idx: Index for the aged tail ready to read() data up to. + * @aging_tail: The last HW tail reported by HW. The data + * might not have made it to memory yet though. */ - unsigned int aged_tail_idx; + u32 aging_tail; /** * @aging_timestamp: A monotonic timestamp for when the current aging tail pointer @@ -302,6 +293,11 @@ struct i915_perf_stream { * OA buffer data to userspace. */ u32 head; + + /** + * @tail: The last verified tail that can be read by userspace. + */ + u32 tail; } oa_buffer; /** @@ -309,6 +305,12 @@ struct i915_perf_stream { * reprogrammed. */ struct i915_vma *noa_wait; + + /** + * @poll_oa_period: The period in nanoseconds at which the OA + * buffer should be checked for available data. + */ + u64 poll_oa_period; }; /** @@ -339,8 +341,8 @@ struct i915_oa_ops { * counter reports being sampled. May apply system constraints such as * disabling EU clock gating as required. */ - struct i915_request * - (*enable_metric_set)(struct i915_perf_stream *stream); + int (*enable_metric_set)(struct i915_perf_stream *stream, + struct i915_active *active); /** * @disable_metric_set: Remove system constraints associated with using @@ -408,12 +410,22 @@ struct i915_perf { struct i915_perf_stream *exclusive_stream; /** + * @sseu: sseu configuration selected to run while perf is active, + * applies to all contexts. + */ + struct intel_sseu sseu; + + /** * For rate limiting any notifications of spurious * invalid OA reports */ struct ratelimit_state spurious_report_rs; - struct i915_oa_config test_config; + /** + * For rate limiting any notifications of tail pointer + * race. 
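The tail_pointer_race state follows the same policy as spurious_report_rs: at most 10 notices every 5 seconds, with the number of suppressed callbacks reported when the window turns over. ratelimit_state_init() and __ratelimit() are kernel interfaces; the following userspace approximation only illustrates the interval/burst behaviour and is not the kernel implementation:

#include <stdio.h>
#include <time.h>

struct ratelimit {
	double interval_s;	/* window length, 5 s in the diff */
	int    burst;		/* messages allowed per window, 10 in the diff */
	int    printed;
	int    missed;
	double window_start;
};

static double now_s(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec + ts.tv_nsec / 1e9;
}

/* Return 1 if the caller may print, 0 if the message should be dropped. */
static int ratelimit_ok(struct ratelimit *rs)
{
	double t = now_s();

	if (t - rs->window_start >= rs->interval_s) {
		if (rs->missed)
			printf("%d callbacks suppressed\n", rs->missed);
		rs->window_start = t;
		rs->printed = 0;
		rs->missed = 0;
	}
	if (rs->printed < rs->burst) {
		rs->printed++;
		return 1;
	}
	rs->missed++;
	return 0;
}

int main(void)
{
	struct ratelimit rs = {
		.interval_s = 5.0, .burst = 10, .window_start = now_s(),
	};

	for (int i = 0; i < 25; i++)
		if (ratelimit_ok(&rs))
			printf("unlanded report notice %d\n", i);
	return 0;
}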
+ */ + struct ratelimit_state tail_pointer_race; u32 gen7_latched_oastatus1; u32 ctx_oactxctrl_offset; diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 2c062534eac1..230e9256ab30 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -1115,7 +1115,7 @@ void i915_pmu_register(struct drm_i915_private *i915) int ret = -ENOMEM; if (INTEL_GEN(i915) <= 2) { - dev_info(i915->drm.dev, "PMU not supported for this GPU."); + drm_info(&i915->drm, "PMU not supported for this GPU."); return; } @@ -1178,7 +1178,7 @@ err_name: if (!is_igp(i915)) kfree(pmu->name); err: - dev_notice(i915->drm.dev, "Failed to register PMU!\n"); + drm_notice(&i915->drm, "Failed to register PMU!\n"); } void i915_pmu_unregister(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 59e64acc2c56..edda3f29c8aa 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3094,6 +3094,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GT_BSD_CS_ERROR_INTERRUPT (1 << 15) #define GT_BSD_USER_INTERRUPT (1 << 12) #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */ +#define GT_WAIT_SEMAPHORE_INTERRUPT REG_BIT(11) /* bdw+ */ #define GT_CONTEXT_SWITCH_INTERRUPT (1 << 8) #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */ #define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4) @@ -4324,6 +4325,96 @@ enum { #define EXITLINE_MASK REG_GENMASK(12, 0) #define EXITLINE_SHIFT 0 +/* VRR registers */ +#define _TRANS_VRR_CTL_A 0x60420 +#define _TRANS_VRR_CTL_B 0x61420 +#define _TRANS_VRR_CTL_C 0x62420 +#define _TRANS_VRR_CTL_D 0x63420 +#define TRANS_VRR_CTL(trans) _MMIO_TRANS2(trans, _TRANS_VRR_CTL_A) +#define VRR_CTL_VRR_ENABLE REG_BIT(31) +#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30) +#define VRR_CTL_FLIP_LINE_EN REG_BIT(29) +#define VRR_CTL_LINE_COUNT_MASK REG_GENMASK(10, 3) +#define VRR_CTL_SW_FULLLINE_COUNT REG_BIT(0) + +#define _TRANS_VRR_VMAX_A 0x60424 +#define _TRANS_VRR_VMAX_B 0x61424 +#define _TRANS_VRR_VMAX_C 0x62424 +#define _TRANS_VRR_VMAX_D 0x63424 +#define TRANS_VRR_VMAX(trans) _MMIO_TRANS2(trans, _TRANS_VRR_VMAX_A) +#define VRR_VMAX_MASK REG_GENMASK(19, 0) + +#define _TRANS_VRR_VMIN_A 0x60434 +#define _TRANS_VRR_VMIN_B 0x61434 +#define _TRANS_VRR_VMIN_C 0x62434 +#define _TRANS_VRR_VMIN_D 0x63434 +#define TRANS_VRR_VMIN(trans) _MMIO_TRANS2(trans, _TRANS_VRR_VMIN_A) +#define VRR_VMIN_MASK REG_GENMASK(15, 0) + +#define _TRANS_VRR_VMAXSHIFT_A 0x60428 +#define _TRANS_VRR_VMAXSHIFT_B 0x61428 +#define _TRANS_VRR_VMAXSHIFT_C 0x62428 +#define _TRANS_VRR_VMAXSHIFT_D 0x63428 +#define TRANS_VRR_VMAXSHIFT(trans) _MMIO_TRANS2(trans, \ + _TRANS_VRR_VMAXSHIFT_A) +#define VRR_VMAXSHIFT_DEC_MASK REG_GENMASK(29, 16) +#define VRR_VMAXSHIFT_DEC REG_BIT(16) +#define VRR_VMAXSHIFT_INC_MASK REG_GENMASK(12, 0) + +#define _TRANS_VRR_STATUS_A 0x6042C +#define _TRANS_VRR_STATUS_B 0x6142C +#define _TRANS_VRR_STATUS_C 0x6242C +#define _TRANS_VRR_STATUS_D 0x6342C +#define TRANS_VRR_STATUS(trans) _MMIO_TRANS2(trans, _TRANS_VRR_STATUS_A) +#define VRR_STATUS_VMAX_REACHED REG_BIT(31) +#define VRR_STATUS_NOFLIP_TILL_BNDR REG_BIT(30) +#define VRR_STATUS_FLIP_BEF_BNDR REG_BIT(29) +#define VRR_STATUS_NO_FLIP_FRAME REG_BIT(28) +#define VRR_STATUS_VRR_EN_LIVE REG_BIT(27) +#define VRR_STATUS_FLIPS_SERVICED REG_BIT(26) +#define VRR_STATUS_VBLANK_MASK REG_GENMASK(22, 20) +#define STATUS_FSM_IDLE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 0) +#define 
STATUS_FSM_WAIT_TILL_FDB REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 1) +#define STATUS_FSM_WAIT_TILL_FS REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 2) +#define STATUS_FSM_WAIT_TILL_FLIP REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 3) +#define STATUS_FSM_PIPELINE_FILL REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 4) +#define STATUS_FSM_ACTIVE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 5) +#define STATUS_FSM_LEGACY_VBLANK REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 6) + +#define _TRANS_VRR_VTOTAL_PREV_A 0x60480 +#define _TRANS_VRR_VTOTAL_PREV_B 0x61480 +#define _TRANS_VRR_VTOTAL_PREV_C 0x62480 +#define _TRANS_VRR_VTOTAL_PREV_D 0x63480 +#define TRANS_VRR_VTOTAL_PREV(trans) _MMIO_TRANS2(trans, \ + _TRANS_VRR_VTOTAL_PREV_A) +#define VRR_VTOTAL_FLIP_BEFR_BNDR REG_BIT(31) +#define VRR_VTOTAL_FLIP_AFTER_BNDR REG_BIT(30) +#define VRR_VTOTAL_FLIP_AFTER_DBLBUF REG_BIT(29) +#define VRR_VTOTAL_PREV_FRAME_MASK REG_GENMASK(19, 0) + +#define _TRANS_VRR_FLIPLINE_A 0x60438 +#define _TRANS_VRR_FLIPLINE_B 0x61438 +#define _TRANS_VRR_FLIPLINE_C 0x62438 +#define _TRANS_VRR_FLIPLINE_D 0x63438 +#define TRANS_VRR_FLIPLINE(trans) _MMIO_TRANS2(trans, \ + _TRANS_VRR_FLIPLINE_A) +#define VRR_FLIPLINE_MASK REG_GENMASK(19, 0) + +#define _TRANS_VRR_STATUS2_A 0x6043C +#define _TRANS_VRR_STATUS2_B 0x6143C +#define _TRANS_VRR_STATUS2_C 0x6243C +#define _TRANS_VRR_STATUS2_D 0x6343C +#define TRANS_VRR_STATUS2(trans) _MMIO_TRANS2(trans, _TRANS_VRR_STATUS2_A) +#define VRR_STATUS2_VERT_LN_CNT_MASK REG_GENMASK(19, 0) + +#define _TRANS_PUSH_A 0x60A70 +#define _TRANS_PUSH_B 0x61A70 +#define _TRANS_PUSH_C 0x62A70 +#define _TRANS_PUSH_D 0x63A70 +#define TRANS_PUSH(trans) _MMIO_TRANS2(trans, _TRANS_PUSH_A) +#define TRANS_PUSH_EN REG_BIT(31) +#define TRANS_PUSH_SEND REG_BIT(30) + /* * HSW+ eDP PSR registers * @@ -6764,7 +6855,7 @@ enum { #define PLANE_CTL_FORMAT_P012 (5 << 24) #define PLANE_CTL_FORMAT_XRGB_16161616F (6 << 24) #define PLANE_CTL_FORMAT_P016 (7 << 24) -#define PLANE_CTL_FORMAT_AYUV (8 << 24) +#define PLANE_CTL_FORMAT_XYUV (8 << 24) #define PLANE_CTL_FORMAT_INDEXED (12 << 24) #define PLANE_CTL_FORMAT_RGB_565 (14 << 24) #define ICL_PLANE_CTL_FORMAT_MASK (0x1f << 23) @@ -9700,8 +9791,11 @@ enum skl_power_gate { #define TRANS_DDI_BPC_10 (1 << 20) #define TRANS_DDI_BPC_6 (2 << 20) #define TRANS_DDI_BPC_12 (3 << 20) +#define TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK REG_GENMASK(19, 18) /* bdw-cnl */ +#define TRANS_DDI_PORT_SYNC_MASTER_SELECT(x) REG_FIELD_PREP(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, (x)) #define TRANS_DDI_PVSYNC (1 << 17) #define TRANS_DDI_PHSYNC (1 << 16) +#define TRANS_DDI_PORT_SYNC_ENABLE REG_BIT(15) /* bdw-cnl */ #define TRANS_DDI_EDP_INPUT_MASK (7 << 12) #define TRANS_DDI_EDP_INPUT_A_ON (0 << 12) #define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12) @@ -9728,12 +9822,10 @@ enum skl_power_gate { #define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404 #define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404 #define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04 -#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \ - _TRANS_DDI_FUNC_CTL2_A) -#define PORT_SYNC_MODE_ENABLE (1 << 4) -#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0) -#define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0) -#define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0 +#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL2_A) +#define PORT_SYNC_MODE_ENABLE REG_BIT(4) +#define PORT_SYNC_MODE_MASTER_SELECT_MASK REG_GENMASK(2, 0) +#define PORT_SYNC_MODE_MASTER_SELECT(x) REG_FIELD_PREP(PORT_SYNC_MODE_MASTER_SELECT_MASK, (x)) /* DisplayPort Transport Control */ #define _DP_TP_CTL_A 0x64040 @@ -9794,6 +9886,24 @@ enum 
skl_power_gate { #define DDI_BUF_BALANCE_LEG_ENABLE (1 << 31) #define DDI_BUF_TRANS_HI(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8 + 4) +/* DDI DP Compliance Control */ +#define _DDI_DP_COMP_CTL_A 0x605F0 +#define _DDI_DP_COMP_CTL_B 0x615F0 +#define DDI_DP_COMP_CTL(pipe) _MMIO_PIPE(pipe, _DDI_DP_COMP_CTL_A, _DDI_DP_COMP_CTL_B) +#define DDI_DP_COMP_CTL_ENABLE (1 << 31) +#define DDI_DP_COMP_CTL_D10_2 (0 << 28) +#define DDI_DP_COMP_CTL_SCRAMBLED_0 (1 << 28) +#define DDI_DP_COMP_CTL_PRBS7 (2 << 28) +#define DDI_DP_COMP_CTL_CUSTOM80 (3 << 28) +#define DDI_DP_COMP_CTL_HBR2 (4 << 28) +#define DDI_DP_COMP_CTL_SCRAMBLED_1 (5 << 28) +#define DDI_DP_COMP_CTL_HBR2_RESET (0xFC << 0) + +/* DDI DP Compliance Pattern */ +#define _DDI_DP_COMP_PAT_A 0x605F4 +#define _DDI_DP_COMP_PAT_B 0x615F4 +#define DDI_DP_COMP_PAT(pipe, i) _MMIO(_PIPE(pipe, _DDI_DP_COMP_PAT_A, _DDI_DP_COMP_PAT_B) + (i) * 4) + /* Sideband Interface (SBI) is programmed indirectly, via * SBI_ADDR, which contains the register offset; and SBI_DATA, * which contains the payload */ @@ -10741,6 +10851,12 @@ enum skl_power_gate { #define _PAL_PREC_MULTI_SEG_DATA_A 0x4A40C #define _PAL_PREC_MULTI_SEG_DATA_B 0x4AC0C +#define PAL_PREC_MULTI_SEG_RED_LDW_MASK REG_GENMASK(29, 24) +#define PAL_PREC_MULTI_SEG_RED_UDW_MASK REG_GENMASK(29, 20) +#define PAL_PREC_MULTI_SEG_GREEN_LDW_MASK REG_GENMASK(19, 14) +#define PAL_PREC_MULTI_SEG_GREEN_UDW_MASK REG_GENMASK(19, 10) +#define PAL_PREC_MULTI_SEG_BLUE_LDW_MASK REG_GENMASK(9, 4) +#define PAL_PREC_MULTI_SEG_BLUE_UDW_MASK REG_GENMASK(9, 0) #define PREC_PAL_MULTI_SEG_INDEX(pipe) _MMIO_PIPE(pipe, \ _PAL_PREC_MULTI_SEG_INDEX_A, \ diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index c0df71d7d0ff..22635bbabf06 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -101,6 +101,11 @@ static signed long i915_fence_wait(struct dma_fence *fence, timeout); } +struct kmem_cache *i915_request_slab_cache(void) +{ + return global.slab_requests; +} + static void i915_fence_release(struct dma_fence *fence) { struct i915_request *rq = to_request(fence); @@ -115,6 +120,10 @@ static void i915_fence_release(struct dma_fence *fence) i915_sw_fence_fini(&rq->submit); i915_sw_fence_fini(&rq->semaphore); + /* Keep one request on each engine for reserved use under mempressure */ + if (!cmpxchg(&rq->engine->request_pool, NULL, rq)) + return; + kmem_cache_free(global.slab_requests, rq); } @@ -629,14 +638,22 @@ static void retire_requests(struct intel_timeline *tl) } static noinline struct i915_request * -request_alloc_slow(struct intel_timeline *tl, gfp_t gfp) +request_alloc_slow(struct intel_timeline *tl, + struct i915_request **rsvd, + gfp_t gfp) { struct i915_request *rq; - if (list_empty(&tl->requests)) - goto out; + /* If we cannot wait, dip into our reserves */ + if (!gfpflags_allow_blocking(gfp)) { + rq = xchg(rsvd, NULL); + if (!rq) /* Use the normal failure path for one final WARN */ + goto out; - if (!gfpflags_allow_blocking(gfp)) + return rq; + } + + if (list_empty(&tl->requests)) goto out; /* Move our oldest request to the slab-cache (if not in use!) 
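The request_pool handling in the i915_request hunks is a lock-free, single-slot reserve: the release path parks one request per engine with cmpxchg(&engine->request_pool, NULL, rq), and the cannot-block allocation slow path reclaims it with xchg(rsvd, NULL). A standalone C11 sketch of the same pattern; the item type and harness are made up:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct item { int id; };

/* One-slot reserve: holds at most one spare object. */
static _Atomic(struct item *) reserve;

/* Release path: keep one object back for emergencies, free the rest. */
static void release_item(struct item *it)
{
	struct item *expected = NULL;

	if (atomic_compare_exchange_strong(&reserve, &expected, it))
		return;		/* parked in the reserve slot */
	free(it);
}

/* Cannot-block allocation path: take the reserved object, if any. */
static struct item *alloc_item_slow(void)
{
	return atomic_exchange(&reserve, NULL);
}

int main(void)
{
	struct item *a = malloc(sizeof(*a));
	struct item *b = malloc(sizeof(*b));

	if (!a || !b)
		return 1;
	a->id = 1;
	b->id = 2;

	release_item(a);	/* slot empty: parked */
	release_item(b);	/* slot full: freed */

	struct item *spare = alloc_item_slow();
	printf("spare id : %d\n", spare ? spare->id : -1);
	printf("now empty: %s\n", alloc_item_slow() ? "no" : "yes");
	free(spare);
	return 0;
}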
*/ @@ -721,7 +738,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) rq = kmem_cache_alloc(global.slab_requests, gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); if (unlikely(!rq)) { - rq = request_alloc_slow(tl, gfp); + rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp); if (!rq) { ret = -ENOMEM; goto err_unreserve; @@ -1444,9 +1461,7 @@ void i915_request_add(struct i915_request *rq) if (list_empty(&rq->sched.signalers_list)) attr.priority |= I915_PRIORITY_WAIT; - local_bh_disable(); __i915_request_queue(rq, &attr); - local_bh_enable(); /* Kick the execlists tasklet if just scheduled */ mutex_unlock(&tl->mutex); } diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 3c552bfea67a..d8ce908e1346 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -300,6 +300,8 @@ static inline bool dma_fence_is_i915(const struct dma_fence *fence) return fence->ops == &i915_fence_ops; } +struct kmem_cache *i915_request_slab_cache(void); + struct i915_request * __must_check __i915_request_create(struct intel_context *ce, gfp_t gfp); struct i915_request * __must_check diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index 68b06a7ba667..37cfcf5b321b 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -209,6 +209,12 @@ static void kick_submission(struct intel_engine_cs *engine, if (!inflight) goto unlock; + ENGINE_TRACE(engine, + "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n", + prio, + rq->fence.context, rq->fence.seqno, + inflight->fence.context, inflight->fence.seqno, + inflight->sched.attr.priority); engine->execlists.queue_priority_hint = prio; /* @@ -464,11 +470,15 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node, if (!dep) return -ENOMEM; + local_bh_disable(); + if (!__i915_sched_node_add_dependency(node, signal, dep, I915_DEPENDENCY_EXTERNAL | I915_DEPENDENCY_ALLOC)) i915_dependency_free(dep); + local_bh_enable(); /* kick submission tasklet */ + return 0; } diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index a3d38e089b6e..7daf81f55c90 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -421,7 +421,7 @@ static void timer_i915_sw_fence_wake(struct timer_list *t) if (!fence) return; - pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%pS)\n", + pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%ps)\n", cb->dma->ops->get_driver_name(cb->dma), cb->dma->ops->get_timeline_name(cb->dma), cb->dma->seqno, diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c index 997b2998f1f2..a3a81bb8f2c3 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence_work.c +++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c @@ -38,7 +38,10 @@ fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) if (!f->dma.error) { dma_fence_get(&f->dma); - queue_work(system_unbound_wq, &f->work); + if (test_bit(DMA_FENCE_WORK_IMM, &f->dma.flags)) + fence_work(&f->work); + else + queue_work(system_unbound_wq, &f->work); } else { fence_complete(f); } diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.h b/drivers/gpu/drm/i915/i915_sw_fence_work.h index 3a22b287e201..2c409f11c5c5 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence_work.h +++ b/drivers/gpu/drm/i915/i915_sw_fence_work.h @@ -32,6 +32,10 @@ struct dma_fence_work { const struct 
dma_fence_work_ops *ops; }; +enum { + DMA_FENCE_WORK_IMM = DMA_FENCE_FLAG_USER_BITS, +}; + void dma_fence_work_init(struct dma_fence_work *f, const struct dma_fence_work_ops *ops); int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal); @@ -41,4 +45,23 @@ static inline void dma_fence_work_commit(struct dma_fence_work *f) i915_sw_fence_commit(&f->chain); } +/** + * dma_fence_work_commit_imm: Commit the fence, and if possible execute locally. + * @f: the fenced worker + * + * Instead of always scheduling a worker to execute the callback (see + * dma_fence_work_commit()), we try to execute the callback immediately in + * the local context. It is required that the fence be committed before it + * is published, and that no other threads try to tamper with the number + * of asynchronous waits on the fence (or else the callback will be + * executed in the wrong context, i.e. not the callers). + */ +static inline void dma_fence_work_commit_imm(struct dma_fence_work *f) +{ + if (atomic_read(&f->chain.pending) <= 1) + __set_bit(DMA_FENCE_WORK_IMM, &f->dma.flags); + + dma_fence_work_commit(f); +} + #endif /* I915_SW_FENCE_WORK_H */ diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c index ed69b5d4a375..b3a24eac21f1 100644 --- a/drivers/gpu/drm/i915/i915_switcheroo.c +++ b/drivers/gpu/drm/i915/i915_switcheroo.c @@ -20,14 +20,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, } if (state == VGA_SWITCHEROO_ON) { - pr_info("switched on\n"); + drm_info(&i915->drm, "switched on\n"); i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING; /* i915 resume handler doesn't set to D0 */ pci_set_power_state(pdev, PCI_D0); i915_resume_switcheroo(i915); i915->drm.switch_power_state = DRM_SWITCH_POWER_ON; } else { - pr_info("switched off\n"); + drm_info(&i915->drm, "switched off\n"); i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING; i915_suspend_switcheroo(i915, pmm); i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF; diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c index 029854ae65fc..e28eae4a8f70 100644 --- a/drivers/gpu/drm/i915/i915_utils.c +++ b/drivers/gpu/drm/i915/i915_utils.c @@ -101,5 +101,6 @@ void set_timer_ms(struct timer_list *t, unsigned long timeout) */ barrier(); - mod_timer(t, jiffies + timeout); + /* Keep t->expires = 0 reserved to indicate a canceled timer. */ + mod_timer(t, jiffies + timeout ?: 1); } diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 08699fa069aa..f0383a68c981 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -608,18 +608,6 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color) return true; } -static void assert_bind_count(const struct drm_i915_gem_object *obj) -{ - /* - * Combine the assertion that the object is bound and that we have - * pinned its pages. But we should never have bound the object - * more than we have pinned its pages. (For complete accuracy, we - * assume that no else is pinning the pages, but as a rough assertion - * that we will not run into problems later, this will do!) 
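The set_timer_ms() change above leans on GCC's a ?: b extension and on operator precedence: jiffies + timeout ?: 1 parses as (jiffies + timeout) ?: 1, so only a sum that wraps to exactly 0 gets bumped to 1, keeping expires == 0 free to mean "cancelled". A small demonstration of that parse, with types simplified to plain unsigned long; it builds with GCC or Clang, which provide the extension the kernel uses:

#include <stdio.h>

/* GNU C extension: a ?: b evaluates a once and yields it if non-zero, else b. */
static unsigned long pick_expires(unsigned long jiffies, unsigned long timeout)
{
	return jiffies + timeout ?: 1;	/* same as (jiffies + timeout) ?: 1 */
}

int main(void)
{
	/* Normal case: the sum is non-zero and used as-is. */
	printf("%lu\n", pick_expires(1000, 250));		/* 1250 */

	/* Wraparound case: the sum is 0, so 1 is substituted and the
	 * reserved "cancelled" value never reaches the timer. */
	printf("%lu\n", pick_expires((unsigned long)-5, 5));	/* 1 */
	return 0;
}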
- */ - GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count)); -} - /** * i915_vma_insert - finds a slot for the vma in its address space * @vma: the vma @@ -738,12 +726,6 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color)); - if (vma->obj) { - struct drm_i915_gem_object *obj = vma->obj; - - atomic_inc(&obj->bind_count); - assert_bind_count(obj); - } list_add_tail(&vma->vm_link, &vma->vm->bound_list); return 0; @@ -761,12 +743,6 @@ i915_vma_detach(struct i915_vma *vma) * it to be reaped by the shrinker. */ list_del(&vma->vm_link); - if (vma->obj) { - struct drm_i915_gem_object *obj = vma->obj; - - assert_bind_count(obj); - atomic_dec(&obj->bind_count); - } } static bool try_qad_pin(struct i915_vma *vma, unsigned int flags) @@ -913,11 +889,30 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) if (flags & PIN_GLOBAL) wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); - /* No more allocations allowed once we hold vm->mutex */ - err = mutex_lock_interruptible(&vma->vm->mutex); + /* + * Differentiate between user/kernel vma inside the aliasing-ppgtt. + * + * We conflate the Global GTT with the user's vma when using the + * aliasing-ppgtt, but it is still vitally important to try and + * keep the use cases distinct. For example, userptr objects are + * not allowed inside the Global GTT as that will cause lock + * inversions when we have to evict them the mmu_notifier callbacks - + * but they are allowed to be part of the user ppGTT which can never + * be mapped. As such we try to give the distinct users of the same + * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt + * and i915_ppgtt separate]. + * + * NB this may cause us to mask real lock inversions -- while the + * code is safe today, lockdep may not be able to spot future + * transgressions. + */ + err = mutex_lock_interruptible_nested(&vma->vm->mutex, + !(flags & PIN_GLOBAL)); if (err) goto err_fence; + /* No more allocations allowed now we hold vm->mutex */ + if (unlikely(i915_vma_is_closed(vma))) { err = -ENOENT; goto err_unlock; @@ -980,7 +975,7 @@ err_unlock: mutex_unlock(&vma->vm->mutex); err_fence: if (work) - dma_fence_work_commit(&work->base); + dma_fence_work_commit_imm(&work->base); if (wakeref) intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); err_pages: @@ -1172,7 +1167,8 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq) GEM_BUG_ON(!i915_vma_is_pinned(vma)); /* Wait for the vma to be bound before we start! */ - err = i915_request_await_active(rq, &vma->active, 0); + err = i915_request_await_active(rq, &vma->active, + I915_ACTIVE_AWAIT_EXCL); if (err) return err; @@ -1213,6 +1209,10 @@ int i915_vma_move_to_active(struct i915_vma *vma, dma_resv_add_shared_fence(vma->resv, &rq->fence); obj->write_domain = 0; } + + if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence) + i915_active_add_request(&vma->fence->active, rq); + obj->read_domains |= I915_GEM_GPU_DOMAINS; obj->mm.dirty = true; @@ -1226,18 +1226,6 @@ int __i915_vma_unbind(struct i915_vma *vma) lockdep_assert_held(&vma->vm->mutex); - /* - * First wait upon any activity as retiring the request may - * have side-effects such as unpinning or even unbinding this vma. - * - * XXX Actually waiting under the vm->mutex is a hinderance and - * should be pipelined wherever possible. 
In cases where that is - * unavoidable, we should lift the wait to before the mutex. - */ - ret = i915_vma_sync(vma); - if (ret) - return ret; - if (i915_vma_is_pinned(vma)) { vma_print_allocator(vma, "is pinned"); return -EAGAIN; @@ -1259,6 +1247,9 @@ int __i915_vma_unbind(struct i915_vma *vma) GEM_BUG_ON(i915_vma_is_active(vma)); if (i915_vma_is_map_and_fenceable(vma)) { + /* Force a pagefault for domain tracking on next user access */ + i915_vma_revoke_mmap(vma); + /* * Check that we have flushed all writes through the GGTT * before the unbind, other due to non-strict nature of those @@ -1275,12 +1266,7 @@ int __i915_vma_unbind(struct i915_vma *vma) i915_vma_flush_writes(vma); /* release the fence reg _after_ flushing */ - ret = i915_vma_revoke_fence(vma); - if (ret) - return ret; - - /* Force a pagefault for domain tracking on next user access */ - i915_vma_revoke_mmap(vma); + i915_vma_revoke_fence(vma); __i915_vma_iounmap(vma); clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); @@ -1311,16 +1297,21 @@ int i915_vma_unbind(struct i915_vma *vma) if (!drm_mm_node_allocated(&vma->node)) return 0; - if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) - /* XXX not always required: nop_clear_range */ - wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); - /* Optimistic wait before taking the mutex */ err = i915_vma_sync(vma); if (err) goto out_rpm; - err = mutex_lock_interruptible(&vm->mutex); + if (i915_vma_is_pinned(vma)) { + vma_print_allocator(vma, "is pinned"); + return -EAGAIN; + } + + if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) + /* XXX not always required: nop_clear_range */ + wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); + + err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref); if (err) goto out_rpm; diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index e1ced1df13e1..8ad1daabcd58 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -30,10 +30,10 @@ #include <drm/drm_mm.h> +#include "gt/intel_ggtt_fencing.h" #include "gem/i915_gem_object.h" #include "i915_gem_gtt.h" -#include "i915_gem_fence_reg.h" #include "i915_active.h" #include "i915_request.h" @@ -326,7 +326,7 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma) * True if the vma has a fence, false otherwise. 
*/ int __must_check i915_vma_pin_fence(struct i915_vma *vma); -int __must_check i915_vma_revoke_fence(struct i915_vma *vma); +void i915_vma_revoke_fence(struct i915_vma *vma); int __i915_vma_pin_fence(struct i915_vma *vma); diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index d7fe12734db8..db8496b4c38d 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -980,35 +980,32 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) drm_info(&dev_priv->drm, "Display fused off, disabling\n"); info->pipe_mask = 0; + info->cpu_transcoder_mask = 0; } else if (fuse_strap & IVB_PIPE_C_DISABLE) { drm_info(&dev_priv->drm, "PipeC fused off\n"); info->pipe_mask &= ~BIT(PIPE_C); + info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); } } else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) { u32 dfsm = I915_READ(SKL_DFSM); - u8 enabled_mask = info->pipe_mask; - - if (dfsm & SKL_DFSM_PIPE_A_DISABLE) - enabled_mask &= ~BIT(PIPE_A); - if (dfsm & SKL_DFSM_PIPE_B_DISABLE) - enabled_mask &= ~BIT(PIPE_B); - if (dfsm & SKL_DFSM_PIPE_C_DISABLE) - enabled_mask &= ~BIT(PIPE_C); - if (INTEL_GEN(dev_priv) >= 12 && - (dfsm & TGL_DFSM_PIPE_D_DISABLE)) - enabled_mask &= ~BIT(PIPE_D); - /* - * At least one pipe should be enabled and if there are - * disabled pipes, they should be the last ones, with no holes - * in the mask. - */ - if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1)) - drm_err(&dev_priv->drm, - "invalid pipe fuse configuration: enabled_mask=0x%x\n", - enabled_mask); - else - info->pipe_mask = enabled_mask; + if (dfsm & SKL_DFSM_PIPE_A_DISABLE) { + info->pipe_mask &= ~BIT(PIPE_A); + info->cpu_transcoder_mask &= ~BIT(TRANSCODER_A); + } + if (dfsm & SKL_DFSM_PIPE_B_DISABLE) { + info->pipe_mask &= ~BIT(PIPE_B); + info->cpu_transcoder_mask &= ~BIT(TRANSCODER_B); + } + if (dfsm & SKL_DFSM_PIPE_C_DISABLE) { + info->pipe_mask &= ~BIT(PIPE_C); + info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); + } + if (INTEL_GEN(dev_priv) >= 12 && + (dfsm & TGL_DFSM_PIPE_D_DISABLE)) { + info->pipe_mask &= ~BIT(PIPE_D); + info->cpu_transcoder_mask &= ~BIT(TRANSCODER_D); + } if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE) info->display.has_hdcp = 0; diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 1ecb9df2de91..cce6a72c5ebc 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -168,6 +168,7 @@ struct intel_device_info { u32 display_mmio_offset; u8 pipe_mask; + u8 cpu_transcoder_mask; #define DEFINE_FLAG(name) u8 name:1 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG); diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c index 6b922efb1d7c..8aa12cad93ce 100644 --- a/drivers/gpu/drm/i915/intel_dram.c +++ b/drivers/gpu/drm/i915/intel_dram.c @@ -495,6 +495,5 @@ void intel_dram_edram_detect(struct drm_i915_private *i915) else i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap); - dev_info(i915->drm.dev, - "Found %uMB of eDRAM\n", i915->edram_size_mb); + drm_info(&i915->drm, "Found %uMB of eDRAM\n", i915->edram_size_mb); } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 8375054ba27d..b632b6bb9c3e 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4016,6 +4016,7 @@ static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state, int color_plane); static void skl_compute_plane_wm(const struct intel_crtc_state 
*crtc_state, int level, + unsigned int latency, const struct skl_wm_params *wp, const struct skl_wm_level *result_prev, struct skl_wm_level *result /* out */); @@ -4038,7 +4039,9 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state, drm_WARN_ON(&dev_priv->drm, ret); for (level = 0; level <= max_level; level++) { - skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm); + unsigned int latency = dev_priv->wm.skl_latency[level]; + + skl_compute_plane_wm(crtc_state, level, latency, &wp, &wm, &wm); if (wm.min_ddb_alloc == U16_MAX) break; @@ -4972,12 +4975,12 @@ static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level) static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, int level, + unsigned int latency, const struct skl_wm_params *wp, const struct skl_wm_level *result_prev, struct skl_wm_level *result /* out */) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - u32 latency = dev_priv->wm.skl_latency[level]; uint_fixed_16_16_t method1, method2; uint_fixed_16_16_t selected_result; u32 res_blocks, res_lines, min_ddb_alloc = 0; @@ -5106,9 +5109,10 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state, for (level = 0; level <= max_level; level++) { struct skl_wm_level *result = &levels[level]; + unsigned int latency = dev_priv->wm.skl_latency[level]; - skl_compute_plane_wm(crtc_state, level, wm_params, - result_prev, result); + skl_compute_plane_wm(crtc_state, level, latency, + wm_params, result_prev, result); result_prev = result; } diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index 1447e7516cb7..3f13baaef058 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c @@ -60,7 +60,7 @@ static void __vlv_punit_get(struct drm_i915_private *i915) * to the Valleyview P-unit and not all sideband communications. 
*/ if (IS_VALLEYVIEW(i915)) { - pm_qos_update_request(&i915->sb_qos, 0); + cpu_latency_qos_update_request(&i915->sb_qos, 0); on_each_cpu(ping, NULL, 1); } } @@ -68,7 +68,8 @@ static void __vlv_punit_get(struct drm_i915_private *i915) static void __vlv_punit_put(struct drm_i915_private *i915) { if (IS_VALLEYVIEW(i915)) - pm_qos_update_request(&i915->sb_qos, PM_QOS_DEFAULT_VALUE); + cpu_latency_qos_update_request(&i915->sb_qos, + PM_QOS_DEFAULT_VALUE); iosf_mbi_punit_release(); } diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index abb18b90d7c3..fa86b7ab2d99 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -665,7 +665,7 @@ void intel_uncore_forcewake_user_put(struct intel_uncore *uncore) mmio_debug_resume(uncore->debug); if (check_for_unclaimed_mmio(uncore)) - dev_info(uncore->i915->drm.dev, + drm_info(&uncore->i915->drm, "Invalid mmio detected during user access\n"); spin_unlock(&uncore->debug->lock); @@ -735,6 +735,28 @@ void intel_uncore_forcewake_put(struct intel_uncore *uncore, } /** + * intel_uncore_forcewake_flush - flush the delayed release + * @uncore: the intel_uncore structure + * @fw_domains: forcewake domains to flush + */ +void intel_uncore_forcewake_flush(struct intel_uncore *uncore, + enum forcewake_domains fw_domains) +{ + struct intel_uncore_forcewake_domain *domain; + unsigned int tmp; + + if (!uncore->funcs.force_wake_put) + return; + + fw_domains &= uncore->fw_domains; + for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) { + WRITE_ONCE(domain->active, false); + if (hrtimer_cancel(&domain->timer)) + intel_uncore_fw_release_timer(&domain->timer); + } +} + +/** * intel_uncore_forcewake_put__locked - grab forcewake domain references * @uncore: the intel_uncore structure * @fw_domains: forcewake domains to get reference on diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index dcfa243892c6..8d3aa8b9acf9 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -209,7 +209,11 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore, enum forcewake_domains domains); void intel_uncore_forcewake_put(struct intel_uncore *uncore, enum forcewake_domains domains); -/* Like above but the caller must manage the uncore.lock itself. +void intel_uncore_forcewake_flush(struct intel_uncore *uncore, + enum forcewake_domains fw_domains); + +/* + * Like above but the caller must manage the uncore.lock itself. * Must be used with I915_READ_FW and friends. */ void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore, diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c index 8fbf6f4d3f26..dfd87d082218 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.c +++ b/drivers/gpu/drm/i915/intel_wakeref.c @@ -70,11 +70,12 @@ unlock: void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags) { - INTEL_WAKEREF_BUG_ON(work_pending(&wf->work)); + INTEL_WAKEREF_BUG_ON(delayed_work_pending(&wf->work)); /* Assume we are not in process context and so cannot sleep. 
*/ if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) { - schedule_work(&wf->work); + mod_delayed_work(system_wq, &wf->work, + FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags)); return; } @@ -83,7 +84,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags) static void __intel_wakeref_put_work(struct work_struct *wrk) { - struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work); + struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work.work); if (atomic_add_unless(&wf->count, -1, 1)) return; @@ -104,8 +105,9 @@ void __intel_wakeref_init(struct intel_wakeref *wf, atomic_set(&wf->count, 0); wf->wakeref = 0; - INIT_WORK(&wf->work, __intel_wakeref_put_work); - lockdep_init_map(&wf->work.lockdep_map, "wakeref.work", &key->work, 0); + INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work); + lockdep_init_map(&wf->work.work.lockdep_map, + "wakeref.work", &key->work, 0); } int intel_wakeref_wait_for_idle(struct intel_wakeref *wf) diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h index 7d1e676b71ef..545c8f277c46 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.h +++ b/drivers/gpu/drm/i915/intel_wakeref.h @@ -8,6 +8,7 @@ #define INTEL_WAKEREF_H #include <linux/atomic.h> +#include <linux/bitfield.h> #include <linux/bits.h> #include <linux/lockdep.h> #include <linux/mutex.h> @@ -41,7 +42,7 @@ struct intel_wakeref { struct intel_runtime_pm *rpm; const struct intel_wakeref_ops *ops; - struct work_struct work; + struct delayed_work work; }; struct intel_wakeref_lockclass { @@ -117,6 +118,11 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf) return atomic_inc_not_zero(&wf->count); } +enum { + INTEL_WAKEREF_PUT_ASYNC_BIT = 0, + __INTEL_WAKEREF_PUT_LAST_BIT__ +}; + /** * intel_wakeref_put_flags: Release the wakeref * @wf: the wakeref @@ -134,7 +140,9 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf) */ static inline void __intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags) -#define INTEL_WAKEREF_PUT_ASYNC BIT(0) +#define INTEL_WAKEREF_PUT_ASYNC BIT(INTEL_WAKEREF_PUT_ASYNC_BIT) +#define INTEL_WAKEREF_PUT_DELAY \ + GENMASK(BITS_PER_LONG - 1, __INTEL_WAKEREF_PUT_LAST_BIT__) { INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0); if (unlikely(!atomic_add_unless(&wf->count, -1, 1))) @@ -154,6 +162,14 @@ intel_wakeref_put_async(struct intel_wakeref *wf) __intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC); } +static inline void +intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay) +{ + __intel_wakeref_put(wf, + INTEL_WAKEREF_PUT_ASYNC | + FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay)); +} + /** * intel_wakeref_lock: Lock the wakeref (mutex) * @wf: the wakeref @@ -194,7 +210,7 @@ intel_wakeref_unlock_wait(struct intel_wakeref *wf) { mutex_lock(&wf->mutex); mutex_unlock(&wf->mutex); - flush_work(&wf->work); + flush_delayed_work(&wf->work); } /** diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c index 2bb9f9f9a50a..2186386a45c8 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.c +++ b/drivers/gpu/drm/i915/intel_wopcm.c @@ -86,7 +86,7 @@ void intel_wopcm_init_early(struct intel_wopcm *wopcm) else wopcm->size = GEN9_WOPCM_SIZE; - DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "WOPCM: %uK\n", wopcm->size / 1024); + drm_dbg(&i915->drm, "WOPCM: %uK\n", wopcm->size / 1024); } static inline u32 context_reserved_size(struct drm_i915_private *i915) @@ -112,7 +112,7 @@ static inline bool gen9_check_dword_gap(struct drm_i915_private *i915, offset = guc_wopcm_base + GEN9_GUC_WOPCM_OFFSET; 
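A minimal sketch, not taken from this series, of the bitfield trick the intel_wakeref changes above rely on: the low bit of the put flags stays a boolean while the remaining bits carry a relative delay, packed with FIELD_PREP() and recovered with FIELD_GET() before being handed to mod_delayed_work(). The example_* names below are hypothetical.

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_PUT_ASYNC	BIT(0)
#define EXAMPLE_PUT_DELAY	GENMASK(BITS_PER_LONG - 1, 1)

/* Pack an async put with a delay (in jiffies) into one flags word. */
static inline unsigned long example_pack_flags(unsigned long delay)
{
	return EXAMPLE_PUT_ASYNC | FIELD_PREP(EXAMPLE_PUT_DELAY, delay);
}

/* Recover the delay; this is what mod_delayed_work() would be given. */
static inline unsigned long example_unpack_delay(unsigned long flags)
{
	return FIELD_GET(EXAMPLE_PUT_DELAY, flags);
}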
if (offset > guc_wopcm_size || (guc_wopcm_size - offset) < sizeof(u32)) { - dev_err(i915->drm.dev, + drm_err(&i915->drm, "WOPCM: invalid GuC region size: %uK < %uK\n", guc_wopcm_size / SZ_1K, (u32)(offset + sizeof(u32)) / SZ_1K); @@ -131,7 +131,7 @@ static inline bool gen9_check_huc_fw_fits(struct drm_i915_private *i915, * firmware uploading would fail. */ if (huc_fw_size > guc_wopcm_size - GUC_WOPCM_RESERVED) { - dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n", + drm_err(&i915->drm, "WOPCM: no space for %s: %uK < %uK\n", intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC), (guc_wopcm_size - GUC_WOPCM_RESERVED) / SZ_1K, huc_fw_size / 1024); @@ -166,7 +166,7 @@ static inline bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size, size = wopcm_size - ctx_rsvd; if (unlikely(range_overflows(guc_wopcm_base, guc_wopcm_size, size))) { - dev_err(i915->drm.dev, + drm_err(&i915->drm, "WOPCM: invalid GuC region layout: %uK + %uK > %uK\n", guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K, size / SZ_1K); @@ -175,7 +175,7 @@ static inline bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size, size = guc_fw_size + GUC_WOPCM_RESERVED + GUC_WOPCM_STACK_RESERVED; if (unlikely(guc_wopcm_size < size)) { - dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n", + drm_err(&i915->drm, "WOPCM: no space for %s: %uK < %uK\n", intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc_wopcm_size / SZ_1K, size / SZ_1K); return false; @@ -183,7 +183,7 @@ static inline bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size, size = huc_fw_size + WOPCM_RESERVED_SIZE; if (unlikely(guc_wopcm_base < size)) { - dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n", + drm_err(&i915->drm, "WOPCM: no space for %s: %uK < %uK\n", intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC), guc_wopcm_base / SZ_1K, size / SZ_1K); return false; @@ -242,10 +242,8 @@ void intel_wopcm_init(struct intel_wopcm *wopcm) return; if (__wopcm_regs_locked(gt->uncore, &guc_wopcm_base, &guc_wopcm_size)) { - DRM_DEV_DEBUG_DRIVER(i915->drm.dev, - "GuC WOPCM is already locked [%uK, %uK)\n", - guc_wopcm_base / SZ_1K, - guc_wopcm_size / SZ_1K); + drm_dbg(&i915->drm, "GuC WOPCM is already locked [%uK, %uK)\n", + guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K); goto check; } @@ -266,8 +264,8 @@ void intel_wopcm_init(struct intel_wopcm *wopcm) guc_wopcm_size = wopcm->size - ctx_rsvd - guc_wopcm_base; guc_wopcm_size &= GUC_WOPCM_SIZE_MASK; - DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "Calculated GuC WOPCM [%uK, %uK)\n", - guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K); + drm_dbg(&i915->drm, "Calculated GuC WOPCM [%uK, %uK)\n", + guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K); check: if (__check_layout(i915, wopcm->size, guc_wopcm_base, guc_wopcm_size, diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bdw.c b/drivers/gpu/drm/i915/oa/i915_oa_bdw.c deleted file mode 100644 index 14da5c3b569d..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_bdw.c +++ /dev/null @@ -1,90 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! 
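A minimal sketch, not taken from this series, of the logging conversion applied throughout intel_wopcm.c above: messages go through the drm_* helpers keyed on the struct drm_device rather than dev_* on the raw struct device, so they carry the DRM device prefix. The helper name below is hypothetical.

#include <linux/sizes.h>

#include <drm/drm_print.h>

#include "i915_drv.h"

/* Hypothetical helper showing drm_dbg()/drm_err() against &i915->drm. */
static void example_report_wopcm(struct drm_i915_private *i915, u32 size)
{
	drm_dbg(&i915->drm, "WOPCM: %uK\n", size / SZ_1K);

	if (!size)
		drm_err(&i915->drm, "WOPCM: invalid size\n");
}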
- */ - -#include <linux/sysfs.h> - -#include "i915_drv.h" -#include "i915_oa_bdw.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2744), 0x00800000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x000000a0 }, - { _MMIO(0x9888), 0x198b0000 }, - { _MMIO(0x9888), 0x078b0066 }, - { _MMIO(0x9888), 0x118b0000 }, - { _MMIO(0x9888), 0x258b0000 }, - { _MMIO(0x9888), 0x21850008 }, - { _MMIO(0x9888), 0x0d834000 }, - { _MMIO(0x9888), 0x07844000 }, - { _MMIO(0x9888), 0x17804000 }, - { _MMIO(0x9888), 0x21800000 }, - { _MMIO(0x9888), 0x4f800000 }, - { _MMIO(0x9888), 0x41800000 }, - { _MMIO(0x9888), 0x31800000 }, - { _MMIO(0x9840), 0x00000080 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.test_config.uuid, - "d6de6f55-e526-4f79-a6a6-d7315c09044e", - sizeof(dev_priv->perf.test_config.uuid)); - dev_priv->perf.test_config.id = 1; - - dev_priv->perf.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.test_config.sysfs_metric.name = "d6de6f55-e526-4f79-a6a6-d7315c09044e"; - dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs; - - dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr; - - dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bdw.h b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h deleted file mode 100644 index 0cee3334f0a6..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_bdw.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! 
- */ - -#ifndef __I915_OA_BDW_H__ -#define __I915_OA_BDW_H__ - -struct drm_i915_private; - -void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bxt.c b/drivers/gpu/drm/i915/oa/i915_oa_bxt.c deleted file mode 100644 index 3e785bafcf99..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_bxt.c +++ /dev/null @@ -1,88 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include <linux/sysfs.h> - -#include "i915_drv.h" -#include "i915_oa_bxt.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2744), 0x00800000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x00000080 }, - { _MMIO(0x9888), 0x19800000 }, - { _MMIO(0x9888), 0x07800063 }, - { _MMIO(0x9888), 0x11800000 }, - { _MMIO(0x9888), 0x23810008 }, - { _MMIO(0x9888), 0x1d950400 }, - { _MMIO(0x9888), 0x0f922000 }, - { _MMIO(0x9888), 0x1f908000 }, - { _MMIO(0x9888), 0x37900000 }, - { _MMIO(0x9888), 0x55900000 }, - { _MMIO(0x9888), 0x47900000 }, - { _MMIO(0x9888), 0x33900000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.test_config.uuid, - "5ee72f5c-092f-421e-8b70-225f7c3e9612", - sizeof(dev_priv->perf.test_config.uuid)); - dev_priv->perf.test_config.id = 1; - - dev_priv->perf.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.test_config.sysfs_metric.name = "5ee72f5c-092f-421e-8b70-225f7c3e9612"; - dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs; - - dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr; - - dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bxt.h b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h deleted file mode 100644 index 0bdf391323ec..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_bxt.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright 
© 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_BXT_H__ -#define __I915_OA_BXT_H__ - -struct drm_i915_private; - -void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c deleted file mode 100644 index 0ea86f70a06c..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c +++ /dev/null @@ -1,89 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include <linux/sysfs.h> - -#include "i915_drv.h" -#include "i915_oa_cflgt2.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2744), 0x00800000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x00000080 }, - { _MMIO(0x9888), 0x11810000 }, - { _MMIO(0x9888), 0x07810013 }, - { _MMIO(0x9888), 0x1f810000 }, - { _MMIO(0x9888), 0x1d810000 }, - { _MMIO(0x9888), 0x1b930040 }, - { _MMIO(0x9888), 0x07e54000 }, - { _MMIO(0x9888), 0x1f908000 }, - { _MMIO(0x9888), 0x11900000 }, - { _MMIO(0x9888), 0x37900000 }, - { _MMIO(0x9888), 0x53900000 }, - { _MMIO(0x9888), 0x45900000 }, - { _MMIO(0x9888), 0x33900000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.test_config.uuid, - "74fb4902-d3d3-4237-9e90-cbdc68d0a446", - sizeof(dev_priv->perf.test_config.uuid)); - dev_priv->perf.test_config.id = 1; - - dev_priv->perf.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.test_config.sysfs_metric.name = "74fb4902-d3d3-4237-9e90-cbdc68d0a446"; - dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs; - - dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr; - - dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h deleted 
file mode 100644 index 6b862280ab78..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_CFLGT2_H__ -#define __I915_OA_CFLGT2_H__ - -struct drm_i915_private; - -void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c deleted file mode 100644 index fc632dd890bf..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c +++ /dev/null @@ -1,89 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include <linux/sysfs.h> - -#include "i915_drv.h" -#include "i915_oa_cflgt3.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2744), 0x00800000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x00000080 }, - { _MMIO(0x9888), 0x11810000 }, - { _MMIO(0x9888), 0x07810013 }, - { _MMIO(0x9888), 0x1f810000 }, - { _MMIO(0x9888), 0x1d810000 }, - { _MMIO(0x9888), 0x1b930040 }, - { _MMIO(0x9888), 0x07e54000 }, - { _MMIO(0x9888), 0x1f908000 }, - { _MMIO(0x9888), 0x11900000 }, - { _MMIO(0x9888), 0x37900000 }, - { _MMIO(0x9888), 0x53900000 }, - { _MMIO(0x9888), 0x45900000 }, - { _MMIO(0x9888), 0x33900000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.test_config.uuid, - "577e8e2c-3fa0-4875-8743-3538d585e3b0", - sizeof(dev_priv->perf.test_config.uuid)); - dev_priv->perf.test_config.id = 1; - - dev_priv->perf.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.test_config.sysfs_metric.name = "577e8e2c-3fa0-4875-8743-3538d585e3b0"; - dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs; - - dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr; - - dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id"; - 
dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h deleted file mode 100644 index 4ca9d8f89b2f..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_CFLGT3_H__ -#define __I915_OA_CFLGT3_H__ - -struct drm_i915_private; - -void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_chv.c b/drivers/gpu/drm/i915/oa/i915_oa_chv.c deleted file mode 100644 index 6cd4e9921a8a..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_chv.c +++ /dev/null @@ -1,89 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include <linux/sysfs.h> - -#include "i915_drv.h" -#include "i915_oa_chv.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2744), 0x00800000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x000000a0 }, - { _MMIO(0x9888), 0x59800000 }, - { _MMIO(0x9888), 0x59800001 }, - { _MMIO(0x9888), 0x338b0000 }, - { _MMIO(0x9888), 0x258b0066 }, - { _MMIO(0x9888), 0x058b0000 }, - { _MMIO(0x9888), 0x038b0000 }, - { _MMIO(0x9888), 0x03844000 }, - { _MMIO(0x9888), 0x47800080 }, - { _MMIO(0x9888), 0x57800000 }, - { _MMIO(0x1823a4), 0x00000000 }, - { _MMIO(0x9888), 0x59800000 }, - { _MMIO(0x9840), 0x00000080 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.test_config.uuid, - "4a534b07-cba3-414d-8d60-874830e883aa", - sizeof(dev_priv->perf.test_config.uuid)); - dev_priv->perf.test_config.id = 1; - - dev_priv->perf.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.test_config.sysfs_metric.name = "4a534b07-cba3-414d-8d60-874830e883aa"; - dev_priv->perf.test_config.sysfs_metric.attrs = 
dev_priv->perf.test_config.attrs; - - dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr; - - dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_chv.h b/drivers/gpu/drm/i915/oa/i915_oa_chv.h deleted file mode 100644 index 3cac7bbc9c71..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_chv.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_CHV_H__ -#define __I915_OA_CHV_H__ - -struct drm_i915_private; - -void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cnl.c b/drivers/gpu/drm/i915/oa/i915_oa_cnl.c deleted file mode 100644 index 1041e8914993..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_cnl.c +++ /dev/null @@ -1,101 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include <linux/sysfs.h> - -#include "i915_drv.h" -#include "i915_oa_cnl.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x0000ffff }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x0000ffff }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x0000ffff }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0xd04), 0x00000200 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x17060000 }, - { _MMIO(0x9840), 0x00000000 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x13034000 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x07060066 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x05060000 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x0f080040 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x07091000 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x0f041000 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x1d004000 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x35000000 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x49000000 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x3d000000 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x31000000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.test_config.uuid, - "db41edd4-d8e7-4730-ad11-b9a2d6833503", - sizeof(dev_priv->perf.test_config.uuid)); - dev_priv->perf.test_config.id = 1; - - 
dev_priv->perf.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.test_config.sysfs_metric.name = "db41edd4-d8e7-4730-ad11-b9a2d6833503"; - dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs; - - dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr; - - dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cnl.h b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h deleted file mode 100644 index db379f5fcbb9..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_cnl.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_CNL_H__ -#define __I915_OA_CNL_H__ - -struct drm_i915_private; - -void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_glk.c b/drivers/gpu/drm/i915/oa/i915_oa_glk.c deleted file mode 100644 index bd15ebe9aeeb..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_glk.c +++ /dev/null @@ -1,88 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! 
- */ - -#include <linux/sysfs.h> - -#include "i915_drv.h" -#include "i915_oa_glk.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2744), 0x00800000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x00000080 }, - { _MMIO(0x9888), 0x19800000 }, - { _MMIO(0x9888), 0x07800063 }, - { _MMIO(0x9888), 0x11800000 }, - { _MMIO(0x9888), 0x23810008 }, - { _MMIO(0x9888), 0x1d950400 }, - { _MMIO(0x9888), 0x0f922000 }, - { _MMIO(0x9888), 0x1f908000 }, - { _MMIO(0x9888), 0x37900000 }, - { _MMIO(0x9888), 0x55900000 }, - { _MMIO(0x9888), 0x47900000 }, - { _MMIO(0x9888), 0x33900000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.test_config.uuid, - "dd3fd789-e783-4204-8cd0-b671bbccb0cf", - sizeof(dev_priv->perf.test_config.uuid)); - dev_priv->perf.test_config.id = 1; - - dev_priv->perf.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.test_config.sysfs_metric.name = "dd3fd789-e783-4204-8cd0-b671bbccb0cf"; - dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs; - - dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr; - - dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_glk.h b/drivers/gpu/drm/i915/oa/i915_oa_glk.h deleted file mode 100644 index 779f343efd11..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_glk.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! 
- */ - -#ifndef __I915_OA_GLK_H__ -#define __I915_OA_GLK_H__ - -struct drm_i915_private; - -void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_hsw.c b/drivers/gpu/drm/i915/oa/i915_oa_hsw.c deleted file mode 100644 index 133721a8619f..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_hsw.c +++ /dev/null @@ -1,118 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include <linux/sysfs.h> - -#include "i915_drv.h" -#include "i915_oa_hsw.h" - -static const struct i915_oa_reg b_counter_config_render_basic[] = { - { _MMIO(0x2724), 0x00800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2714), 0x00800000 }, - { _MMIO(0x2710), 0x00000000 }, -}; - -static const struct i915_oa_reg flex_eu_config_render_basic[] = { -}; - -static const struct i915_oa_reg mux_config_render_basic[] = { - { _MMIO(0x9840), 0x00000080 }, - { _MMIO(0x253a4), 0x01600000 }, - { _MMIO(0x25440), 0x00100000 }, - { _MMIO(0x25128), 0x00000000 }, - { _MMIO(0x2691c), 0x00000800 }, - { _MMIO(0x26aa0), 0x01500000 }, - { _MMIO(0x26b9c), 0x00006000 }, - { _MMIO(0x2791c), 0x00000800 }, - { _MMIO(0x27aa0), 0x01500000 }, - { _MMIO(0x27b9c), 0x00006000 }, - { _MMIO(0x2641c), 0x00000400 }, - { _MMIO(0x25380), 0x00000010 }, - { _MMIO(0x2538c), 0x00000000 }, - { _MMIO(0x25384), 0x0800aaaa }, - { _MMIO(0x25400), 0x00000004 }, - { _MMIO(0x2540c), 0x06029000 }, - { _MMIO(0x25410), 0x00000002 }, - { _MMIO(0x25404), 0x5c30ffff }, - { _MMIO(0x25100), 0x00000016 }, - { _MMIO(0x25110), 0x00000400 }, - { _MMIO(0x25104), 0x00000000 }, - { _MMIO(0x26804), 0x00001211 }, - { _MMIO(0x26884), 0x00000100 }, - { _MMIO(0x26900), 0x00000002 }, - { _MMIO(0x26908), 0x00700000 }, - { _MMIO(0x26904), 0x00000000 }, - { _MMIO(0x26984), 0x00001022 }, - { _MMIO(0x26a04), 0x00000011 }, - { _MMIO(0x26a80), 0x00000006 }, - { _MMIO(0x26a88), 0x00000c02 }, - { _MMIO(0x26a84), 0x00000000 }, - { _MMIO(0x26b04), 0x00001000 }, - { _MMIO(0x26b80), 0x00000002 }, - { _MMIO(0x26b8c), 0x00000007 }, - { _MMIO(0x26b84), 0x00000000 }, - { _MMIO(0x27804), 0x00004844 }, - { _MMIO(0x27884), 0x00000400 }, - { _MMIO(0x27900), 0x00000002 }, - { _MMIO(0x27908), 0x0e000000 }, - { _MMIO(0x27904), 0x00000000 }, - { _MMIO(0x27984), 0x00004088 }, - { _MMIO(0x27a04), 0x00000044 }, - { _MMIO(0x27a80), 0x00000006 }, - { _MMIO(0x27a88), 0x00018040 }, - { _MMIO(0x27a84), 0x00000000 }, - { _MMIO(0x27b04), 0x00004000 }, - { _MMIO(0x27b80), 0x00000002 }, - { _MMIO(0x27b8c), 0x000000e0 }, - { _MMIO(0x27b84), 0x00000000 }, - { _MMIO(0x26104), 0x00002222 }, - { _MMIO(0x26184), 0x0c006666 }, - { _MMIO(0x26284), 0x04000000 }, - { _MMIO(0x26304), 0x04000000 }, - { _MMIO(0x26400), 0x00000002 }, - { _MMIO(0x26410), 0x000000a0 }, - { _MMIO(0x26404), 0x00000000 }, - { _MMIO(0x25420), 0x04108020 }, - { _MMIO(0x25424), 0x1284a420 }, - { _MMIO(0x2541c), 0x00000000 }, - { _MMIO(0x25428), 0x00042049 }, -}; - -static ssize_t -show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.test_config.uuid, - "403d8832-1a27-4aa6-a64e-f5389ce7b212", - sizeof(dev_priv->perf.test_config.uuid)); - dev_priv->perf.test_config.id = 1; - - dev_priv->perf.test_config.mux_regs = mux_config_render_basic; - dev_priv->perf.test_config.mux_regs_len = 
ARRAY_SIZE(mux_config_render_basic); - - dev_priv->perf.test_config.b_counter_regs = b_counter_config_render_basic; - dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_render_basic); - - dev_priv->perf.test_config.flex_regs = flex_eu_config_render_basic; - dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_render_basic); - - dev_priv->perf.test_config.sysfs_metric.name = "403d8832-1a27-4aa6-a64e-f5389ce7b212"; - dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs; - - dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr; - - dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.test_config.sysfs_metric_id.show = show_render_basic_id; -} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_hsw.h b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h deleted file mode 100644 index ba97f732f136..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_hsw.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_HSW_H__ -#define __I915_OA_HSW_H__ - -struct drm_i915_private; - -void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_icl.c b/drivers/gpu/drm/i915/oa/i915_oa_icl.c deleted file mode 100644 index 2d92041b754f..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_icl.c +++ /dev/null @@ -1,98 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! 
- */ - -#include <linux/sysfs.h> - -#include "i915_drv.h" -#include "i915_oa_icl.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x0000ffff }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x0000ffff }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x0000ffff }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0xd04), 0x00000200 }, - { _MMIO(0x9840), 0x00000000 }, - { _MMIO(0x9884), 0x00000000 }, - { _MMIO(0x9888), 0x10060000 }, - { _MMIO(0x9888), 0x22060000 }, - { _MMIO(0x9888), 0x16060000 }, - { _MMIO(0x9888), 0x24060000 }, - { _MMIO(0x9888), 0x18060000 }, - { _MMIO(0x9888), 0x1a060000 }, - { _MMIO(0x9888), 0x12060000 }, - { _MMIO(0x9888), 0x14060000 }, - { _MMIO(0x9888), 0x10060000 }, - { _MMIO(0x9888), 0x22060000 }, - { _MMIO(0x9884), 0x00000003 }, - { _MMIO(0x9888), 0x16130000 }, - { _MMIO(0x9888), 0x24000001 }, - { _MMIO(0x9888), 0x0e130056 }, - { _MMIO(0x9888), 0x10130000 }, - { _MMIO(0x9888), 0x1a130000 }, - { _MMIO(0x9888), 0x541f0001 }, - { _MMIO(0x9888), 0x181f0000 }, - { _MMIO(0x9888), 0x4c1f0000 }, - { _MMIO(0x9888), 0x301f0000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.test_config.uuid, - "a291665e-244b-4b76-9b9a-01de9d3c8068", - sizeof(dev_priv->perf.test_config.uuid)); - dev_priv->perf.test_config.id = 1; - - dev_priv->perf.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.test_config.sysfs_metric.name = "a291665e-244b-4b76-9b9a-01de9d3c8068"; - dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs; - - dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr; - - dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_icl.h b/drivers/gpu/drm/i915/oa/i915_oa_icl.h deleted file mode 100644 index 5c64112d720e..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_icl.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! 
- */ - -#ifndef __I915_OA_ICL_H__ -#define __I915_OA_ICL_H__ - -struct drm_i915_private; - -void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c deleted file mode 100644 index 1c3a67c9cfe0..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c +++ /dev/null @@ -1,89 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include <linux/sysfs.h> - -#include "i915_drv.h" -#include "i915_oa_kblgt2.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2744), 0x00800000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x00000080 }, - { _MMIO(0x9888), 0x11810000 }, - { _MMIO(0x9888), 0x07810013 }, - { _MMIO(0x9888), 0x1f810000 }, - { _MMIO(0x9888), 0x1d810000 }, - { _MMIO(0x9888), 0x1b930040 }, - { _MMIO(0x9888), 0x07e54000 }, - { _MMIO(0x9888), 0x1f908000 }, - { _MMIO(0x9888), 0x11900000 }, - { _MMIO(0x9888), 0x37900000 }, - { _MMIO(0x9888), 0x53900000 }, - { _MMIO(0x9888), 0x45900000 }, - { _MMIO(0x9888), 0x33900000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.test_config.uuid, - "baa3c7e4-52b6-4b85-801e-465a94b746dd", - sizeof(dev_priv->perf.test_config.uuid)); - dev_priv->perf.test_config.id = 1; - - dev_priv->perf.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.test_config.sysfs_metric.name = "baa3c7e4-52b6-4b85-801e-465a94b746dd"; - dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs; - - dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr; - - dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h deleted file mode 100644 index 810532fa6b63..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h +++ /dev/null @@ -1,16 +0,0 
@@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_KBLGT2_H__ -#define __I915_OA_KBLGT2_H__ - -struct drm_i915_private; - -void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c deleted file mode 100644 index ebbe5a9c9fdc..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c +++ /dev/null @@ -1,89 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include <linux/sysfs.h> - -#include "i915_drv.h" -#include "i915_oa_kblgt3.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2744), 0x00800000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x00000080 }, - { _MMIO(0x9888), 0x11810000 }, - { _MMIO(0x9888), 0x07810013 }, - { _MMIO(0x9888), 0x1f810000 }, - { _MMIO(0x9888), 0x1d810000 }, - { _MMIO(0x9888), 0x1b930040 }, - { _MMIO(0x9888), 0x07e54000 }, - { _MMIO(0x9888), 0x1f908000 }, - { _MMIO(0x9888), 0x11900000 }, - { _MMIO(0x9888), 0x37900000 }, - { _MMIO(0x9888), 0x53900000 }, - { _MMIO(0x9888), 0x45900000 }, - { _MMIO(0x9888), 0x33900000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.test_config.uuid, - "f1792f32-6db2-4b50-b4b2-557128f1688d", - sizeof(dev_priv->perf.test_config.uuid)); - dev_priv->perf.test_config.id = 1; - - dev_priv->perf.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.test_config.sysfs_metric.name = "f1792f32-6db2-4b50-b4b2-557128f1688d"; - dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs; - - dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr; - - dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git 
a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h deleted file mode 100644 index 13d70456fabd..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_KBLGT3_H__ -#define __I915_OA_KBLGT3_H__ - -struct drm_i915_private; - -void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c deleted file mode 100644 index 1bc359ed34e8..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c +++ /dev/null @@ -1,88 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include <linux/sysfs.h> - -#include "i915_drv.h" -#include "i915_oa_sklgt2.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x00000080 }, - { _MMIO(0x9888), 0x11810000 }, - { _MMIO(0x9888), 0x07810016 }, - { _MMIO(0x9888), 0x1f810000 }, - { _MMIO(0x9888), 0x1d810000 }, - { _MMIO(0x9888), 0x1b930040 }, - { _MMIO(0x9888), 0x07e54000 }, - { _MMIO(0x9888), 0x1f908000 }, - { _MMIO(0x9888), 0x11900000 }, - { _MMIO(0x9888), 0x37900000 }, - { _MMIO(0x9888), 0x53900000 }, - { _MMIO(0x9888), 0x45900000 }, - { _MMIO(0x9888), 0x33900000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.test_config.uuid, - "1651949f-0ac0-4cb1-a06f-dafd74a407d1", - sizeof(dev_priv->perf.test_config.uuid)); - dev_priv->perf.test_config.id = 1; - - dev_priv->perf.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.test_config.sysfs_metric.name = "1651949f-0ac0-4cb1-a06f-dafd74a407d1"; - dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs; - - dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr; - - 
dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h deleted file mode 100644 index fda70c51a6ec..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_SKLGT2_H__ -#define __I915_OA_SKLGT2_H__ - -struct drm_i915_private; - -void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c deleted file mode 100644 index 6e352f881310..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c +++ /dev/null @@ -1,89 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include <linux/sysfs.h> - -#include "i915_drv.h" -#include "i915_oa_sklgt3.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2744), 0x00800000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x00000080 }, - { _MMIO(0x9888), 0x11810000 }, - { _MMIO(0x9888), 0x07810013 }, - { _MMIO(0x9888), 0x1f810000 }, - { _MMIO(0x9888), 0x1d810000 }, - { _MMIO(0x9888), 0x1b930040 }, - { _MMIO(0x9888), 0x07e54000 }, - { _MMIO(0x9888), 0x1f908000 }, - { _MMIO(0x9888), 0x11900000 }, - { _MMIO(0x9888), 0x37900000 }, - { _MMIO(0x9888), 0x53900000 }, - { _MMIO(0x9888), 0x45900000 }, - { _MMIO(0x9888), 0x33900000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.test_config.uuid, - "2b985803-d3c9-4629-8a4f-634bfecba0e8", - sizeof(dev_priv->perf.test_config.uuid)); - dev_priv->perf.test_config.id = 1; - - dev_priv->perf.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.test_config.sysfs_metric.name = 
"2b985803-d3c9-4629-8a4f-634bfecba0e8"; - dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs; - - dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr; - - dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h deleted file mode 100644 index df74eba5799e..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_SKLGT3_H__ -#define __I915_OA_SKLGT3_H__ - -struct drm_i915_private; - -void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c deleted file mode 100644 index 8f345115a306..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c +++ /dev/null @@ -1,89 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include <linux/sysfs.h> - -#include "i915_drv.h" -#include "i915_oa_sklgt4.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2744), 0x00800000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x00000080 }, - { _MMIO(0x9888), 0x11810000 }, - { _MMIO(0x9888), 0x07810013 }, - { _MMIO(0x9888), 0x1f810000 }, - { _MMIO(0x9888), 0x1d810000 }, - { _MMIO(0x9888), 0x1b930040 }, - { _MMIO(0x9888), 0x07e54000 }, - { _MMIO(0x9888), 0x1f908000 }, - { _MMIO(0x9888), 0x11900000 }, - { _MMIO(0x9888), 0x37900000 }, - { _MMIO(0x9888), 0x53900000 }, - { _MMIO(0x9888), 0x45900000 }, - { _MMIO(0x9888), 0x33900000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.test_config.uuid, - "882fa433-1f4a-4a67-a962-c741888fe5f5", - sizeof(dev_priv->perf.test_config.uuid)); - dev_priv->perf.test_config.id = 1; - - dev_priv->perf.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - 
dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.test_config.sysfs_metric.name = "882fa433-1f4a-4a67-a962-c741888fe5f5"; - dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs; - - dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr; - - dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h deleted file mode 100644 index 378ab7ab78d5..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2018-2019 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_SKLGT4_H__ -#define __I915_OA_SKLGT4_H__ - -struct drm_i915_private; - -void i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.c b/drivers/gpu/drm/i915/oa/i915_oa_tgl.c deleted file mode 100644 index a29d93707345..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_tgl.c +++ /dev/null @@ -1,121 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include <linux/sysfs.h> - -#include "i915_drv.h" -#include "i915_oa_tgl.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0xD920), 0x00000000 }, - { _MMIO(0xD900), 0x00000000 }, - { _MMIO(0xD904), 0xF0800000 }, - { _MMIO(0xD910), 0x00000000 }, - { _MMIO(0xD914), 0xF0800000 }, - { _MMIO(0xDC40), 0x00FF0000 }, - { _MMIO(0xD940), 0x00000004 }, - { _MMIO(0xD944), 0x0000FFFF }, - { _MMIO(0xDC00), 0x00000004 }, - { _MMIO(0xDC04), 0x0000FFFF }, - { _MMIO(0xD948), 0x00000003 }, - { _MMIO(0xD94C), 0x0000FFFF }, - { _MMIO(0xDC08), 0x00000003 }, - { _MMIO(0xDC0C), 0x0000FFFF }, - { _MMIO(0xD950), 0x00000007 }, - { _MMIO(0xD954), 0x0000FFFF }, - { _MMIO(0xDC10), 0x00000007 }, - { _MMIO(0xDC14), 0x0000FFFF }, - { _MMIO(0xD958), 0x00100002 }, - { _MMIO(0xD95C), 0x0000FFF7 }, - { _MMIO(0xDC18), 0x00100002 }, - { _MMIO(0xDC1C), 0x0000FFF7 }, - { _MMIO(0xD960), 0x00100002 }, - { _MMIO(0xD964), 0x0000FFCF }, - { _MMIO(0xDC20), 0x00100002 }, - { _MMIO(0xDC24), 0x0000FFCF }, - { _MMIO(0xD968), 0x00100082 }, - { _MMIO(0xD96C), 0x0000FFEF }, - { _MMIO(0xDC28), 0x00100082 }, - { _MMIO(0xDC2C), 0x0000FFEF }, - { _MMIO(0xD970), 0x001000C2 }, - { _MMIO(0xD974), 0x0000FFE7 }, - { _MMIO(0xDC30), 0x001000C2 }, - { _MMIO(0xDC34), 0x0000FFE7 }, - { _MMIO(0xD978), 0x00100001 }, - { _MMIO(0xD97C), 0x0000FFE7 }, - { _MMIO(0xDC38), 0x00100001 }, - { _MMIO(0xDC3C), 0x0000FFE7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x0D04), 0x00000200 }, - { _MMIO(0x9840), 0x00000000 }, - { _MMIO(0x9884), 0x00000000 }, - { _MMIO(0x9888), 0x280E0000 }, - { _MMIO(0x9888), 0x1E0E0147 }, - { _MMIO(0x9888), 0x180E0000 }, - { _MMIO(0x9888), 0x160E0000 }, - { _MMIO(0x9888), 0x1E0F1000 }, - { _MMIO(0x9888), 0x1E104000 }, - { _MMIO(0x9888), 0x2E020100 }, - { _MMIO(0x9888), 0x2C030004 }, - { _MMIO(0x9888), 0x38003000 }, - { _MMIO(0x9888), 
0x1E0A8000 }, - { _MMIO(0x9884), 0x00000003 }, - { _MMIO(0x9888), 0x49110000 }, - { _MMIO(0x9888), 0x5D101400 }, - { _MMIO(0x9888), 0x1D140020 }, - { _MMIO(0x9888), 0x1D1103A3 }, - { _MMIO(0x9888), 0x01110000 }, - { _MMIO(0x9888), 0x61111000 }, - { _MMIO(0x9888), 0x1F128000 }, - { _MMIO(0x9888), 0x17100000 }, - { _MMIO(0x9888), 0x55100630 }, - { _MMIO(0x9888), 0x57100000 }, - { _MMIO(0x9888), 0x31100000 }, - { _MMIO(0x9884), 0x00000003 }, - { _MMIO(0x9888), 0x65100002 }, - { _MMIO(0x9884), 0x00000000 }, - { _MMIO(0x9888), 0x42000001 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.test_config.uuid, - "80a833f0-2504-4321-8894-e9277844ce7b", - sizeof(dev_priv->perf.test_config.uuid)); - dev_priv->perf.test_config.id = 1; - - dev_priv->perf.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.test_config.sysfs_metric.name = "80a833f0-2504-4321-8894-e9277844ce7b"; - dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs; - - dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr; - - dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.h b/drivers/gpu/drm/i915/oa/i915_oa_tgl.h deleted file mode 100644 index 4c25f0be825c..000000000000 --- a/drivers/gpu/drm/i915/oa/i915_oa_tgl.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! 
- */ - -#ifndef __I915_OA_TGL_H__ -#define __I915_OA_TGL_H__ - -struct drm_i915_private; - -void i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c index 68bbb1580162..4002c984c2e0 100644 --- a/drivers/gpu/drm/i915/selftests/i915_active.c +++ b/drivers/gpu/drm/i915/selftests/i915_active.c @@ -153,7 +153,7 @@ static int live_active_wait(void *arg) if (IS_ERR(active)) return PTR_ERR(active); - i915_active_wait(&active->base); + __i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE); if (!READ_ONCE(active->retired)) { struct drm_printer p = drm_err_printer(__func__); @@ -228,11 +228,11 @@ static int live_active_barrier(void *arg) } i915_active_release(&active->base); + if (err) + goto out; - if (err == 0) - err = i915_active_wait(&active->base); - - if (err == 0 && !READ_ONCE(active->retired)) { + __i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE); + if (!READ_ONCE(active->retired)) { pr_err("i915_active not retired after flushing barriers!\n"); err = -EINVAL; } @@ -277,7 +277,7 @@ static struct intel_engine_cs *node_to_barrier(struct active_node *it) void i915_active_print(struct i915_active *ref, struct drm_printer *m) { - drm_printf(m, "active %pS:%pS\n", ref->active, ref->retire); + drm_printf(m, "active %ps:%ps\n", ref->active, ref->retire); drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count)); drm_printf(m, "\tpreallocated barriers? %s\n", yesno(!llist_empty(&ref->preallocated_barriers))); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index 623759b73bb4..88d400b9df88 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -125,8 +125,6 @@ static void pm_resume(struct drm_i915_private *i915) */ with_intel_runtime_pm(&i915->runtime_pm, wakeref) { i915_ggtt_resume(&i915->ggtt); - i915_gem_restore_fences(&i915->ggtt); - i915_gem_resume(i915); } } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 06ef88510209..028baae9631f 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -45,8 +45,8 @@ static void quirk_add(struct drm_i915_gem_object *obj, static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects) { - unsigned long unbound, bound, count; struct drm_i915_gem_object *obj; + unsigned long count; count = 0; do { @@ -72,30 +72,6 @@ static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects) pr_debug("Filled GGTT with %lu pages [%llu total]\n", count, ggtt->vm.total / PAGE_SIZE); - bound = 0; - unbound = 0; - list_for_each_entry(obj, objects, st_link) { - GEM_BUG_ON(!obj->mm.quirked); - - if (atomic_read(&obj->bind_count)) - bound++; - else - unbound++; - } - GEM_BUG_ON(bound + unbound != count); - - if (unbound) { - pr_err("%s: Found %lu objects unbound, expected %u!\n", - __func__, unbound, 0); - return -EINVAL; - } - - if (bound != count) { - pr_err("%s: Found %lu objects bound, expected %lu!\n", - __func__, bound, count); - return -EINVAL; - } - if (list_empty(&ggtt->vm.bound_list)) { pr_err("No objects on the GGTT inactive list!\n"); return -EINVAL; diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index b342bef5e7c9..5d2a02fcf595 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ 
b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -1229,7 +1229,6 @@ static void track_vma_bind(struct i915_vma *vma) { struct drm_i915_gem_object *obj = vma->obj; - atomic_inc(&obj->bind_count); /* track for eviction later */ __i915_gem_object_pin_pages(obj); GEM_BUG_ON(vma->pages); diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c index d1a1568c47ba..5608fab98d5d 100644 --- a/drivers/gpu/drm/i915/selftests/i915_perf.c +++ b/drivers/gpu/drm/i915/selftests/i915_perf.c @@ -14,10 +14,85 @@ #include "igt_flush_test.h" #include "lib_sw_fence.h" +#define TEST_OA_CONFIG_UUID "12345678-1234-1234-1234-1234567890ab" + +static int +alloc_empty_config(struct i915_perf *perf) +{ + struct i915_oa_config *oa_config; + + oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL); + if (!oa_config) + return -ENOMEM; + + oa_config->perf = perf; + kref_init(&oa_config->ref); + + strlcpy(oa_config->uuid, TEST_OA_CONFIG_UUID, sizeof(oa_config->uuid)); + + mutex_lock(&perf->metrics_lock); + + oa_config->id = idr_alloc(&perf->metrics_idr, oa_config, 2, 0, GFP_KERNEL); + if (oa_config->id < 0) { + mutex_unlock(&perf->metrics_lock); + i915_oa_config_put(oa_config); + return -ENOMEM; + } + + mutex_unlock(&perf->metrics_lock); + + return 0; +} + +static void +destroy_empty_config(struct i915_perf *perf) +{ + struct i915_oa_config *oa_config = NULL, *tmp; + int id; + + mutex_lock(&perf->metrics_lock); + + idr_for_each_entry(&perf->metrics_idr, tmp, id) { + if (!strcmp(tmp->uuid, TEST_OA_CONFIG_UUID)) { + oa_config = tmp; + break; + } + } + + if (oa_config) + idr_remove(&perf->metrics_idr, oa_config->id); + + mutex_unlock(&perf->metrics_lock); + + if (oa_config) + i915_oa_config_put(oa_config); +} + +static struct i915_oa_config * +get_empty_config(struct i915_perf *perf) +{ + struct i915_oa_config *oa_config = NULL, *tmp; + int id; + + mutex_lock(&perf->metrics_lock); + + idr_for_each_entry(&perf->metrics_idr, tmp, id) { + if (!strcmp(tmp->uuid, TEST_OA_CONFIG_UUID)) { + oa_config = i915_oa_config_get(tmp); + break; + } + } + + mutex_unlock(&perf->metrics_lock); + + return oa_config; +} + static struct i915_perf_stream * test_stream(struct i915_perf *perf) { struct drm_i915_perf_open_param param = {}; + struct i915_oa_config *oa_config = get_empty_config(perf); struct perf_open_properties props = { .engine = intel_engine_lookup_user(perf->i915, I915_ENGINE_CLASS_RENDER, @@ -25,13 +100,19 @@ test_stream(struct i915_perf *perf) .sample_flags = SAMPLE_OA_REPORT, .oa_format = IS_GEN(perf->i915, 12) ? 
I915_OA_FORMAT_A32u40_A4u32_B8_C8 : I915_OA_FORMAT_C4_B8, - .metrics_set = 1, }; struct i915_perf_stream *stream; + if (!oa_config) + return NULL; + + props.metrics_set = oa_config->id; + stream = kzalloc(sizeof(*stream), GFP_KERNEL); - if (!stream) + if (!stream) { + i915_oa_config_put(oa_config); return NULL; + } stream->perf = perf; @@ -42,6 +123,8 @@ test_stream(struct i915_perf *perf) } mutex_unlock(&perf->lock); + i915_oa_config_put(oa_config); + return stream; } @@ -206,6 +289,7 @@ int i915_perf_live_selftests(struct drm_i915_private *i915) SUBTEST(live_noa_delay), }; struct i915_perf *perf = &i915->perf; + int err; if (!perf->metrics_kobj || !perf->ops.enable_metric_set) return 0; @@ -213,5 +297,13 @@ int i915_perf_live_selftests(struct drm_i915_private *i915) if (intel_gt_is_wedged(&i915->gt)) return 0; - return i915_subtests(tests, i915); + err = alloc_empty_config(&i915->perf); + if (err) + return err; + + err = i915_subtests(tests, i915); + + destroy_empty_config(&i915->perf); + + return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index f89d9c42f1fa..1dab0360f76a 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -28,6 +28,7 @@ #include "gem/selftests/mock_context.h" #include "gt/intel_engine_pm.h" +#include "gt/intel_engine_user.h" #include "gt/intel_gt.h" #include "i915_random.h" @@ -51,6 +52,11 @@ static unsigned int num_uabi_engines(struct drm_i915_private *i915) return count; } +static struct intel_engine_cs *rcs0(struct drm_i915_private *i915) +{ + return intel_engine_lookup_user(i915, I915_ENGINE_CLASS_RENDER, 0); +} + static int igt_add_request(void *arg) { struct drm_i915_private *i915 = arg; @@ -58,7 +64,7 @@ static int igt_add_request(void *arg) /* Basic preliminary test to create a request and let it loose! 
*/ - request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10); + request = mock_request(rcs0(i915)->kernel_context, HZ / 10); if (!request) return -ENOMEM; @@ -76,7 +82,7 @@ static int igt_wait_request(void *arg) /* Submit a request, then wait upon it */ - request = mock_request(i915->engine[RCS0]->kernel_context, T); + request = mock_request(rcs0(i915)->kernel_context, T); if (!request) return -ENOMEM; @@ -145,7 +151,7 @@ static int igt_fence_wait(void *arg) /* Submit a request, treat it as a fence and wait upon it */ - request = mock_request(i915->engine[RCS0]->kernel_context, T); + request = mock_request(rcs0(i915)->kernel_context, T); if (!request) return -ENOMEM; @@ -420,7 +426,7 @@ static int mock_breadcrumbs_smoketest(void *arg) { struct drm_i915_private *i915 = arg; struct smoketest t = { - .engine = i915->engine[RCS0], + .engine = rcs0(i915), .ncontexts = 1024, .max_batch = 1024, .request_alloc = __mock_request_alloc @@ -1233,7 +1239,7 @@ static int live_parallel_engines(void *arg) struct igt_live_test t; unsigned int idx; - snprintf(name, sizeof(name), "%pS", fn); + snprintf(name, sizeof(name), "%ps", fn); err = igt_live_test_begin(&t, i915, __func__, name); if (err) break; diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index 2a1d4ba1f9f3..6e80d99048e4 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -594,8 +594,11 @@ create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type, void *addr; obj = i915_gem_object_create_region(mr, size, 0); - if (IS_ERR(obj)) + if (IS_ERR(obj)) { + if (PTR_ERR(obj) == -ENOSPC) /* Stolen memory */ + return ERR_PTR(-ENODEV); return obj; + } addr = i915_gem_object_pin_map(obj, type); if (IS_ERR(addr)) { diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 754d0eb6beaa..9b105b811f1f 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -25,6 +25,8 @@ #include <linux/pm_domain.h> #include <linux/pm_runtime.h> +#include <drm/drm_managed.h> + #include "gt/intel_gt.h" #include "gt/intel_gt_requests.h" #include "gt/mock_engine.h" @@ -55,6 +57,9 @@ static void mock_device_release(struct drm_device *dev) { struct drm_i915_private *i915 = to_i915(dev); + if (!i915->do_release) + goto out; + mock_device_flush(i915); intel_gt_driver_remove(&i915->gt); @@ -71,8 +76,9 @@ static void mock_device_release(struct drm_device *dev) drm_mode_config_cleanup(&i915->drm); - drm_dev_fini(&i915->drm); +out: put_device(&i915->drm.pdev->dev); + i915->drm.pdev = NULL; } static struct drm_driver mock_driver = { @@ -114,9 +120,14 @@ struct drm_i915_private *mock_gem_device(void) struct pci_dev *pdev; int err; - pdev = kzalloc(sizeof(*pdev) + sizeof(*i915), GFP_KERNEL); + pdev = kzalloc(sizeof(*pdev), GFP_KERNEL); if (!pdev) - goto err; + return NULL; + i915 = kzalloc(sizeof(*i915), GFP_KERNEL); + if (!i915) { + kfree(pdev); + return NULL; + } device_initialize(&pdev->dev); pdev->class = PCI_BASE_CLASS_DISPLAY << 16; @@ -129,7 +140,6 @@ struct drm_i915_private *mock_gem_device(void) pdev->dev.archdata.iommu = (void *)-1; #endif - i915 = (struct drm_i915_private *)(pdev + 1); pci_set_drvdata(pdev, i915); dev_pm_domain_set(&pdev->dev, &pm_domain); @@ -141,9 +151,13 @@ struct drm_i915_private *mock_gem_device(void) err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev); if 
(err) { pr_err("Failed to initialise mock GEM device: err=%d\n", err); - goto put_device; + put_device(&pdev->dev); + kfree(i915); + + return NULL; } i915->drm.pdev = pdev; + drmm_add_final_kfree(&i915->drm, i915); intel_runtime_pm_init_early(&i915->runtime_pm); @@ -178,16 +192,18 @@ struct drm_i915_private *mock_gem_device(void) mkwrite_device_info(i915)->engine_mask = BIT(0); - i915->engine[RCS0] = mock_engine(i915, "mock", RCS0); - if (!i915->engine[RCS0]) + i915->gt.engine[RCS0] = mock_engine(i915, "mock", RCS0); + if (!i915->gt.engine[RCS0]) goto err_unlock; - if (mock_engine_init(i915->engine[RCS0])) + if (mock_engine_init(i915->gt.engine[RCS0])) goto err_context; __clear_bit(I915_WEDGED, &i915->gt.reset.flags); intel_engines_driver_register(i915); + i915->do_release = true; + return i915; err_context: @@ -198,9 +214,7 @@ err_drv: intel_gt_driver_late_release(&i915->gt); intel_memory_regions_driver_release(i915); drm_mode_config_cleanup(&i915->drm); - drm_dev_fini(&i915->drm); -put_device: - put_device(&pdev->dev); -err: + drm_dev_put(&i915->drm); + return NULL; } diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c index f22cfbf9353e..ba4ca17fd4d8 100644 --- a/drivers/gpu/drm/imx/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c @@ -18,6 +18,7 @@ #include <drm/drm_edid.h> #include <drm/drm_encoder.h> #include <drm/drm_of.h> +#include <drm/drm_simple_kms_helper.h> #include "imx-drm.h" @@ -143,10 +144,6 @@ static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = .atomic_check = dw_hdmi_imx_atomic_check, }; -static const struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static enum drm_mode_status imx6q_hdmi_mode_valid(struct drm_connector *con, const struct drm_display_mode *mode) @@ -236,8 +233,7 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master, return ret; drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs); - drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); platform_set_drvdata(pdev, hdmi); diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index da87c70e413b..2e38f1a5cf8d 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -42,12 +42,6 @@ void imx_drm_connector_destroy(struct drm_connector *connector) } EXPORT_SYMBOL_GPL(imx_drm_connector_destroy); -void imx_drm_encoder_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} -EXPORT_SYMBOL_GPL(imx_drm_encoder_destroy); - static int imx_drm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { @@ -139,8 +133,8 @@ int imx_drm_encoder_parse_of(struct drm_device *drm, encoder->possible_crtcs = crtc_mask; - /* FIXME: this is the mask of outputs which can clone this output. 
*/ - encoder->possible_clones = ~0; + /* FIXME: cloning support not clear, disable it all for now */ + encoder->possible_clones = 0; return 0; } diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h index ab9c6f706eb3..c3e1a3f14d30 100644 --- a/drivers/gpu/drm/imx/imx-drm.h +++ b/drivers/gpu/drm/imx/imx-drm.h @@ -38,7 +38,6 @@ int imx_drm_encoder_parse_of(struct drm_device *drm, struct drm_encoder *encoder, struct device_node *np); void imx_drm_connector_destroy(struct drm_connector *connector); -void imx_drm_encoder_destroy(struct drm_encoder *encoder); int ipu_planes_assign_pre(struct drm_device *dev, struct drm_atomic_state *state); diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 4da22a94790c..66ea68e8da87 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -26,6 +26,7 @@ #include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "imx-drm.h" @@ -393,10 +394,6 @@ static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = .best_encoder = imx_ldb_connector_best_encoder, }; -static const struct drm_encoder_funcs imx_ldb_encoder_funcs = { - .destroy = imx_drm_encoder_destroy, -}; - static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = { .atomic_mode_set = imx_ldb_encoder_atomic_mode_set, .enable = imx_ldb_encoder_enable, @@ -441,8 +438,7 @@ static int imx_ldb_register(struct drm_device *drm, } drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs); - drm_encoder_init(drm, encoder, &imx_ldb_encoder_funcs, - DRM_MODE_ENCODER_LVDS, NULL); + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_LVDS); if (imx_ldb_ch->bridge) { ret = drm_bridge_attach(&imx_ldb_ch->encoder, diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index 5bbfaa2cd0f4..ee63782c77e9 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -21,6 +21,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "imx-drm.h" @@ -348,10 +349,6 @@ static const struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = .mode_valid = imx_tve_connector_mode_valid, }; -static const struct drm_encoder_funcs imx_tve_encoder_funcs = { - .destroy = imx_drm_encoder_destroy, -}; - static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = { .mode_set = imx_tve_encoder_mode_set, .enable = imx_tve_encoder_enable, @@ -479,8 +476,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve) return ret; drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs); - drm_encoder_init(drm, &tve->encoder, &imx_tve_encoder_funcs, - encoder_type, NULL); + drm_simple_encoder_init(drm, &tve->encoder, encoder_type); drm_connector_helper_add(&tve->connector, &imx_tve_connector_helper_funcs); diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 08fafa4bf8c2..ac916c84a631 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -18,6 +18,7 @@ #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "imx-drm.h" @@ -256,10 +257,6 @@ static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = { .best_encoder = imx_pd_connector_best_encoder, }; -static const struct 
drm_encoder_funcs imx_pd_encoder_funcs = { - .destroy = imx_drm_encoder_destroy, -}; - static const struct drm_bridge_funcs imx_pd_bridge_funcs = { .enable = imx_pd_bridge_enable, .disable = imx_pd_bridge_disable, @@ -288,8 +285,7 @@ static int imx_pd_register(struct drm_device *drm, */ imxpd->connector.dpms = DRM_MODE_DPMS_OFF; - drm_encoder_init(drm, encoder, &imx_pd_encoder_funcs, - DRM_MODE_ENCODER_NONE, NULL); + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE); imxpd->bridge.funcs = &imx_pd_bridge_funcs; drm_bridge_attach(encoder, &imxpd->bridge, NULL, 0); diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c index 9dfe7cb530e1..24cc3587cea5 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm.c @@ -23,11 +23,13 @@ #include <drm/drm_fourcc.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_irq.h> +#include <drm/drm_managed.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_plane.h> #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include <drm/drm_vblank.h> #define JZ_REG_LCD_CFG 0x00 @@ -488,15 +490,6 @@ static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg) return IRQ_HANDLED; } -static void ingenic_drm_release(struct drm_device *drm) -{ - struct ingenic_drm *priv = drm_device_get_priv(drm); - - drm_mode_config_cleanup(drm); - drm_dev_fini(drm); - kfree(priv); -} - static int ingenic_drm_enable_vblank(struct drm_crtc *crtc) { struct ingenic_drm *priv = drm_crtc_get_priv(crtc); @@ -540,7 +533,6 @@ static struct drm_driver ingenic_drm_driver_data = { .gem_prime_mmap = drm_gem_cma_prime_mmap, .irq_handler = ingenic_drm_irq_handler, - .release = ingenic_drm_release, }; static const struct drm_plane_funcs ingenic_drm_primary_plane_funcs = { @@ -592,10 +584,6 @@ static const struct drm_mode_config_funcs ingenic_drm_mode_config_funcs = { .atomic_commit = drm_atomic_helper_commit, }; -static const struct drm_encoder_funcs ingenic_drm_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static void ingenic_drm_free_dma_hwdesc(void *d) { struct ingenic_drm *priv = d; @@ -639,8 +627,12 @@ static int ingenic_drm_probe(struct platform_device *pdev) kfree(priv); return ret; } + drmm_add_final_kfree(drm, priv); + + ret = drmm_mode_config_init(drm); + if (ret) + return ret; - drm_mode_config_init(drm); drm->mode_config.min_width = 0; drm->mode_config.min_height = 0; drm->mode_config.max_width = soc_info->max_width; @@ -661,10 +653,8 @@ static int ingenic_drm_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "Failed to get platform irq"); + if (irq < 0) return irq; - } if (soc_info->needs_dev_clk) { priv->lcd_clk = devm_clk_get(dev, "lcd"); @@ -730,8 +720,8 @@ static int ingenic_drm_probe(struct platform_device *pdev) drm_encoder_helper_add(&priv->encoder, &ingenic_drm_encoder_helper_funcs); - ret = drm_encoder_init(drm, &priv->encoder, &ingenic_drm_encoder_funcs, - DRM_MODE_ENCODER_DPI, NULL); + ret = drm_simple_encoder_init(drm, &priv->encoder, + DRM_MODE_ENCODER_DPI); if (ret) { dev_err(dev, "Failed to init encoder: %i", ret); return ret; @@ -791,9 +781,7 @@ static int ingenic_drm_probe(struct platform_device *pdev) goto err_devclk_disable; } - ret = drm_fbdev_generic_setup(drm, 32); - if (ret) - dev_warn(dev, "Unable to start fbdev emulation: %i", ret); + drm_fbdev_generic_setup(drm, 32); return 0; diff --git a/drivers/gpu/drm/lima/Kconfig 
b/drivers/gpu/drm/lima/Kconfig index d589f09d04d9..fa1d4f5df31e 100644 --- a/drivers/gpu/drm/lima/Kconfig +++ b/drivers/gpu/drm/lima/Kconfig @@ -10,5 +10,7 @@ config DRM_LIMA depends on OF select DRM_SCHED select DRM_GEM_SHMEM_HELPER + select PM_DEVFREQ + select DEVFREQ_GOV_SIMPLE_ONDEMAND help DRM driver for ARM Mali 400/450 GPUs. diff --git a/drivers/gpu/drm/lima/Makefile b/drivers/gpu/drm/lima/Makefile index a85444b0a1d4..ca2097b8e1ad 100644 --- a/drivers/gpu/drm/lima/Makefile +++ b/drivers/gpu/drm/lima/Makefile @@ -14,6 +14,8 @@ lima-y := \ lima_sched.o \ lima_ctx.o \ lima_dlbu.o \ - lima_bcast.o + lima_bcast.o \ + lima_trace.o \ + lima_devfreq.o obj-$(CONFIG_DRM_LIMA) += lima.o diff --git a/drivers/gpu/drm/lima/lima_ctx.c b/drivers/gpu/drm/lima/lima_ctx.c index 22fff6caa961..891d5cd5019a 100644 --- a/drivers/gpu/drm/lima/lima_ctx.c +++ b/drivers/gpu/drm/lima/lima_ctx.c @@ -27,6 +27,9 @@ int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id) if (err < 0) goto err_out0; + ctx->pid = task_pid_nr(current); + get_task_comm(ctx->pname, current); + return 0; err_out0: diff --git a/drivers/gpu/drm/lima/lima_ctx.h b/drivers/gpu/drm/lima/lima_ctx.h index 6154e5c9bfe4..74e2be09090f 100644 --- a/drivers/gpu/drm/lima/lima_ctx.h +++ b/drivers/gpu/drm/lima/lima_ctx.h @@ -5,6 +5,7 @@ #define __LIMA_CTX_H__ #include <linux/xarray.h> +#include <linux/sched.h> #include "lima_device.h" @@ -13,6 +14,10 @@ struct lima_ctx { struct lima_device *dev; struct lima_sched_context context[lima_pipe_num]; atomic_t guilty; + + /* debug info */ + char pname[TASK_COMM_LEN]; + pid_t pid; }; struct lima_ctx_mgr { diff --git a/drivers/gpu/drm/lima/lima_devfreq.c b/drivers/gpu/drm/lima/lima_devfreq.c new file mode 100644 index 000000000000..8c4d21d07529 --- /dev/null +++ b/drivers/gpu/drm/lima/lima_devfreq.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2020 Martin Blumenstingl <martin.blumenstingl@googlemail.com> + * + * Based on panfrost_devfreq.c: + * Copyright 2019 Collabora ltd. 
+ */ +#include <linux/clk.h> +#include <linux/devfreq.h> +#include <linux/devfreq_cooling.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/pm_opp.h> +#include <linux/property.h> + +#include "lima_device.h" +#include "lima_devfreq.h" + +static void lima_devfreq_update_utilization(struct lima_devfreq *devfreq) +{ + ktime_t now, last; + + now = ktime_get(); + last = devfreq->time_last_update; + + if (devfreq->busy_count > 0) + devfreq->busy_time += ktime_sub(now, last); + else + devfreq->idle_time += ktime_sub(now, last); + + devfreq->time_last_update = now; +} + +static int lima_devfreq_target(struct device *dev, unsigned long *freq, + u32 flags) +{ + struct dev_pm_opp *opp; + int err; + + opp = devfreq_recommended_opp(dev, freq, flags); + if (IS_ERR(opp)) + return PTR_ERR(opp); + dev_pm_opp_put(opp); + + err = dev_pm_opp_set_rate(dev, *freq); + if (err) + return err; + + return 0; +} + +static void lima_devfreq_reset(struct lima_devfreq *devfreq) +{ + devfreq->busy_time = 0; + devfreq->idle_time = 0; + devfreq->time_last_update = ktime_get(); +} + +static int lima_devfreq_get_dev_status(struct device *dev, + struct devfreq_dev_status *status) +{ + struct lima_device *ldev = dev_get_drvdata(dev); + struct lima_devfreq *devfreq = &ldev->devfreq; + unsigned long irqflags; + + status->current_frequency = clk_get_rate(ldev->clk_gpu); + + spin_lock_irqsave(&devfreq->lock, irqflags); + + lima_devfreq_update_utilization(devfreq); + + status->total_time = ktime_to_ns(ktime_add(devfreq->busy_time, + devfreq->idle_time)); + status->busy_time = ktime_to_ns(devfreq->busy_time); + + lima_devfreq_reset(devfreq); + + spin_unlock_irqrestore(&devfreq->lock, irqflags); + + dev_dbg(ldev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n", + status->busy_time, status->total_time, + status->busy_time / (status->total_time / 100), + status->current_frequency / 1000 / 1000); + + return 0; +} + +static struct devfreq_dev_profile lima_devfreq_profile = { + .polling_ms = 50, /* ~3 frames */ + .target = lima_devfreq_target, + .get_dev_status = lima_devfreq_get_dev_status, +}; + +void lima_devfreq_fini(struct lima_device *ldev) +{ + struct lima_devfreq *devfreq = &ldev->devfreq; + + if (devfreq->cooling) { + devfreq_cooling_unregister(devfreq->cooling); + devfreq->cooling = NULL; + } + + if (devfreq->devfreq) { + devm_devfreq_remove_device(&ldev->pdev->dev, + devfreq->devfreq); + devfreq->devfreq = NULL; + } + + if (devfreq->opp_of_table_added) { + dev_pm_opp_of_remove_table(&ldev->pdev->dev); + devfreq->opp_of_table_added = false; + } + + if (devfreq->regulators_opp_table) { + dev_pm_opp_put_regulators(devfreq->regulators_opp_table); + devfreq->regulators_opp_table = NULL; + } + + if (devfreq->clkname_opp_table) { + dev_pm_opp_put_clkname(devfreq->clkname_opp_table); + devfreq->clkname_opp_table = NULL; + } +} + +int lima_devfreq_init(struct lima_device *ldev) +{ + struct thermal_cooling_device *cooling; + struct device *dev = &ldev->pdev->dev; + struct opp_table *opp_table; + struct devfreq *devfreq; + struct lima_devfreq *ldevfreq = &ldev->devfreq; + struct dev_pm_opp *opp; + unsigned long cur_freq; + int ret; + + if (!device_property_present(dev, "operating-points-v2")) + /* Optional, continue without devfreq */ + return 0; + + spin_lock_init(&ldevfreq->lock); + + opp_table = dev_pm_opp_set_clkname(dev, "core"); + if (IS_ERR(opp_table)) { + ret = PTR_ERR(opp_table); + goto err_fini; + } + + ldevfreq->clkname_opp_table = opp_table; + + opp_table = dev_pm_opp_set_regulators(dev, + 
(const char *[]){ "mali" }, + 1); + if (IS_ERR(opp_table)) { + ret = PTR_ERR(opp_table); + + /* Continue if the optional regulator is missing */ + if (ret != -ENODEV) + goto err_fini; + } else { + ldevfreq->regulators_opp_table = opp_table; + } + + ret = dev_pm_opp_of_add_table(dev); + if (ret) + goto err_fini; + ldevfreq->opp_of_table_added = true; + + lima_devfreq_reset(ldevfreq); + + cur_freq = clk_get_rate(ldev->clk_gpu); + + opp = devfreq_recommended_opp(dev, &cur_freq, 0); + if (IS_ERR(opp)) { + ret = PTR_ERR(opp); + goto err_fini; + } + + lima_devfreq_profile.initial_freq = cur_freq; + dev_pm_opp_put(opp); + + devfreq = devm_devfreq_add_device(dev, &lima_devfreq_profile, + DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL); + if (IS_ERR(devfreq)) { + dev_err(dev, "Couldn't initialize GPU devfreq\n"); + ret = PTR_ERR(devfreq); + goto err_fini; + } + + ldevfreq->devfreq = devfreq; + + cooling = of_devfreq_cooling_register(dev->of_node, devfreq); + if (IS_ERR(cooling)) + dev_info(dev, "Failed to register cooling device\n"); + else + ldevfreq->cooling = cooling; + + return 0; + +err_fini: + lima_devfreq_fini(ldev); + return ret; +} + +void lima_devfreq_record_busy(struct lima_devfreq *devfreq) +{ + unsigned long irqflags; + + if (!devfreq->devfreq) + return; + + spin_lock_irqsave(&devfreq->lock, irqflags); + + lima_devfreq_update_utilization(devfreq); + + devfreq->busy_count++; + + spin_unlock_irqrestore(&devfreq->lock, irqflags); +} + +void lima_devfreq_record_idle(struct lima_devfreq *devfreq) +{ + unsigned long irqflags; + + if (!devfreq->devfreq) + return; + + spin_lock_irqsave(&devfreq->lock, irqflags); + + lima_devfreq_update_utilization(devfreq); + + WARN_ON(--devfreq->busy_count < 0); + + spin_unlock_irqrestore(&devfreq->lock, irqflags); +} diff --git a/drivers/gpu/drm/lima/lima_devfreq.h b/drivers/gpu/drm/lima/lima_devfreq.h new file mode 100644 index 000000000000..8d71ba9fb22a --- /dev/null +++ b/drivers/gpu/drm/lima/lima_devfreq.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright 2020 Martin Blumenstingl <martin.blumenstingl@googlemail.com> */ + +#ifndef __LIMA_DEVFREQ_H__ +#define __LIMA_DEVFREQ_H__ + +#include <linux/spinlock.h> +#include <linux/ktime.h> + +struct devfreq; +struct opp_table; +struct thermal_cooling_device; + +struct lima_device; + +struct lima_devfreq { + struct devfreq *devfreq; + struct opp_table *clkname_opp_table; + struct opp_table *regulators_opp_table; + struct thermal_cooling_device *cooling; + bool opp_of_table_added; + + ktime_t busy_time; + ktime_t idle_time; + ktime_t time_last_update; + int busy_count; + /* + * Protect busy_time, idle_time, time_last_update and busy_count + * because these can be updated concurrently, for example by the GP + * and PP interrupts. 
+ */ + spinlock_t lock; +}; + +int lima_devfreq_init(struct lima_device *ldev); +void lima_devfreq_fini(struct lima_device *ldev); + +void lima_devfreq_record_busy(struct lima_devfreq *devfreq); +void lima_devfreq_record_idle(struct lima_devfreq *devfreq); + +#endif diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c index 19829b543024..247f51fd40a2 100644 --- a/drivers/gpu/drm/lima/lima_device.c +++ b/drivers/gpu/drm/lima/lima_device.c @@ -214,6 +214,8 @@ static int lima_init_gp_pipe(struct lima_device *dev) struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp; int err; + pipe->ldev = dev; + err = lima_sched_pipe_init(pipe, "gp"); if (err) return err; @@ -244,6 +246,8 @@ static int lima_init_pp_pipe(struct lima_device *dev) struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp; int err, i; + pipe->ldev = dev; + err = lima_sched_pipe_init(pipe, "pp"); if (err) return err; @@ -344,6 +348,12 @@ int lima_device_init(struct lima_device *ldev) if (err) goto err_out5; + ldev->dump.magic = LIMA_DUMP_MAGIC; + ldev->dump.version_major = LIMA_DUMP_MAJOR; + ldev->dump.version_minor = LIMA_DUMP_MINOR; + INIT_LIST_HEAD(&ldev->error_task_list); + mutex_init(&ldev->error_task_list_lock); + dev_info(ldev->dev, "bus rate = %lu\n", clk_get_rate(ldev->clk_bus)); dev_info(ldev->dev, "mod rate = %lu", clk_get_rate(ldev->clk_gpu)); @@ -370,6 +380,13 @@ err_out0: void lima_device_fini(struct lima_device *ldev) { int i; + struct lima_sched_error_task *et, *tmp; + + list_for_each_entry_safe(et, tmp, &ldev->error_task_list, list) { + list_del(&et->list); + kvfree(et); + } + mutex_destroy(&ldev->error_task_list_lock); lima_fini_pp_pipe(ldev); lima_fini_gp_pipe(ldev); diff --git a/drivers/gpu/drm/lima/lima_device.h b/drivers/gpu/drm/lima/lima_device.h index 31158d86271c..06fd9636dd72 100644 --- a/drivers/gpu/drm/lima/lima_device.h +++ b/drivers/gpu/drm/lima/lima_device.h @@ -6,8 +6,12 @@ #include <drm/drm_device.h> #include <linux/delay.h> +#include <linux/list.h> +#include <linux/mutex.h> #include "lima_sched.h" +#include "lima_dump.h" +#include "lima_devfreq.h" enum lima_gpu_id { lima_gpu_mali400 = 0, @@ -94,6 +98,13 @@ struct lima_device { u32 *dlbu_cpu; dma_addr_t dlbu_dma; + + struct lima_devfreq devfreq; + + /* debug info */ + struct lima_dump_head dump; + struct list_head error_task_list; + struct mutex error_task_list_lock; }; static inline struct lima_device * diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c index 2daac64d8955..bbbdc8455e2f 100644 --- a/drivers/gpu/drm/lima/lima_drv.c +++ b/drivers/gpu/drm/lima/lima_drv.c @@ -10,12 +10,14 @@ #include <drm/drm_prime.h> #include <drm/lima_drm.h> +#include "lima_device.h" #include "lima_drv.h" #include "lima_gem.h" #include "lima_vm.h" int lima_sched_timeout_ms; uint lima_heap_init_nr_pages = 8; +uint lima_max_error_tasks; MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms"); module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444); @@ -23,6 +25,9 @@ module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444); MODULE_PARM_DESC(heap_init_nr_pages, "heap buffer init number of pages"); module_param_named(heap_init_nr_pages, lima_heap_init_nr_pages, uint, 0444); +MODULE_PARM_DESC(max_error_tasks, "max number of error tasks to save"); +module_param_named(max_error_tasks, lima_max_error_tasks, uint, 0644); + static int lima_ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_lima_get_param *args = data; @@ -272,6 +277,93 @@ static 
struct drm_driver lima_drm_driver = { .gem_prime_mmap = drm_gem_prime_mmap, }; +struct lima_block_reader { + void *dst; + size_t base; + size_t count; + size_t off; + ssize_t read; +}; + +static bool lima_read_block(struct lima_block_reader *reader, + void *src, size_t src_size) +{ + size_t max_off = reader->base + src_size; + + if (reader->off < max_off) { + size_t size = min_t(size_t, max_off - reader->off, + reader->count); + + memcpy(reader->dst, src + (reader->off - reader->base), size); + + reader->dst += size; + reader->off += size; + reader->read += size; + reader->count -= size; + } + + reader->base = max_off; + + return !!reader->count; +} + +static ssize_t lima_error_state_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct lima_device *ldev = dev_get_drvdata(dev); + struct lima_sched_error_task *et; + struct lima_block_reader reader = { + .dst = buf, + .count = count, + .off = off, + }; + + mutex_lock(&ldev->error_task_list_lock); + + if (lima_read_block(&reader, &ldev->dump, sizeof(ldev->dump))) { + list_for_each_entry(et, &ldev->error_task_list, list) { + if (!lima_read_block(&reader, et->data, et->size)) + break; + } + } + + mutex_unlock(&ldev->error_task_list_lock); + return reader.read; +} + +static ssize_t lima_error_state_write(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct lima_device *ldev = dev_get_drvdata(dev); + struct lima_sched_error_task *et, *tmp; + + mutex_lock(&ldev->error_task_list_lock); + + list_for_each_entry_safe(et, tmp, &ldev->error_task_list, list) { + list_del(&et->list); + kvfree(et); + } + + ldev->dump.size = 0; + ldev->dump.num_tasks = 0; + + mutex_unlock(&ldev->error_task_list_lock); + + return count; +} + +static const struct bin_attribute lima_error_state_attr = { + .attr.name = "error", + .attr.mode = 0600, + .size = 0, + .read = lima_error_state_read, + .write = lima_error_state_write, +}; + static int lima_pdev_probe(struct platform_device *pdev) { struct lima_device *ldev; @@ -306,18 +398,31 @@ static int lima_pdev_probe(struct platform_device *pdev) if (err) goto err_out1; + err = lima_devfreq_init(ldev); + if (err) { + dev_err(&pdev->dev, "Fatal error during devfreq init\n"); + goto err_out2; + } + /* * Register the DRM device with the core and the connectors with * sysfs. 
*/ err = drm_dev_register(ddev, 0); if (err < 0) - goto err_out2; + goto err_out3; + + platform_set_drvdata(pdev, ldev); + + if (sysfs_create_bin_file(&ldev->dev->kobj, &lima_error_state_attr)) + dev_warn(ldev->dev, "fail to create error state sysfs\n"); return 0; -err_out2: +err_out3: lima_device_fini(ldev); +err_out2: + lima_devfreq_fini(ldev); err_out1: drm_dev_put(ddev); err_out0: @@ -330,7 +435,10 @@ static int lima_pdev_remove(struct platform_device *pdev) struct lima_device *ldev = platform_get_drvdata(pdev); struct drm_device *ddev = ldev->ddev; + sysfs_remove_bin_file(&ldev->dev->kobj, &lima_error_state_attr); + platform_set_drvdata(pdev, NULL); drm_dev_unregister(ddev); + lima_devfreq_fini(ldev); lima_device_fini(ldev); drm_dev_put(ddev); lima_sched_slab_fini(); diff --git a/drivers/gpu/drm/lima/lima_drv.h b/drivers/gpu/drm/lima/lima_drv.h index f492ecc6a5d9..fdbd4077c768 100644 --- a/drivers/gpu/drm/lima/lima_drv.h +++ b/drivers/gpu/drm/lima/lima_drv.h @@ -10,6 +10,7 @@ extern int lima_sched_timeout_ms; extern uint lima_heap_init_nr_pages; +extern uint lima_max_error_tasks; struct lima_vm; struct lima_bo; diff --git a/drivers/gpu/drm/lima/lima_dump.h b/drivers/gpu/drm/lima/lima_dump.h new file mode 100644 index 000000000000..ca243d99c51b --- /dev/null +++ b/drivers/gpu/drm/lima/lima_dump.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* Copyright 2020 Qiang Yu <yuq825@gmail.com> */ + +#ifndef __LIMA_DUMP_H__ +#define __LIMA_DUMP_H__ + +#include <linux/types.h> + +/** + * dump file format for all the information to start a lima task + * + * top level format + * | magic code "LIMA" | format version | num tasks | data size | + * | reserved | reserved | reserved | reserved | + * | task 1 ID | task 1 size | num chunks | reserved | task 1 data | + * | task 2 ID | task 2 size | num chunks | reserved | task 2 data | + * ... + * + * task data format + * | chunk 1 ID | chunk 1 size | reserved | reserved | chunk 1 data | + * | chunk 2 ID | chunk 2 size | reserved | reserved | chunk 2 data | + * ... 
+ * + */ + +#define LIMA_DUMP_MAJOR 1 +#define LIMA_DUMP_MINOR 0 + +#define LIMA_DUMP_MAGIC 0x414d494c + +struct lima_dump_head { + __u32 magic; + __u16 version_major; + __u16 version_minor; + __u32 num_tasks; + __u32 size; + __u32 reserved[4]; +}; + +#define LIMA_DUMP_TASK_GP 0 +#define LIMA_DUMP_TASK_PP 1 +#define LIMA_DUMP_TASK_NUM 2 + +struct lima_dump_task { + __u32 id; + __u32 size; + __u32 num_chunks; + __u32 reserved; +}; + +#define LIMA_DUMP_CHUNK_FRAME 0 +#define LIMA_DUMP_CHUNK_BUFFER 1 +#define LIMA_DUMP_CHUNK_PROCESS_NAME 2 +#define LIMA_DUMP_CHUNK_PROCESS_ID 3 +#define LIMA_DUMP_CHUNK_NUM 4 + +struct lima_dump_chunk { + __u32 id; + __u32 size; + __u32 reserved[2]; +}; + +struct lima_dump_chunk_buffer { + __u32 id; + __u32 size; + __u32 va; + __u32 reserved; +}; + +struct lima_dump_chunk_pid { + __u32 id; + __u32 size; + __u32 pid; + __u32 reserved; +}; + +#endif diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index 3886999b4533..a2db1c937424 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -3,14 +3,16 @@ #include <linux/kthread.h> #include <linux/slab.h> -#include <linux/xarray.h> +#include <linux/vmalloc.h> +#include "lima_devfreq.h" #include "lima_drv.h" #include "lima_sched.h" #include "lima_vm.h" #include "lima_mmu.h" #include "lima_l2_cache.h" #include "lima_gem.h" +#include "lima_trace.h" struct lima_fence { struct dma_fence base; @@ -176,6 +178,7 @@ struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *conte { struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished); + trace_lima_task_submit(task); drm_sched_entity_push_job(&task->base, &context->base); return fence; } @@ -214,6 +217,8 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job) */ ret = dma_fence_get(task->fence); + lima_devfreq_record_busy(&pipe->ldev->devfreq); + pipe->current_task = task; /* this is needed for MMU to work correctly, otherwise GP/PP @@ -250,12 +255,141 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job) if (last_vm) lima_vm_put(last_vm); + trace_lima_task_run(task); + pipe->error = false; pipe->task_run(pipe, task); return task->fence; } +static void lima_sched_build_error_task_list(struct lima_sched_task *task) +{ + struct lima_sched_error_task *et; + struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched); + struct lima_ip *ip = pipe->processor[0]; + int pipe_id = ip->id == lima_ip_gp ? lima_pipe_gp : lima_pipe_pp; + struct lima_device *dev = ip->dev; + struct lima_sched_context *sched_ctx = + container_of(task->base.entity, + struct lima_sched_context, base); + struct lima_ctx *ctx = + container_of(sched_ctx, struct lima_ctx, context[pipe_id]); + struct lima_dump_task *dt; + struct lima_dump_chunk *chunk; + struct lima_dump_chunk_pid *pid_chunk; + struct lima_dump_chunk_buffer *buffer_chunk; + u32 size, task_size, mem_size; + int i; + + mutex_lock(&dev->error_task_list_lock); + + if (dev->dump.num_tasks >= lima_max_error_tasks) { + dev_info(dev->dev, "fail to save task state: error task list is full\n"); + goto out; + } + + /* frame chunk */ + size = sizeof(struct lima_dump_chunk) + pipe->frame_size; + /* process name chunk */ + size += sizeof(struct lima_dump_chunk) + sizeof(ctx->pname); + /* pid chunk */ + size += sizeof(struct lima_dump_chunk); + /* buffer chunks */ + for (i = 0; i < task->num_bos; i++) { + struct lima_bo *bo = task->bos[i]; + + size += sizeof(struct lima_dump_chunk); + size += bo->heap_size ? 
bo->heap_size : lima_bo_size(bo); + } + + task_size = size + sizeof(struct lima_dump_task); + mem_size = task_size + sizeof(*et); + et = kvmalloc(mem_size, GFP_KERNEL); + if (!et) { + dev_err(dev->dev, "fail to alloc task dump buffer of size %x\n", + mem_size); + goto out; + } + + et->data = et + 1; + et->size = task_size; + + dt = et->data; + memset(dt, 0, sizeof(*dt)); + dt->id = pipe_id; + dt->size = size; + + chunk = (struct lima_dump_chunk *)(dt + 1); + memset(chunk, 0, sizeof(*chunk)); + chunk->id = LIMA_DUMP_CHUNK_FRAME; + chunk->size = pipe->frame_size; + memcpy(chunk + 1, task->frame, pipe->frame_size); + dt->num_chunks++; + + chunk = (void *)(chunk + 1) + chunk->size; + memset(chunk, 0, sizeof(*chunk)); + chunk->id = LIMA_DUMP_CHUNK_PROCESS_NAME; + chunk->size = sizeof(ctx->pname); + memcpy(chunk + 1, ctx->pname, sizeof(ctx->pname)); + dt->num_chunks++; + + pid_chunk = (void *)(chunk + 1) + chunk->size; + memset(pid_chunk, 0, sizeof(*pid_chunk)); + pid_chunk->id = LIMA_DUMP_CHUNK_PROCESS_ID; + pid_chunk->pid = ctx->pid; + dt->num_chunks++; + + buffer_chunk = (void *)(pid_chunk + 1) + pid_chunk->size; + for (i = 0; i < task->num_bos; i++) { + struct lima_bo *bo = task->bos[i]; + void *data; + + memset(buffer_chunk, 0, sizeof(*buffer_chunk)); + buffer_chunk->id = LIMA_DUMP_CHUNK_BUFFER; + buffer_chunk->va = lima_vm_get_va(task->vm, bo); + + if (bo->heap_size) { + buffer_chunk->size = bo->heap_size; + + data = vmap(bo->base.pages, bo->heap_size >> PAGE_SHIFT, + VM_MAP, pgprot_writecombine(PAGE_KERNEL)); + if (!data) { + kvfree(et); + goto out; + } + + memcpy(buffer_chunk + 1, data, buffer_chunk->size); + + vunmap(data); + } else { + buffer_chunk->size = lima_bo_size(bo); + + data = drm_gem_shmem_vmap(&bo->base.base); + if (IS_ERR_OR_NULL(data)) { + kvfree(et); + goto out; + } + + memcpy(buffer_chunk + 1, data, buffer_chunk->size); + + drm_gem_shmem_vunmap(&bo->base.base, data); + } + + buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size; + dt->num_chunks++; + } + + list_add(&et->list, &dev->error_task_list); + dev->dump.size += et->size; + dev->dump.num_tasks++; + + dev_info(dev->dev, "save error task state success\n"); + +out: + mutex_unlock(&dev->error_task_list_lock); +} + static void lima_sched_timedout_job(struct drm_sched_job *job) { struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); @@ -268,6 +402,8 @@ static void lima_sched_timedout_job(struct drm_sched_job *job) drm_sched_increase_karma(&task->base); + lima_sched_build_error_task_list(task); + pipe->task_error(pipe); if (pipe->bcast_mmu) @@ -285,6 +421,8 @@ static void lima_sched_timedout_job(struct drm_sched_job *job) pipe->current_vm = NULL; pipe->current_task = NULL; + lima_devfreq_record_idle(&pipe->ldev->devfreq); + drm_sched_resubmit_jobs(&pipe->base); drm_sched_start(&pipe->base, true); } @@ -364,5 +502,7 @@ void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe) } else { pipe->task_fini(pipe); dma_fence_signal(task->fence); + + lima_devfreq_record_idle(&pipe->ldev->devfreq); } } diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h index d64393fb50a9..90f03c48ef4a 100644 --- a/drivers/gpu/drm/lima/lima_sched.h +++ b/drivers/gpu/drm/lima/lima_sched.h @@ -5,9 +5,18 @@ #define __LIMA_SCHED_H__ #include <drm/gpu_scheduler.h> +#include <linux/list.h> +#include <linux/xarray.h> +struct lima_device; struct lima_vm; +struct lima_sched_error_task { + struct list_head list; + void *data; + u32 size; +}; + struct lima_sched_task { struct drm_sched_job base; @@ -44,6 +53,8 @@ 
struct lima_sched_pipe { u32 fence_seqno; spinlock_t fence_lock; + struct lima_device *ldev; + struct lima_sched_task *current_task; struct lima_vm *current_vm; diff --git a/drivers/gpu/drm/lima/lima_trace.c b/drivers/gpu/drm/lima/lima_trace.c new file mode 100644 index 000000000000..ea1c7289bebc --- /dev/null +++ b/drivers/gpu/drm/lima/lima_trace.c @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* Copyright 2020 Qiang Yu <yuq825@gmail.com> */ + +#include "lima_sched.h" + +#define CREATE_TRACE_POINTS +#include "lima_trace.h" diff --git a/drivers/gpu/drm/lima/lima_trace.h b/drivers/gpu/drm/lima/lima_trace.h new file mode 100644 index 000000000000..3a430e93d384 --- /dev/null +++ b/drivers/gpu/drm/lima/lima_trace.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* Copyright 2020 Qiang Yu <yuq825@gmail.com> */ + +#if !defined(_LIMA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _LIMA_TRACE_H_ + +#include <linux/tracepoint.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM lima +#define TRACE_INCLUDE_FILE lima_trace + +DECLARE_EVENT_CLASS(lima_task, + TP_PROTO(struct lima_sched_task *task), + TP_ARGS(task), + TP_STRUCT__entry( + __field(uint64_t, task_id) + __field(unsigned int, context) + __field(unsigned int, seqno) + __string(pipe, task->base.sched->name) + ), + + TP_fast_assign( + __entry->task_id = task->base.id; + __entry->context = task->base.s_fence->finished.context; + __entry->seqno = task->base.s_fence->finished.seqno; + __assign_str(pipe, task->base.sched->name) + ), + + TP_printk("task=%llu, context=%u seqno=%u pipe=%s", + __entry->task_id, __entry->context, __entry->seqno, + __get_str(pipe)) +); + +DEFINE_EVENT(lima_task, lima_task_submit, + TP_PROTO(struct lima_sched_task *task), + TP_ARGS(task) +); + +DEFINE_EVENT(lima_task, lima_task_run, + TP_PROTO(struct lima_sched_task *task), + TP_ARGS(task) +); + +#endif + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/lima +#include <trace/define_trace.h> diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index f28cb7a576ba..88cc6b4a7a64 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -72,6 +72,7 @@ #include <drm/drm_gem.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_of.h> #include <drm/drm_probe_helper.h> #include <drm/drm_panel.h> @@ -183,13 +184,13 @@ static int mcde_modeset_init(struct drm_device *drm) ret = drm_vblank_init(drm, 1); if (ret) { dev_err(drm->dev, "failed to init vblank\n"); - goto out_config; + return ret; } ret = mcde_display_init(drm); if (ret) { dev_err(drm->dev, "failed to init display\n"); - goto out_config; + return ret; } /* @@ -203,7 +204,7 @@ static int mcde_modeset_init(struct drm_device *drm) mcde->bridge); if (ret) { dev_err(drm->dev, "failed to attach display output bridge\n"); - goto out_config; + return ret; } drm_mode_config_reset(drm); @@ -211,19 +212,6 @@ static int mcde_modeset_init(struct drm_device *drm) drm_fbdev_generic_setup(drm, 32); return 0; - -out_config: - drm_mode_config_cleanup(drm); - return ret; -} - -static void mcde_release(struct drm_device *drm) -{ - struct mcde *mcde = drm->dev_private; - - drm_mode_config_cleanup(drm); - drm_dev_fini(drm); - kfree(mcde); } DEFINE_DRM_GEM_CMA_FOPS(drm_fops); @@ -231,7 +219,6 @@ DEFINE_DRM_GEM_CMA_FOPS(drm_fops); static struct drm_driver mcde_drm_driver = { .driver_features = DRIVER_MODESET | 
DRIVER_GEM | DRIVER_ATOMIC, - .release = mcde_release, .lastclose = drm_fb_helper_lastclose, .ioctls = NULL, .fops = &drm_fops, @@ -259,7 +246,9 @@ static int mcde_drm_bind(struct device *dev) struct drm_device *drm = dev_get_drvdata(dev); int ret; - drm_mode_config_init(drm); + ret = drmm_mode_config_init(drm); + if (ret) + return ret; ret = component_bind_all(drm->dev, drm); if (ret) { @@ -323,13 +312,14 @@ static int mcde_probe(struct platform_device *pdev) return -ENOMEM; mcde->dev = dev; - ret = drm_dev_init(&mcde->drm, &mcde_drm_driver, dev); + ret = devm_drm_dev_init(dev, &mcde->drm, &mcde_drm_driver); if (ret) { kfree(mcde); return ret; } drm = &mcde->drm; drm->dev_private = mcde; + drmm_add_final_kfree(drm, mcde); platform_set_drvdata(pdev, drm); /* Enable continuous updates: this is what Linux' framebuffer expects */ @@ -341,12 +331,12 @@ static int mcde_probe(struct platform_device *pdev) if (IS_ERR(mcde->epod)) { ret = PTR_ERR(mcde->epod); dev_err(dev, "can't get EPOD regulator\n"); - goto dev_unref; + return ret; } ret = regulator_enable(mcde->epod); if (ret) { dev_err(dev, "can't enable EPOD regulator\n"); - goto dev_unref; + return ret; } mcde->vana = devm_regulator_get(dev, "vana"); if (IS_ERR(mcde->vana)) { @@ -497,8 +487,6 @@ regulator_off: regulator_disable(mcde->vana); regulator_epod_off: regulator_disable(mcde->epod); -dev_unref: - drm_dev_put(drm); return ret; } @@ -512,7 +500,6 @@ static int mcde_remove(struct platform_device *pdev) clk_disable_unprepare(mcde->mcde_clk); regulator_disable(mcde->vana); regulator_disable(mcde->epod); - drm_dev_put(drm); return 0; } diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index 4f0ce4cd5b8c..52a3503edd8f 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -20,6 +20,7 @@ #include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_of.h> +#include <drm/drm_simple_kms_helper.h> #include "mtk_dpi_regs.h" #include "mtk_drm_ddp_comp.h" @@ -509,15 +510,6 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi, return 0; } -static void mtk_dpi_encoder_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -static const struct drm_encoder_funcs mtk_dpi_encoder_funcs = { - .destroy = mtk_dpi_encoder_destroy, -}; - static bool mtk_dpi_encoder_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -596,8 +588,8 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data) return ret; } - ret = drm_encoder_init(drm_dev, &dpi->encoder, &mtk_dpi_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + ret = drm_simple_encoder_init(drm_dev, &dpi->encoder, + DRM_MODE_ENCODER_TMDS); if (ret) { dev_err(dev, "Failed to initialize decoder: %d\n", ret); goto err_unregister; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 0563c6813333..ce570283b55f 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -162,7 +162,9 @@ static int mtk_drm_kms_init(struct drm_device *drm) } private->mutex_dev = &pdev->dev; - drm_mode_config_init(drm); + ret = drmm_mode_config_init(drm); + if (ret) + return ret; drm->mode_config.min_width = 64; drm->mode_config.min_height = 64; @@ -179,7 +181,7 @@ static int mtk_drm_kms_init(struct drm_device *drm) ret = component_bind_all(drm->dev, drm); if (ret) - goto err_config_cleanup; + return ret; /* * We currently support two fixed data streams, 
each optional, @@ -255,8 +257,6 @@ err_unset_dma_parms: dma_dev->dma_parms = NULL; err_component_unbind: component_unbind_all(drm->dev, drm); -err_config_cleanup: - drm_mode_config_cleanup(drm); return ret; } @@ -272,7 +272,6 @@ static void mtk_drm_kms_deinit(struct drm_device *drm) private->dma_dev->dma_parms = NULL; component_unbind_all(drm->dev, drm); - drm_mode_config_cleanup(drm); } static const struct file_operations mtk_drm_fops = { @@ -348,9 +347,7 @@ static int mtk_drm_bind(struct device *dev) if (ret < 0) goto err_deinit; - ret = drm_fbdev_generic_setup(drm, 32); - if (ret) - DRM_ERROR("Failed to initialize fbdev: %d\n", ret); + drm_fbdev_generic_setup(drm, 32); return 0; diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 0ede69830a9d..a9a25087112f 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -22,6 +22,7 @@ #include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "mtk_drm_ddp_comp.h" @@ -787,15 +788,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi) dsi->enabled = false; } -static void mtk_dsi_encoder_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -static const struct drm_encoder_funcs mtk_dsi_encoder_funcs = { - .destroy = mtk_dsi_encoder_destroy, -}; - static bool mtk_dsi_encoder_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -888,8 +880,8 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi) { int ret; - ret = drm_encoder_init(drm, &dsi->encoder, &mtk_dsi_encoder_funcs, - DRM_MODE_ENCODER_DSI, NULL); + ret = drm_simple_encoder_init(drm, &dsi->encoder, + DRM_MODE_ENCODER_DSI); if (ret) { DRM_ERROR("Failed to encoder init to drm\n"); return ret; diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index a8b20557539b..ff43a3d80410 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -12,6 +12,7 @@ #include <linux/io.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> +#include <linux/mutex.h> #include <linux/of_platform.h> #include <linux/of.h> #include <linux/of_gpio.h> @@ -169,6 +170,9 @@ struct mtk_hdmi { bool audio_enable; bool powered; bool enabled; + hdmi_codec_plugged_cb plugged_cb; + struct device *codec_dev; + struct mutex update_plugged_status_lock; }; static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b) @@ -1194,13 +1198,26 @@ static void mtk_hdmi_clk_disable_audio(struct mtk_hdmi *hdmi) clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]); } +static enum drm_connector_status +mtk_hdmi_update_plugged_status(struct mtk_hdmi *hdmi) +{ + bool connected; + + mutex_lock(&hdmi->update_plugged_status_lock); + connected = mtk_cec_hpd_high(hdmi->cec_dev); + if (hdmi->plugged_cb && hdmi->codec_dev) + hdmi->plugged_cb(hdmi->codec_dev, connected); + mutex_unlock(&hdmi->update_plugged_status_lock); + + return connected ? + connector_status_connected : connector_status_disconnected; +} + static enum drm_connector_status hdmi_conn_detect(struct drm_connector *conn, bool force) { struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn); - - return mtk_cec_hpd_high(hdmi->cec_dev) ? 
- connector_status_connected : connector_status_disconnected; + return mtk_hdmi_update_plugged_status(hdmi); } static void hdmi_conn_destroy(struct drm_connector *conn) @@ -1657,20 +1674,39 @@ static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, return 0; } +static int mtk_hdmi_audio_hook_plugged_cb(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + struct mtk_hdmi *hdmi = data; + + mutex_lock(&hdmi->update_plugged_status_lock); + hdmi->plugged_cb = fn; + hdmi->codec_dev = codec_dev; + mutex_unlock(&hdmi->update_plugged_status_lock); + + mtk_hdmi_update_plugged_status(hdmi); + + return 0; +} + static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = { .hw_params = mtk_hdmi_audio_hw_params, .audio_startup = mtk_hdmi_audio_startup, .audio_shutdown = mtk_hdmi_audio_shutdown, .digital_mute = mtk_hdmi_audio_digital_mute, .get_eld = mtk_hdmi_audio_get_eld, + .hook_plugged_cb = mtk_hdmi_audio_hook_plugged_cb, }; -static void mtk_hdmi_register_audio_driver(struct device *dev) +static int mtk_hdmi_register_audio_driver(struct device *dev) { + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); struct hdmi_codec_pdata codec_data = { .ops = &mtk_hdmi_audio_codec_ops, .max_i2s_channels = 2, .i2s = 1, + .data = hdmi, }; struct platform_device *pdev; @@ -1678,9 +1714,10 @@ static void mtk_hdmi_register_audio_driver(struct device *dev) PLATFORM_DEVID_AUTO, &codec_data, sizeof(codec_data)); if (IS_ERR(pdev)) - return; + return PTR_ERR(pdev); DRM_INFO("%s driver bound to HDMI\n", HDMI_CODEC_DRV_NAME); + return 0; } static int mtk_drm_hdmi_probe(struct platform_device *pdev) @@ -1706,6 +1743,7 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev) return ret; } + mutex_init(&hdmi->update_plugged_status_lock); platform_set_drvdata(pdev, hdmi); ret = mtk_hdmi_output_init(hdmi); @@ -1714,7 +1752,11 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev) return ret; } - mtk_hdmi_register_audio_driver(dev); + ret = mtk_hdmi_register_audio_driver(dev); + if (ret) { + dev_err(dev, "Failed to register audio driver: %d\n", ret); + return ret; + } hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs; hdmi->bridge.of_node = pdev->dev.of_node; diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index b5f5eb7b4bb9..6f29fab79952 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -284,7 +284,9 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) /* Remove early framebuffers (ie. 
simplefb) */ meson_remove_framebuffers(); - drm_mode_config_init(drm); + ret = drmm_mode_config_init(drm); + if (ret) + goto free_drm; drm->mode_config.max_width = 3840; drm->mode_config.max_height = 2160; drm->mode_config.funcs = &meson_mode_config_funcs; @@ -379,7 +381,6 @@ static void meson_drv_unbind(struct device *dev) drm_dev_unregister(drm); drm_irq_uninstall(drm); drm_kms_helper_poll_fini(drm); - drm_mode_config_cleanup(drm); drm_dev_put(drm); } diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 7a5bad2f57d7..3298b7ef18b0 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -77,6 +77,8 @@ static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto err_mgag200_driver_unload; + drm_fbdev_generic_setup(dev, 0); + return 0; err_mgag200_driver_unload: diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c index e278b6a547bd..b680cf47cbb9 100644 --- a/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/drivers/gpu/drm/mgag200/mgag200_main.c @@ -181,10 +181,6 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags) dev_warn(&dev->pdev->dev, "Could not initialize cursors. Not doing hardware cursors.\n"); - r = drm_fbdev_generic_setup(mdev->dev, 0); - if (r) - goto err_modeset; - return 0; err_modeset: diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c index 075ecce4b5e0..8cae2ca4af6b 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c @@ -148,27 +148,19 @@ reset_set(void *data, u64 val) DEFINE_SIMPLE_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n"); -int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor) +void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor) { struct drm_device *dev; - int ret; if (!minor) - return 0; + return; dev = minor->dev; - ret = drm_debugfs_create_files(a5xx_debugfs_list, - ARRAY_SIZE(a5xx_debugfs_list), - minor->debugfs_root, minor); - - if (ret) { - DRM_DEV_ERROR(dev->dev, "could not install a5xx_debugfs_list\n"); - return ret; - } + drm_debugfs_create_files(a5xx_debugfs_list, + ARRAY_SIZE(a5xx_debugfs_list), + minor->debugfs_root, minor); debugfs_create_file("reset", S_IWUGO, minor->debugfs_root, dev, &reset_fops); - - return 0; } diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index 833468ce6b6d..54868d4e3958 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -41,7 +41,7 @@ struct a5xx_gpu { #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base) #ifdef CONFIG_DEBUG_FS -int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor); +void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor); #endif /* diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index 6650f478b226..c902c6503675 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -259,17 +259,9 @@ static struct drm_info_list mdp5_debugfs_list[] = { static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) { - struct drm_device *dev = minor->dev; - int ret; - - ret = drm_debugfs_create_files(mdp5_debugfs_list, - ARRAY_SIZE(mdp5_debugfs_list), - minor->debugfs_root, minor); - - if (ret) { - DRM_DEV_ERROR(dev->dev, "could not install mdp5_debugfs_list\n"); - return ret; - } + 
drm_debugfs_create_files(mdp5_debugfs_list, + ARRAY_SIZE(mdp5_debugfs_list), + minor->debugfs_root, minor); return 0; } @@ -633,7 +625,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) if (config->platform.iommu) { iommu_dev = &pdev->dev; - if (!iommu_dev->iommu_fwspec) + if (!dev_iommu_fwspec_get(iommu_dev)) iommu_dev = iommu_dev->parent; aspace = msm_gem_address_space_create(iommu_dev, diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 1c74381a4fc9..ee2e270f464c 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -214,31 +214,20 @@ int msm_debugfs_late_init(struct drm_device *dev) return ret; } -int msm_debugfs_init(struct drm_minor *minor) +void msm_debugfs_init(struct drm_minor *minor) { struct drm_device *dev = minor->dev; struct msm_drm_private *priv = dev->dev_private; - int ret; - - ret = drm_debugfs_create_files(msm_debugfs_list, - ARRAY_SIZE(msm_debugfs_list), - minor->debugfs_root, minor); - if (ret) { - DRM_DEV_ERROR(dev->dev, "could not install msm_debugfs_list\n"); - return ret; - } + drm_debugfs_create_files(msm_debugfs_list, + ARRAY_SIZE(msm_debugfs_list), + minor->debugfs_root, minor); debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root, dev, &msm_gpu_fops); - if (priv->kms && priv->kms->funcs->debugfs_init) { - ret = priv->kms->funcs->debugfs_init(priv->kms, minor); - if (ret) - return ret; - } - - return ret; + if (priv->kms && priv->kms->funcs->debugfs_init) + priv->kms->funcs->debugfs_init(priv->kms, minor); } #endif diff --git a/drivers/gpu/drm/msm/msm_debugfs.h b/drivers/gpu/drm/msm/msm_debugfs.h index 2b91f8c178ad..ef58f66abbb3 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.h +++ b/drivers/gpu/drm/msm/msm_debugfs.h @@ -8,7 +8,7 @@ #define __MSM_DEBUGFS_H__ #ifdef CONFIG_DEBUG_FS -int msm_debugfs_init(struct drm_minor *minor); +void msm_debugfs_init(struct drm_minor *minor); #endif #endif /* __MSM_DEBUGFS_H__ */ diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index be5bc2e8425c..6ccae4ba905c 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -57,7 +57,7 @@ struct msm_gpu_funcs { void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state, struct drm_printer *p); /* for generation specific debugfs: */ - int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor); + void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor); #endif unsigned long (*gpu_busy)(struct msm_gpu *gpu); struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu); diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c index e8eef88a8382..ffdd447d8706 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dac.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c @@ -35,7 +35,8 @@ #include <subdev/bios/gpio.h> #include <subdev/gpio.h> -#include <subdev/timer.h> + +#include <nvif/timer.h> int nv04_dac_output_offset(struct drm_encoder *encoder) { diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c index 3fdfafa8b0ad..b674d68ef28a 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/hw.c +++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c @@ -26,6 +26,7 @@ #include "hw.h" #include <subdev/bios/pll.h> +#include <nvif/timer.h> #define CHIPSET_NFORCE 0x01a0 #define CHIPSET_NFORCE2 0x01f0 diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c index 00a85f1e1a4a..ee782151d332 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c +++ 
b/drivers/gpu/drm/nouveau/dispnv50/base507c.c @@ -23,6 +23,7 @@ #include <nvif/cl507c.h> #include <nvif/event.h> +#include <nvif/timer.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c index e7fcfa6e6467..c5152c39c684 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c @@ -23,6 +23,7 @@ #include "head.h" #include <nvif/cl507d.h> +#include <nvif/timer.h> #include "nouveau_bo.h" diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c index 3b36dc8d36b2..c03cb987856b 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c @@ -24,6 +24,8 @@ #include <nouveau_bo.h> +#include <nvif/timer.h> + void corec37d_wndw_owner(struct nv50_core *core) { diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c index 397143b639c6..8c5cf096f69b 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c +++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c @@ -24,21 +24,36 @@ #include "head.h" #include <nvif/cl507a.h> +#include <nvif/timer.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_plane_helper.h> +bool +curs507a_space(struct nv50_wndw *wndw) +{ + nvif_msec(&nouveau_drm(wndw->plane.dev)->client.device, 2, + if (nvif_rd32(&wndw->wimm.base.user, 0x0008) >= 4) + return true; + ); + WARN_ON(1); + return false; +} + static void curs507a_update(struct nv50_wndw *wndw, u32 *interlock) { - nvif_wr32(&wndw->wimm.base.user, 0x0080, 0x00000000); + if (curs507a_space(wndw)) + nvif_wr32(&wndw->wimm.base.user, 0x0080, 0x00000000); } static void curs507a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) { - nvif_wr32(&wndw->wimm.base.user, 0x0084, asyw->point.y << 16 | - asyw->point.x); + if (curs507a_space(wndw)) { + nvif_wr32(&wndw->wimm.base.user, 0x0084, asyw->point.y << 16 | + asyw->point.x); + } } const struct nv50_wimm_func diff --git a/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c b/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c index 23fb29d41efe..96dff4f09f57 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c +++ b/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c @@ -25,14 +25,17 @@ static void cursc37a_update(struct nv50_wndw *wndw, u32 *interlock) { - nvif_wr32(&wndw->wimm.base.user, 0x0200, 0x00000001); + if (curs507a_space(wndw)) + nvif_wr32(&wndw->wimm.base.user, 0x0200, 0x00000001); } static void cursc37a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) { - nvif_wr32(&wndw->wimm.base.user, 0x0208, asyw->point.y << 16 | - asyw->point.x); + if (curs507a_space(wndw)) { + nvif_wr32(&wndw->wimm.base.user, 0x0208, asyw->point.y << 16 | + asyw->point.x); + } } static const struct nv50_wimm_func diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 4d1c58468dbc..6be9df1820c5 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -45,6 +45,7 @@ #include <nvif/cl5070.h> #include <nvif/cl507d.h> #include <nvif/event.h> +#include <nvif/timer.h> #include "nouveau_drv.h" #include "nouveau_dma.h" diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c index 2e68fc736fe1..4f7ce57f2036 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c +++ b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c @@ -24,6 +24,8 @@ #include <nouveau_bo.h> +#include 
<nvif/timer.h> + static void ovly827e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) { diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h index caf397475918..a7412b9d3a98 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h @@ -97,6 +97,7 @@ struct nv50_wimm_func { }; extern const struct nv50_wimm_func curs507a; +bool curs507a_space(struct nv50_wndw *); int wndwc37e_new(struct nouveau_drm *, enum drm_plane_type, int, s32, struct nv50_wndw **); diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h index 25d969dcf67d..c2a572c67a76 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/device.h +++ b/drivers/gpu/drm/nouveau/include/nvif/device.h @@ -23,27 +23,6 @@ int nvif_device_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32, void nvif_device_fini(struct nvif_device *); u64 nvif_device_time(struct nvif_device *); -/* Delay based on GPU time (ie. PTIMER). - * - * Will return -ETIMEDOUT unless the loop was terminated with 'break', - * where it will return the number of nanoseconds taken instead. - */ -#define nvif_nsec(d,n,cond...) ({ \ - struct nvif_device *_device = (d); \ - u64 _nsecs = (n), _time0 = nvif_device_time(_device); \ - s64 _taken = 0; \ - \ - do { \ - cond \ - } while (_taken = nvif_device_time(_device) - _time0, _taken < _nsecs);\ - \ - if (_taken >= _nsecs) \ - _taken = -ETIMEDOUT; \ - _taken; \ -}) -#define nvif_usec(d,u,cond...) nvif_nsec((d), (u) * 1000, ##cond) -#define nvif_msec(d,m,cond...) nvif_usec((d), (m) * 1000, ##cond) - /*XXX*/ #include <subdev/bios.h> #include <subdev/fb.h> diff --git a/drivers/gpu/drm/nouveau/include/nvif/timer.h b/drivers/gpu/drm/nouveau/include/nvif/timer.h new file mode 100644 index 000000000000..57587a985c4b --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvif/timer.h @@ -0,0 +1,35 @@ +#ifndef __NVIF_TIMER_H__ +#define __NVIF_TIMER_H__ +#include <nvif/os.h> + +struct nvif_timer_wait { + struct nvif_device *device; + u64 limit; + u64 time0; + u64 time1; + int reads; +}; + +void nvif_timer_wait_init(struct nvif_device *, u64 nsec, + struct nvif_timer_wait *); +s64 nvif_timer_wait_test(struct nvif_timer_wait *); + +/* Delay based on GPU time (ie. PTIMER). + * + * Will return -ETIMEDOUT unless the loop was terminated with 'break', + * where it will return the number of nanoseconds taken instead. + */ +#define nvif_nsec(d,n,cond...) ({ \ + struct nvif_timer_wait _wait; \ + s64 _taken = 0; \ + \ + nvif_timer_wait_init((d), (n), &_wait); \ + do { \ + cond \ + } while ((_taken = nvif_timer_wait_test(&_wait)) >= 0); \ + \ + _taken; \ +}) +#define nvif_usec(d,u,cond...) nvif_nsec((d), (u) * 1000, ##cond) +#define nvif_msec(d,m,cond...) 
nvif_usec((d), (m) * 1000, ##cond) +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvif/user.h b/drivers/gpu/drm/nouveau/include/nvif/user.h index 03c11826b693..6825574d93c2 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/user.h +++ b/drivers/gpu/drm/nouveau/include/nvif/user.h @@ -10,6 +10,7 @@ struct nvif_user { struct nvif_user_func { void (*doorbell)(struct nvif_user *, u32 token); + u64 (*time)(struct nvif_user *); }; int nvif_user_init(struct nvif_device *); diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 2b4b21b02e40..c40f127de3d0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1494,8 +1494,13 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg) ret = nvif_object_map_handle(&mem->mem.object, &args, argc, &handle, &length); - if (ret != 1) - return ret ? ret : -EINVAL; + if (ret != 1) { + if (WARN_ON(ret == 0)) + return -EINVAL; + if (ret == -ENOSPC) + return -EAGAIN; + return ret; + } reg->bus.base = 0; reg->bus.offset = handle; diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 7dfbbbc1beea..63cb5e432f8a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -217,39 +217,33 @@ static const struct nouveau_debugfs_files { {"pstate", &nouveau_pstate_fops}, }; -int +void nouveau_drm_debugfs_init(struct drm_minor *minor) { struct nouveau_drm *drm = nouveau_drm(minor->dev); struct dentry *dentry; - int i, ret; + int i; for (i = 0; i < ARRAY_SIZE(nouveau_debugfs_files); i++) { - dentry = debugfs_create_file(nouveau_debugfs_files[i].name, - S_IRUGO | S_IWUSR, - minor->debugfs_root, minor->dev, - nouveau_debugfs_files[i].fops); - if (!dentry) - return -ENOMEM; + debugfs_create_file(nouveau_debugfs_files[i].name, + S_IRUGO | S_IWUSR, + minor->debugfs_root, minor->dev, + nouveau_debugfs_files[i].fops); } - ret = drm_debugfs_create_files(nouveau_debugfs_list, - NOUVEAU_DEBUGFS_ENTRIES, - minor->debugfs_root, minor); - if (ret) - return ret; + drm_debugfs_create_files(nouveau_debugfs_list, + NOUVEAU_DEBUGFS_ENTRIES, + minor->debugfs_root, minor); /* Set the size of the vbios since we know it, and it's confusing to * userspace if it wants to seek() but the file has a length of 0 */ dentry = debugfs_lookup("vbios.rom", minor->debugfs_root); if (!dentry) - return 0; + return; d_inode(dentry)->i_size = drm->vbios.length; dput(dentry); - - return 0; } int diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h index 8909c010e8ea..77f0323b38ba 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.h +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h @@ -18,15 +18,13 @@ nouveau_debugfs(struct drm_device *dev) return nouveau_drm(dev)->debugfs; } -extern int nouveau_drm_debugfs_init(struct drm_minor *); +extern void nouveau_drm_debugfs_init(struct drm_minor *); extern int nouveau_debugfs_init(struct nouveau_drm *); extern void nouveau_debugfs_fini(struct nouveau_drm *); #else -static inline int +static inline void nouveau_drm_debugfs_init(struct drm_minor *minor) -{ - return 0; -} +{} static inline int nouveau_debugfs_init(struct nouveau_drm *drm) diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 0ad5d87b5a8e..ad89e09a0be3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -28,6 +28,7 @@ #include <nvif/class.h> #include <nvif/object.h> 
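For context on the macros moved into the new <nvif/timer.h> above: nvif_nsec()/nvif_usec()/nvif_msec() keep their documented contract. The polled body runs until it exits with 'break' (or an early return, as curs507a_space() does earlier in this series), in which case the elapsed GPU (PTIMER) nanoseconds are returned; otherwise the macro returns -ETIMEDOUT once the limit is exceeded. A minimal usage sketch follows; it is not taken from this series, and the structure, object handle and register/bit values are assumptions for illustration only.

#include <nvif/device.h>
#include <nvif/object.h>
#include <nvif/timer.h>

struct example_priv {
	struct nvif_device *device;	/* hypothetical device handle */
	struct nvif_object ctrl;	/* hypothetical register window */
};

static int example_wait_idle(struct example_priv *priv)
{
	s64 taken;

	/* Poll up to 5ms of GPU time for a (made-up) busy bit to clear. */
	taken = nvif_msec(priv->device, 5,
		if (!(nvif_rd32(&priv->ctrl, 0x0100) & 0x00000001))
			break;
	);
	if (taken < 0)
		return -ETIMEDOUT;	/* limit hit, condition never met */

	/* 'taken' holds how many nanoseconds the wait actually took. */
	return 0;
}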
+#include <nvif/if000c.h> #include <nvif/if500b.h> #include <nvif/if900b.h> @@ -176,6 +177,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf) .end = vmf->address + PAGE_SIZE, .src = &src, .dst = &dst, + .src_owner = drm->dev, }; /* @@ -526,6 +528,7 @@ nouveau_dmem_init(struct nouveau_drm *drm) drm->dmem->pagemap.type = MEMORY_DEVICE_PRIVATE; drm->dmem->pagemap.res = *res; drm->dmem->pagemap.ops = &nouveau_dmem_pagemap_ops; + drm->dmem->pagemap.owner = drm->dev; if (IS_ERR(devm_memremap_pages(device, &drm->dmem->pagemap))) goto out_free; @@ -669,12 +672,6 @@ out: return ret; } -static inline bool -nouveau_dmem_page(struct nouveau_drm *drm, struct page *page) -{ - return is_device_private_page(page) && drm->dmem == page_to_dmem(page); -} - void nouveau_dmem_convert_pfn(struct nouveau_drm *drm, struct hmm_range *range) @@ -690,18 +687,12 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm, if (page == NULL) continue; - if (!(range->pfns[i] & range->flags[HMM_PFN_DEVICE_PRIVATE])) { + if (!is_device_private_page(page)) continue; - } - - if (!nouveau_dmem_page(drm, page)) { - WARN(1, "Some unknown device memory !\n"); - range->pfns[i] = 0; - continue; - } addr = nouveau_dmem_page_addr(page); range->pfns[i] &= ((1UL << range->pfn_shift) - 1); range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift; + range->pfns[i] |= NVIF_VMM_PFNMAP_V0_VRAM; } } diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 6b1629c14dd7..ca4087f5a15b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -618,6 +618,64 @@ nouveau_drm_device_fini(struct drm_device *dev) kfree(drm); } +/* + * On some Intel PCIe bridge controllers doing a + * D0 -> D3hot -> D3cold -> D0 sequence causes Nvidia GPUs to not reappear. + * Skipping the intermediate D3hot step seems to make it work again. This is + * probably caused by not meeting the expectation the involved AML code has + * when the GPU is put into D3hot state before invoking it. + * + * This leads to various manifestations of this issue: + * - AML code execution to power on the GPU hits an infinite loop (as the + * code waits on device memory to change). + * - kernel crashes, as all PCI reads return -1, which most code isn't able + * to handle well enough. + * + * In all cases dmesg will contain at least one line like this: + * 'nouveau 0000:01:00.0: Refused to change power state, currently in D3' + * followed by a lot of nouveau timeouts. + * + * In the \_SB.PCI0.PEG0.PG00._OFF code deeper down writes bit 0x80 to the not + * documented PCI config space register 0x248 of the Intel PCIe bridge + * controller (0x1901) in order to change the state of the PCIe link between + * the PCIe port and the GPU. There are alternative code paths using other + * registers, which seem to work fine (executed pre Windows 8): + * - 0xbc bit 0x20 (publicly available documentation claims 'reserved') + * - 0xb0 bit 0x10 (link disable) + * Changing the conditions inside the firmware by poking into the relevant + * addresses does resolve the issue, but it seemed to be ACPI private memory + * and not any device accessible memory at all, so there is no portable way of + * changing the conditions. + * On a XPS 9560 that means bits [0,3] on \CPEX need to be cleared. + * + * The only systems where this behavior can be seen are hybrid graphics laptops + * with a secondary Nvidia Maxwell, Pascal or Turing GPU. 
It's unclear whether + * this issue only occurs in combination with listed Intel PCIe bridge + * controllers and the mentioned GPUs or other devices as well. + * + * documentation on the PCIe bridge controller can be found in the + * "7th Generation Intel® Processor Families for H Platforms Datasheet Volume 2" + * Section "12 PCI Express* Controller (x16) Registers" + */ + +static void quirk_broken_nv_runpm(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct pci_dev *bridge = pci_upstream_bridge(pdev); + + if (!bridge || bridge->vendor != PCI_VENDOR_ID_INTEL) + return; + + switch (bridge->device) { + case 0x1901: + drm->old_pm_cap = pdev->pm_cap; + pdev->pm_cap = 0; + NV_INFO(drm, "Disabling PCI power management to avoid bug\n"); + break; + } +} + static int nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent) { @@ -699,6 +757,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev, if (ret) goto fail_drm_dev_init; + quirk_broken_nv_runpm(pdev); return 0; fail_drm_dev_init: @@ -734,7 +793,11 @@ static void nouveau_drm_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); + struct nouveau_drm *drm = nouveau_drm(dev); + /* revert our workaround */ + if (drm->old_pm_cap) + pdev->pm_cap = drm->old_pm_cap; nouveau_drm_device_remove(dev); pci_disable_device(pdev); } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index c2c332fbde97..2a6519737800 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -140,6 +140,8 @@ struct nouveau_drm { struct list_head clients; + u8 old_pm_cap; + struct { struct agp_bridge_data *bridge; u32 base; diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index df9bf1fd1bc0..645fedd77e21 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -171,6 +171,11 @@ nouveau_svmm_bind(struct drm_device *dev, void *data, mm = get_task_mm(current); down_read(&mm->mmap_sem); + if (!cli->svm.svmm) { + up_read(&mm->mmap_sem); + return -EINVAL; + } + for (addr = args->va_start, end = args->va_start + size; addr < end;) { struct vm_area_struct *vma; unsigned long next; @@ -179,6 +184,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data, if (!vma) break; + addr = max(addr, vma->vm_start); next = min(vma->vm_end, end); /* This is a best effort so we ignore errors */ nouveau_dmem_migrate_vma(cli->drm, vma, addr, next); @@ -367,7 +373,6 @@ static const u64 nouveau_svm_pfn_flags[HMM_PFN_FLAG_MAX] = { [HMM_PFN_VALID ] = NVIF_VMM_PFNMAP_V0_V, [HMM_PFN_WRITE ] = NVIF_VMM_PFNMAP_V0_W, - [HMM_PFN_DEVICE_PRIVATE] = NVIF_VMM_PFNMAP_V0_VRAM, }; static const u64 @@ -541,7 +546,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm, range.default_flags = 0; range.pfn_flags_mask = -1UL; down_read(&mm->mmap_sem); - ret = hmm_range_fault(&range, 0); + ret = hmm_range_fault(&range); up_read(&mm->mmap_sem); if (ret <= 0) { if (ret == 0 || ret == -EBUSY) @@ -657,9 +662,6 @@ nouveau_svm_fault(struct nvif_notify *notify) limit = start + (ARRAY_SIZE(args.phys) << PAGE_SHIFT); if (start < svmm->unmanaged.limit) limit = min_t(u64, limit, svmm->unmanaged.start); - else - if (limit > svmm->unmanaged.start) - start = max_t(u64, start, svmm->unmanaged.limit); SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit); mm = svmm->notifier.mm; diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild 
b/drivers/gpu/drm/nouveau/nvif/Kbuild index 50d583d63807..f194d354c1f5 100644 --- a/drivers/gpu/drm/nouveau/nvif/Kbuild +++ b/drivers/gpu/drm/nouveau/nvif/Kbuild @@ -8,6 +8,7 @@ nvif-y += nvif/fifo.o nvif-y += nvif/mem.o nvif-y += nvif/mmu.o nvif-y += nvif/notify.o +nvif-y += nvif/timer.o nvif-y += nvif/vmm.o # Usermode classes diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c index 1ec101ba3b42..0e92db44bbc8 100644 --- a/drivers/gpu/drm/nouveau/nvif/device.c +++ b/drivers/gpu/drm/nouveau/nvif/device.c @@ -27,11 +27,15 @@ u64 nvif_device_time(struct nvif_device *device) { - struct nv_device_time_v0 args = {}; - int ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_TIME, - &args, sizeof(args)); - WARN_ON_ONCE(ret != 0); - return args.time; + if (!device->user.func) { + struct nv_device_time_v0 args = {}; + int ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_TIME, + &args, sizeof(args)); + WARN_ON_ONCE(ret != 0); + return args.time; + } + + return device->user.func->time(&device->user); } void diff --git a/drivers/gpu/drm/nouveau/nvif/timer.c b/drivers/gpu/drm/nouveau/nvif/timer.c new file mode 100644 index 000000000000..602c1a258d10 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/timer.c @@ -0,0 +1,56 @@ +/* + * Copyright 2020 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include <nvif/timer.h> +#include <nvif/device.h> + +s64 +nvif_timer_wait_test(struct nvif_timer_wait *wait) +{ + u64 time = nvif_device_time(wait->device); + + if (wait->reads == 0) { + wait->time0 = time; + wait->time1 = time; + } + + if (wait->time1 == time) { + if (WARN_ON(wait->reads++ == 16)) + return -ETIMEDOUT; + } else { + wait->time1 = time; + wait->reads = 1; + } + + if (wait->time1 - wait->time0 > wait->limit) + return -ETIMEDOUT; + + return wait->time1 - wait->time0; +} + +void +nvif_timer_wait_init(struct nvif_device *device, u64 nsec, + struct nvif_timer_wait *wait) +{ + wait->device = device; + wait->limit = nsec; + wait->reads = 0; +} diff --git a/drivers/gpu/drm/nouveau/nvif/userc361.c b/drivers/gpu/drm/nouveau/nvif/userc361.c index 19f9958e7e01..1116f871b272 100644 --- a/drivers/gpu/drm/nouveau/nvif/userc361.c +++ b/drivers/gpu/drm/nouveau/nvif/userc361.c @@ -21,6 +21,19 @@ */ #include <nvif/user.h> +static u64 +nvif_userc361_time(struct nvif_user *user) +{ + u32 hi, lo; + + do { + hi = nvif_rd32(&user->object, 0x084); + lo = nvif_rd32(&user->object, 0x080); + } while (hi != nvif_rd32(&user->object, 0x084)); + + return ((u64)hi << 32 | lo); +} + static void nvif_userc361_doorbell(struct nvif_user *user, u32 token) { @@ -30,4 +43,5 @@ nvif_userc361_doorbell(struct nvif_user *user, u32 token) const struct nvif_user_func nvif_userc361 = { .doorbell = nvif_userc361_doorbell, + .time = nvif_userc361_time, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index dd8f85b8b3a7..f2f5636efac4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c @@ -1981,8 +1981,34 @@ gf100_gr_init_(struct nvkm_gr *base) { struct gf100_gr *gr = gf100_gr(base); struct nvkm_subdev *subdev = &base->engine.subdev; + struct nvkm_device *device = subdev->device; + bool reset = device->chipset == 0x137 || device->chipset == 0x138; u32 ret; + /* On certain GP107/GP108 boards, we trigger a weird issue where + * GR will stop responding to PRI accesses after we've asked the + * SEC2 RTOS to boot the GR falcons. This happens with far more + * frequency when cold-booting a board (ie. returning from D3). + * + * The root cause for this is not known and has proven difficult + * to isolate, with many avenues being dead-ends. + * + * A workaround was discovered by Karol, whereby putting GR into + * reset for an extended period right before initialisation + * prevents the problem from occuring. + * + * XXX: As RM does not require any such workaround, this is more + * of a hack than a true fix. 
+ */ + reset = nvkm_boolopt(device->cfgopt, "NvGrResetWar", reset); + if (reset) { + nvkm_mask(device, 0x000200, 0x00001000, 0x00000000); + nvkm_rd32(device, 0x000200); + msleep(50); + nvkm_mask(device, 0x000200, 0x00001000, 0x00001000); + nvkm_rd32(device, 0x000200); + } + nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false); ret = nvkm_falcon_get(&gr->fecs.falcon, subdev); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c index 232a9d7c51e5..e770c9497871 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c @@ -25,6 +25,9 @@ MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin"); MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin"); MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin"); +MODULE_FIRMWARE("nvidia/gv100/sec2/desc.bin"); +MODULE_FIRMWARE("nvidia/gv100/sec2/image.bin"); +MODULE_FIRMWARE("nvidia/gv100/sec2/sig.bin"); static const struct nvkm_sec2_fwif gp108_sec2_fwif[] = { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c index b6ebd95c9ba1..a8295653ceab 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c @@ -56,6 +56,22 @@ tu102_sec2_nofw(struct nvkm_sec2 *sec2, int ver, return 0; } +MODULE_FIRMWARE("nvidia/tu102/sec2/desc.bin"); +MODULE_FIRMWARE("nvidia/tu102/sec2/image.bin"); +MODULE_FIRMWARE("nvidia/tu102/sec2/sig.bin"); +MODULE_FIRMWARE("nvidia/tu104/sec2/desc.bin"); +MODULE_FIRMWARE("nvidia/tu104/sec2/image.bin"); +MODULE_FIRMWARE("nvidia/tu104/sec2/sig.bin"); +MODULE_FIRMWARE("nvidia/tu106/sec2/desc.bin"); +MODULE_FIRMWARE("nvidia/tu106/sec2/image.bin"); +MODULE_FIRMWARE("nvidia/tu106/sec2/sig.bin"); +MODULE_FIRMWARE("nvidia/tu116/sec2/desc.bin"); +MODULE_FIRMWARE("nvidia/tu116/sec2/image.bin"); +MODULE_FIRMWARE("nvidia/tu116/sec2/sig.bin"); +MODULE_FIRMWARE("nvidia/tu117/sec2/desc.bin"); +MODULE_FIRMWARE("nvidia/tu117/sec2/image.bin"); +MODULE_FIRMWARE("nvidia/tu117/sec2/sig.bin"); + static const struct nvkm_sec2_fwif tu102_sec2_fwif[] = { { 0, gp102_sec2_load, &tu102_sec2, &gp102_sec2_acr_1 }, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c index 9b91da09dc5f..8d9812a51ef6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c @@ -101,9 +101,13 @@ platform_init(struct nvkm_bios *bios, const char *name) else return ERR_PTR(-ENODEV); + if (!pdev->rom || pdev->romlen == 0) + return ERR_PTR(-ENODEV); + if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) { + priv->size = pdev->romlen; if (ret = -ENODEV, - (priv->rom = pci_platform_rom(pdev, &priv->size))) + (priv->rom = ioremap(pdev->rom, pdev->romlen))) return priv; kfree(priv); } @@ -111,11 +115,20 @@ platform_init(struct nvkm_bios *bios, const char *name) return ERR_PTR(ret); } +static void +platform_fini(void *data) +{ + struct priv *priv = data; + + iounmap(priv->rom); + kfree(priv); +} + const struct nvbios_source nvbios_platform = { .name = "PLATFORM", .init = platform_init, - .fini = (void(*)(void *))kfree, + .fini = platform_fini, .read = pcirom_read, .rw = true, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c index 03b355dabab3..abf3eda683f0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c @@ -36,8 +36,8 @@ 
probe_monitoring_device(struct nvkm_i2c_bus *bus, request_module("%s%s", I2C_MODULE_PREFIX, info->type); - client = i2c_new_device(&bus->i2c, info); - if (!client) + client = i2c_new_client_device(&bus->i2c, info); + if (IS_ERR(client)) return false; if (!client->dev.driver || diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index b76fc2b56227..4d5739fa4a5d 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1348,9 +1348,15 @@ static int dss_component_compare(struct device *dev, void *data) return dev == child; } +struct dss_component_match_data { + struct device *dev; + struct component_match **match; +}; + static int dss_add_child_component(struct device *dev, void *data) { - struct component_match **match = data; + struct dss_component_match_data *cmatch = data; + struct component_match **match = cmatch->match; /* * HACK @@ -1361,7 +1367,17 @@ static int dss_add_child_component(struct device *dev, void *data) if (strstr(dev_name(dev), "rfbi")) return 0; - component_match_add(dev->parent, match, dss_component_compare, dev); + /* + * Handle possible interconnect target modules defined within the DSS. + * The DSS components can be children of an interconnect target module + * after the device tree has been updated for the module data. + * See also omapdss_boot_init() for compatible fixup. + */ + if (strstr(dev_name(dev), "target-module")) + return device_for_each_child(dev, cmatch, + dss_add_child_component); + + component_match_add(cmatch->dev, match, dss_component_compare, dev); return 0; } @@ -1404,6 +1420,7 @@ static int dss_probe_hardware(struct dss_device *dss) static int dss_probe(struct platform_device *pdev) { const struct soc_device_attribute *soc; + struct dss_component_match_data cmatch; struct component_match *match = NULL; struct resource *dss_mem; struct dss_device *dss; @@ -1481,7 +1498,9 @@ static int dss_probe(struct platform_device *pdev) omapdss_gather_components(&pdev->dev); - device_for_each_child(&pdev->dev, &match, dss_add_child_component); + cmatch.dev = &pdev->dev; + cmatch.match = &match; + device_for_each_child(&pdev->dev, &cmatch, dss_add_child_component); r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match); if (r) diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c index 00372f4ce711..72a7da7bfff1 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c +++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c @@ -178,9 +178,24 @@ static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = { {}, }; +static void __init omapdss_find_children(struct device_node *np) +{ + struct device_node *child; + + for_each_available_child_of_node(np, child) { + if (!of_find_property(child, "compatible", NULL)) + continue; + + omapdss_walk_device(child, true); + + if (of_device_is_compatible(child, "ti,sysc")) + omapdss_find_children(child); + } +} + static int __init omapdss_boot_init(void) { - struct device_node *dss, *child; + struct device_node *dss; INIT_LIST_HEAD(&dss_conv_list); @@ -190,13 +205,7 @@ static int __init omapdss_boot_init(void) goto put_node; omapdss_walk_device(dss, true); - - for_each_available_child_of_node(dss, child) { - if (!of_find_property(child, "compatible", NULL)) - continue; - - omapdss_walk_device(child, true); - } + omapdss_find_children(dss); while (!list_empty(&dss_conv_list)) { struct dss_conv_node *n; diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c 
b/drivers/gpu/drm/omapdrm/omap_debugfs.c index 34dfb33145b4..b57fbe8a0ac2 100644 --- a/drivers/gpu/drm/omapdrm/omap_debugfs.c +++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c @@ -80,31 +80,16 @@ static struct drm_info_list omap_dmm_debugfs_list[] = { {"tiler_map", tiler_map_show, 0}, }; -int omap_debugfs_init(struct drm_minor *minor) +void omap_debugfs_init(struct drm_minor *minor) { - struct drm_device *dev = minor->dev; - int ret; - - ret = drm_debugfs_create_files(omap_debugfs_list, - ARRAY_SIZE(omap_debugfs_list), - minor->debugfs_root, minor); - - if (ret) { - dev_err(dev->dev, "could not install omap_debugfs_list\n"); - return ret; - } + drm_debugfs_create_files(omap_debugfs_list, + ARRAY_SIZE(omap_debugfs_list), + minor->debugfs_root, minor); if (dmm_is_available()) - ret = drm_debugfs_create_files(omap_dmm_debugfs_list, - ARRAY_SIZE(omap_dmm_debugfs_list), - minor->debugfs_root, minor); - - if (ret) { - dev_err(dev->dev, "could not install omap_dmm_debugfs_list\n"); - return ret; - } - - return ret; + drm_debugfs_create_files(omap_dmm_debugfs_list, + ARRAY_SIZE(omap_dmm_debugfs_list), + minor->debugfs_root, minor); } #endif diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 7c4b66efcaa7..8a1fac680138 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -82,6 +82,6 @@ struct omap_drm_private { }; -int omap_debugfs_init(struct drm_minor *minor); +void omap_debugfs_init(struct drm_minor *minor); #endif /* __OMAPDRM_DRV_H__ */ diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index a1723c1b5fbf..d56258b9fcaf 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -137,6 +137,17 @@ config DRM_PANEL_KINGDISPLAY_KD097D04 24 bit RGB per pixel. It provides a MIPI DSI interface to the host and has a built-in LED backlight. +config DRM_PANEL_LEADTEK_LTK050H3146W + tristate "Leadtek LTK050H3146W panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for Leadtek LTK050H3146W + TFT-LCD modules. The panel has a 720x1280 resolution and uses + 24 bit RGB per pixel. It provides a MIPI DSI interface to + the host and has a built-in LED backlight. 
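The debugfs changes in this series (a5xx, mdp5, msm, nouveau and omapdrm above) all follow the same shape: the return values of drm_debugfs_create_files() and debugfs_create_file() are no longer checked, and most of the per-driver debugfs_init hooks become void, treating debugfs setup problems as non-fatal. A sketch of the resulting hook for a hypothetical driver; the example_* names are illustrative only and not part of any patch here.

#include <linux/seq_file.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>

static int example_status_show(struct seq_file *m, void *data)
{
	seq_puts(m, "example status\n");
	return 0;
}

static struct drm_info_list example_debugfs_list[] = {
	{ "example_status", example_status_show, 0 },
};

/* No return value: a failed debugfs setup must not fail driver bind. */
static void example_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(example_debugfs_list,
				 ARRAY_SIZE(example_debugfs_list),
				 minor->debugfs_root, minor);
}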
+ config DRM_PANEL_LEADTEK_LTK500HD1829 tristate "Leadtek LTK500HD1829 panel" depends on OF diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index 96a883cd6630..2335a1e32ae0 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o +obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c index 48a164257d18..f89861c8598a 100644 --- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c +++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c @@ -696,6 +696,34 @@ static const struct panel_desc auo_b101uan08_3_desc = { .init_cmds = auo_b101uan08_3_init_cmd, }; +static const struct drm_display_mode boe_tv105wum_nw0_default_mode = { + .clock = 159260, + .hdisplay = 1200, + .hsync_start = 1200 + 80, + .hsync_end = 1200 + 80 + 24, + .htotal = 1200 + 80 + 24 + 60, + .vdisplay = 1920, + .vsync_start = 1920 + 10, + .vsync_end = 1920 + 10 + 2, + .vtotal = 1920 + 10 + 2 + 14, + .vrefresh = 60, + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, +}; + +static const struct panel_desc boe_tv105wum_nw0_desc = { + .modes = &boe_tv105wum_nw0_default_mode, + .bpc = 8, + .size = { + .width_mm = 141, + .height_mm = 226, + }, + .lanes = 4, + .format = MIPI_DSI_FMT_RGB888, + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | + MIPI_DSI_MODE_LPM, + .init_cmds = boe_init_cmd, +}; + static int boe_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector) { @@ -834,6 +862,9 @@ static const struct of_device_id boe_of_match[] = { { .compatible = "auo,b101uan08.3", .data = &auo_b101uan08_3_desc }, + { .compatible = "boe,tv105wum-nw0", + .data = &boe_tv105wum_nw0_desc + }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, boe_of_match); diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c new file mode 100644 index 000000000000..5a7a31c8513e --- /dev/null +++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c @@ -0,0 +1,691 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Theobroma Systems Design und Consulting GmbH + */ + +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/media-bus-format.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/regulator/consumer.h> + +#include <video/display_timing.h> +#include <video/mipi_display.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> +#include <drm/drm_print.h> + +struct ltk050h3146w_cmd { + char cmd; + char data; +}; + +struct ltk050h3146w; +struct ltk050h3146w_desc { + const struct drm_display_mode *mode; + int (*init)(struct ltk050h3146w *ctx); +}; + +struct ltk050h3146w { + struct device *dev; + struct drm_panel panel; + struct gpio_desc *reset_gpio; + struct regulator *vci; + struct regulator *iovcc; + const struct ltk050h3146w_desc *panel_desc; + bool prepared; +}; + +static const struct 
ltk050h3146w_cmd page1_cmds[] = { + { 0x22, 0x0A }, /* BGR SS GS */ + { 0x31, 0x00 }, /* column inversion */ + { 0x53, 0xA2 }, /* VCOM1 */ + { 0x55, 0xA2 }, /* VCOM2 */ + { 0x50, 0x81 }, /* VREG1OUT=5V */ + { 0x51, 0x85 }, /* VREG2OUT=-5V */ + { 0x62, 0x0D }, /* EQT Time setting */ +/* + * The vendor init selected page 1 here _again_ + * Is this supposed to be page 2? + */ + { 0xA0, 0x00 }, + { 0xA1, 0x1A }, + { 0xA2, 0x28 }, + { 0xA3, 0x13 }, + { 0xA4, 0x16 }, + { 0xA5, 0x29 }, + { 0xA6, 0x1D }, + { 0xA7, 0x1E }, + { 0xA8, 0x84 }, + { 0xA9, 0x1C }, + { 0xAA, 0x28 }, + { 0xAB, 0x75 }, + { 0xAC, 0x1A }, + { 0xAD, 0x19 }, + { 0xAE, 0x4D }, + { 0xAF, 0x22 }, + { 0xB0, 0x28 }, + { 0xB1, 0x54 }, + { 0xB2, 0x66 }, + { 0xB3, 0x39 }, + { 0xC0, 0x00 }, + { 0xC1, 0x1A }, + { 0xC2, 0x28 }, + { 0xC3, 0x13 }, + { 0xC4, 0x16 }, + { 0xC5, 0x29 }, + { 0xC6, 0x1D }, + { 0xC7, 0x1E }, + { 0xC8, 0x84 }, + { 0xC9, 0x1C }, + { 0xCA, 0x28 }, + { 0xCB, 0x75 }, + { 0xCC, 0x1A }, + { 0xCD, 0x19 }, + { 0xCE, 0x4D }, + { 0xCF, 0x22 }, + { 0xD0, 0x28 }, + { 0xD1, 0x54 }, + { 0xD2, 0x66 }, + { 0xD3, 0x39 }, +}; + +static const struct ltk050h3146w_cmd page3_cmds[] = { + { 0x01, 0x00 }, + { 0x02, 0x00 }, + { 0x03, 0x73 }, + { 0x04, 0x00 }, + { 0x05, 0x00 }, + { 0x06, 0x0a }, + { 0x07, 0x00 }, + { 0x08, 0x00 }, + { 0x09, 0x01 }, + { 0x0a, 0x00 }, + { 0x0b, 0x00 }, + { 0x0c, 0x01 }, + { 0x0d, 0x00 }, + { 0x0e, 0x00 }, + { 0x0f, 0x1d }, + { 0x10, 0x1d }, + { 0x11, 0x00 }, + { 0x12, 0x00 }, + { 0x13, 0x00 }, + { 0x14, 0x00 }, + { 0x15, 0x00 }, + { 0x16, 0x00 }, + { 0x17, 0x00 }, + { 0x18, 0x00 }, + { 0x19, 0x00 }, + { 0x1a, 0x00 }, + { 0x1b, 0x00 }, + { 0x1c, 0x00 }, + { 0x1d, 0x00 }, + { 0x1e, 0x40 }, + { 0x1f, 0x80 }, + { 0x20, 0x06 }, + { 0x21, 0x02 }, + { 0x22, 0x00 }, + { 0x23, 0x00 }, + { 0x24, 0x00 }, + { 0x25, 0x00 }, + { 0x26, 0x00 }, + { 0x27, 0x00 }, + { 0x28, 0x33 }, + { 0x29, 0x03 }, + { 0x2a, 0x00 }, + { 0x2b, 0x00 }, + { 0x2c, 0x00 }, + { 0x2d, 0x00 }, + { 0x2e, 0x00 }, + { 0x2f, 0x00 }, + { 0x30, 0x00 }, + { 0x31, 0x00 }, + { 0x32, 0x00 }, + { 0x33, 0x00 }, + { 0x34, 0x04 }, + { 0x35, 0x00 }, + { 0x36, 0x00 }, + { 0x37, 0x00 }, + { 0x38, 0x3C }, + { 0x39, 0x35 }, + { 0x3A, 0x01 }, + { 0x3B, 0x40 }, + { 0x3C, 0x00 }, + { 0x3D, 0x01 }, + { 0x3E, 0x00 }, + { 0x3F, 0x00 }, + { 0x40, 0x00 }, + { 0x41, 0x88 }, + { 0x42, 0x00 }, + { 0x43, 0x00 }, + { 0x44, 0x1F }, + { 0x50, 0x01 }, + { 0x51, 0x23 }, + { 0x52, 0x45 }, + { 0x53, 0x67 }, + { 0x54, 0x89 }, + { 0x55, 0xab }, + { 0x56, 0x01 }, + { 0x57, 0x23 }, + { 0x58, 0x45 }, + { 0x59, 0x67 }, + { 0x5a, 0x89 }, + { 0x5b, 0xab }, + { 0x5c, 0xcd }, + { 0x5d, 0xef }, + { 0x5e, 0x11 }, + { 0x5f, 0x01 }, + { 0x60, 0x00 }, + { 0x61, 0x15 }, + { 0x62, 0x14 }, + { 0x63, 0x0E }, + { 0x64, 0x0F }, + { 0x65, 0x0C }, + { 0x66, 0x0D }, + { 0x67, 0x06 }, + { 0x68, 0x02 }, + { 0x69, 0x07 }, + { 0x6a, 0x02 }, + { 0x6b, 0x02 }, + { 0x6c, 0x02 }, + { 0x6d, 0x02 }, + { 0x6e, 0x02 }, + { 0x6f, 0x02 }, + { 0x70, 0x02 }, + { 0x71, 0x02 }, + { 0x72, 0x02 }, + { 0x73, 0x02 }, + { 0x74, 0x02 }, + { 0x75, 0x01 }, + { 0x76, 0x00 }, + { 0x77, 0x14 }, + { 0x78, 0x15 }, + { 0x79, 0x0E }, + { 0x7a, 0x0F }, + { 0x7b, 0x0C }, + { 0x7c, 0x0D }, + { 0x7d, 0x06 }, + { 0x7e, 0x02 }, + { 0x7f, 0x07 }, + { 0x80, 0x02 }, + { 0x81, 0x02 }, + { 0x82, 0x02 }, + { 0x83, 0x02 }, + { 0x84, 0x02 }, + { 0x85, 0x02 }, + { 0x86, 0x02 }, + { 0x87, 0x02 }, + { 0x88, 0x02 }, + { 0x89, 0x02 }, + { 0x8A, 0x02 }, +}; + +static const struct ltk050h3146w_cmd page4_cmds[] = { + { 0x70, 0x00 }, + { 0x71, 0x00 }, + { 0x82, 0x0F 
}, /* VGH_MOD clamp level=15v */ + { 0x84, 0x0F }, /* VGH clamp level 15V */ + { 0x85, 0x0D }, /* VGL clamp level (-10V) */ + { 0x32, 0xAC }, + { 0x8C, 0x80 }, + { 0x3C, 0xF5 }, + { 0xB5, 0x07 }, /* GAMMA OP */ + { 0x31, 0x45 }, /* SOURCE OP */ + { 0x3A, 0x24 }, /* PS_EN OFF */ + { 0x88, 0x33 }, /* LVD */ +}; + +static inline +struct ltk050h3146w *panel_to_ltk050h3146w(struct drm_panel *panel) +{ + return container_of(panel, struct ltk050h3146w, panel); +} + +#define dsi_dcs_write_seq(dsi, cmd, seq...) do { \ + static const u8 d[] = { seq }; \ + int ret; \ + ret = mipi_dsi_dcs_write(dsi, cmd, d, ARRAY_SIZE(d)); \ + if (ret < 0) \ + return ret; \ + } while (0) + +static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx) +{ + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); + int ret; + + /* + * Init sequence was supplied by the panel vendor without much + * documentation. + */ + dsi_dcs_write_seq(dsi, 0xdf, 0x93, 0x65, 0xf8); + dsi_dcs_write_seq(dsi, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06, + 0x01); + dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0xb5); + dsi_dcs_write_seq(dsi, 0xb3, 0x00, 0xb5); + dsi_dcs_write_seq(dsi, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00); + + dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xc4, 0x23, 0x07); + dsi_dcs_write_seq(dsi, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f, + 0x28, 0x04, 0xcc, 0xcc, 0xcc); + dsi_dcs_write_seq(dsi, 0xbc, 0x0f, 0x04); + dsi_dcs_write_seq(dsi, 0xbe, 0x1e, 0xf2); + dsi_dcs_write_seq(dsi, 0xc0, 0x26, 0x03); + dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x12); + dsi_dcs_write_seq(dsi, 0xc3, 0x04, 0x02, 0x02, 0x76, 0x01, 0x80, + 0x80); + dsi_dcs_write_seq(dsi, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f, + 0x16, 0x00, 0x00); + dsi_dcs_write_seq(dsi, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50, + 0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f, + 0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67, + 0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55, + 0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08); + dsi_dcs_write_seq(dsi, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a, + 0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); + dsi_dcs_write_seq(dsi, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b, + 0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); + dsi_dcs_write_seq(dsi, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05, + 0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); + dsi_dcs_write_seq(dsi, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04, + 0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); + dsi_dcs_write_seq(dsi, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20, + 0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03, + 0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08); + dsi_dcs_write_seq(dsi, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xbc, 0x50, 0x00, 0x05, + 0x21, 0x00, 0x60); + dsi_dcs_write_seq(dsi, 0xdd, 0x2c, 0xa3, 0x00); + dsi_dcs_write_seq(dsi, 0xde, 0x02); + dsi_dcs_write_seq(dsi, 0xb2, 0x32, 0x1c); + dsi_dcs_write_seq(dsi, 0xb7, 0x3b, 0x70, 0x00, 0x04); + dsi_dcs_write_seq(dsi, 0xc1, 0x11); + dsi_dcs_write_seq(dsi, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37); + dsi_dcs_write_seq(dsi, 0xc2, 0x20, 0x38, 0x1e, 0x84); + dsi_dcs_write_seq(dsi, 0xde, 0x00); + + ret = mipi_dsi_dcs_set_tear_on(dsi, 1); + if (ret < 0) { + DRM_DEV_ERROR(ctx->dev, "failed to set tear on: %d\n", + ret); + return ret; + } + + msleep(60); + + return 0; +} + +static const struct drm_display_mode ltk050h3146w_mode 
= { + .hdisplay = 720, + .hsync_start = 720 + 42, + .hsync_end = 720 + 42 + 8, + .htotal = 720 + 42 + 8 + 42, + .vdisplay = 1280, + .vsync_start = 1280 + 12, + .vsync_end = 1280 + 12 + 4, + .vtotal = 1280 + 12 + 4 + 18, + .clock = 64018, + .width_mm = 62, + .height_mm = 110, +}; + +static const struct ltk050h3146w_desc ltk050h3146w_data = { + .mode = &ltk050h3146w_mode, + .init = ltk050h3146w_init_sequence, +}; + +static int ltk050h3146w_a2_select_page(struct ltk050h3146w *ctx, int page) +{ + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); + u8 d[3] = { 0x98, 0x81, page }; + + return mipi_dsi_dcs_write(dsi, 0xff, d, ARRAY_SIZE(d)); +} + +static int ltk050h3146w_a2_write_page(struct ltk050h3146w *ctx, int page, + const struct ltk050h3146w_cmd *cmds, + int num) +{ + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); + int i, ret; + + ret = ltk050h3146w_a2_select_page(ctx, page); + if (ret < 0) { + DRM_DEV_ERROR(ctx->dev, "failed to select page %d: %d\n", + page, ret); + return ret; + } + + for (i = 0; i < num; i++) { + ret = mipi_dsi_generic_write(dsi, &cmds[i], + sizeof(struct ltk050h3146w_cmd)); + if (ret < 0) { + DRM_DEV_ERROR(ctx->dev, + "failed to write page %d init cmds: %d\n", + page, ret); + return ret; + } + } + + return 0; +} + +static int ltk050h3146w_a2_init_sequence(struct ltk050h3146w *ctx) +{ + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); + int ret; + + /* + * Init sequence was supplied by the panel vendor without much + * documentation. + */ + ret = ltk050h3146w_a2_write_page(ctx, 3, page3_cmds, + ARRAY_SIZE(page3_cmds)); + if (ret < 0) + return ret; + + ret = ltk050h3146w_a2_write_page(ctx, 4, page4_cmds, + ARRAY_SIZE(page4_cmds)); + if (ret < 0) + return ret; + + ret = ltk050h3146w_a2_write_page(ctx, 1, page1_cmds, + ARRAY_SIZE(page1_cmds)); + if (ret < 0) + return ret; + + ret = ltk050h3146w_a2_select_page(ctx, 0); + if (ret < 0) { + DRM_DEV_ERROR(ctx->dev, "failed to select page 0: %d\n", ret); + return ret; + } + + /* vendor code called this without param, where there should be one */ + ret = mipi_dsi_dcs_set_tear_on(dsi, 0); + if (ret < 0) { + DRM_DEV_ERROR(ctx->dev, "failed to set tear on: %d\n", + ret); + return ret; + } + + msleep(60); + + return 0; +} + +static const struct drm_display_mode ltk050h3146w_a2_mode = { + .hdisplay = 720, + .hsync_start = 720 + 42, + .hsync_end = 720 + 42 + 10, + .htotal = 720 + 42 + 10 + 60, + .vdisplay = 1280, + .vsync_start = 1280 + 18, + .vsync_end = 1280 + 18 + 4, + .vtotal = 1280 + 18 + 4 + 12, + .clock = 65595, + .width_mm = 62, + .height_mm = 110, +}; + +static const struct ltk050h3146w_desc ltk050h3146w_a2_data = { + .mode = &ltk050h3146w_a2_mode, + .init = ltk050h3146w_a2_init_sequence, +}; + +static int ltk050h3146w_unprepare(struct drm_panel *panel) +{ + struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel); + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); + int ret; + + if (!ctx->prepared) + return 0; + + ret = mipi_dsi_dcs_set_display_off(dsi); + if (ret < 0) { + DRM_DEV_ERROR(ctx->dev, "failed to set display off: %d\n", + ret); + return ret; + } + + ret = mipi_dsi_dcs_enter_sleep_mode(dsi); + if (ret < 0) { + DRM_DEV_ERROR(ctx->dev, "failed to enter sleep mode: %d\n", + ret); + return ret; + } + + regulator_disable(ctx->iovcc); + regulator_disable(ctx->vci); + + ctx->prepared = false; + + return 0; +} + +static int ltk050h3146w_prepare(struct drm_panel *panel) +{ + struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel); + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); 
+ int ret; + + if (ctx->prepared) + return 0; + + DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n"); + ret = regulator_enable(ctx->vci); + if (ret < 0) { + DRM_DEV_ERROR(ctx->dev, + "Failed to enable vci supply: %d\n", ret); + return ret; + } + ret = regulator_enable(ctx->iovcc); + if (ret < 0) { + DRM_DEV_ERROR(ctx->dev, + "Failed to enable iovcc supply: %d\n", ret); + goto disable_vci; + } + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(5000, 6000); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + msleep(20); + + ret = ctx->panel_desc->init(ctx); + if (ret < 0) { + DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n", + ret); + goto disable_iovcc; + } + + ret = mipi_dsi_dcs_exit_sleep_mode(dsi); + if (ret < 0) { + DRM_DEV_ERROR(ctx->dev, "Failed to exit sleep mode: %d\n", ret); + goto disable_iovcc; + } + + /* T9: 120ms */ + msleep(120); + + ret = mipi_dsi_dcs_set_display_on(dsi); + if (ret < 0) { + DRM_DEV_ERROR(ctx->dev, "Failed to set display on: %d\n", ret); + goto disable_iovcc; + } + + msleep(50); + + ctx->prepared = true; + + return 0; + +disable_iovcc: + regulator_disable(ctx->iovcc); +disable_vci: + regulator_disable(ctx->vci); + return ret; +} + +static int ltk050h3146w_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel); + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, ctx->panel_desc->mode); + if (!mode) + return -ENOMEM; + + drm_mode_set_name(mode); + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + connector->display_info.width_mm = mode->width_mm; + connector->display_info.height_mm = mode->height_mm; + drm_mode_probed_add(connector, mode); + + return 1; +} + +static const struct drm_panel_funcs ltk050h3146w_funcs = { + .unprepare = ltk050h3146w_unprepare, + .prepare = ltk050h3146w_prepare, + .get_modes = ltk050h3146w_get_modes, +}; + +static int ltk050h3146w_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct ltk050h3146w *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->panel_desc = of_device_get_match_data(dev); + if (!ctx->panel_desc) + return -EINVAL; + + ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(ctx->reset_gpio)) { + DRM_DEV_ERROR(dev, "cannot get reset gpio\n"); + return PTR_ERR(ctx->reset_gpio); + } + + ctx->vci = devm_regulator_get(dev, "vci"); + if (IS_ERR(ctx->vci)) { + ret = PTR_ERR(ctx->vci); + if (ret != -EPROBE_DEFER) + DRM_DEV_ERROR(dev, + "Failed to request vci regulator: %d\n", + ret); + return ret; + } + + ctx->iovcc = devm_regulator_get(dev, "iovcc"); + if (IS_ERR(ctx->iovcc)) { + ret = PTR_ERR(ctx->iovcc); + if (ret != -EPROBE_DEFER) + DRM_DEV_ERROR(dev, + "Failed to request iovcc regulator: %d\n", + ret); + return ret; + } + + mipi_dsi_set_drvdata(dsi, ctx); + + ctx->dev = dev; + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET; + + drm_panel_init(&ctx->panel, &dsi->dev, &ltk050h3146w_funcs, + DRM_MODE_CONNECTOR_DSI); + + ret = drm_panel_of_backlight(&ctx->panel); + if (ret) + return ret; + + drm_panel_add(&ctx->panel); + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret); + drm_panel_remove(&ctx->panel); + return ret; + } + + return 0; +} + +static void ltk050h3146w_shutdown(struct 
mipi_dsi_device *dsi) +{ + struct ltk050h3146w *ctx = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = drm_panel_unprepare(&ctx->panel); + if (ret < 0) + DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n", + ret); + + ret = drm_panel_disable(&ctx->panel); + if (ret < 0) + DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n", + ret); +} + +static int ltk050h3146w_remove(struct mipi_dsi_device *dsi) +{ + struct ltk050h3146w *ctx = mipi_dsi_get_drvdata(dsi); + int ret; + + ltk050h3146w_shutdown(dsi); + + ret = mipi_dsi_detach(dsi); + if (ret < 0) + DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n", + ret); + + drm_panel_remove(&ctx->panel); + + return 0; +} + +static const struct of_device_id ltk050h3146w_of_match[] = { + { + .compatible = "leadtek,ltk050h3146w", + .data = &ltk050h3146w_data, + }, + { + .compatible = "leadtek,ltk050h3146w-a2", + .data = &ltk050h3146w_a2_data, + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ltk050h3146w_of_match); + +static struct mipi_dsi_driver ltk050h3146w_driver = { + .driver = { + .name = "panel-leadtek-ltk050h3146w", + .of_match_table = ltk050h3146w_of_match, + }, + .probe = ltk050h3146w_probe, + .remove = ltk050h3146w_remove, + .shutdown = ltk050h3146w_shutdown, +}; +module_mipi_dsi_driver(ltk050h3146w_driver); + +MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@theobroma-systems.com>"); +MODULE_DESCRIPTION("DRM driver for Leadtek LTK050H3146W MIPI DSI panel"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c index 76ecf2de9c44..113ab9c0396b 100644 --- a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c +++ b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c @@ -377,7 +377,7 @@ static const struct drm_display_mode default_mode = { .vsync_end = 1280 + 30 + 4, .vtotal = 1280 + 30 + 4 + 12, .vrefresh = 60, - .clock = 41600, + .clock = 69217, .width_mm = 62, .height_mm = 110, }; diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c index a470810f7dbe..05cae8d62d56 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c @@ -49,7 +49,8 @@ enum nt39016_regs { #define NT39016_SYSTEM_STANDBY BIT(1) struct nt39016_panel_info { - struct drm_display_mode display_mode; + const struct drm_display_mode *display_modes; + unsigned int num_modes; u16 width_mm, height_mm; u32 bus_format, bus_flags; }; @@ -212,15 +213,22 @@ static int nt39016_get_modes(struct drm_panel *drm_panel, struct nt39016 *panel = to_nt39016(drm_panel); const struct nt39016_panel_info *panel_info = panel->panel_info; struct drm_display_mode *mode; + unsigned int i; - mode = drm_mode_duplicate(connector->dev, &panel_info->display_mode); - if (!mode) - return -ENOMEM; + for (i = 0; i < panel_info->num_modes; i++) { + mode = drm_mode_duplicate(connector->dev, + &panel_info->display_modes[i]); + if (!mode) + return -ENOMEM; + + drm_mode_set_name(mode); - drm_mode_set_name(mode); + mode->type = DRM_MODE_TYPE_DRIVER; + if (panel_info->num_modes == 1) + mode->type |= DRM_MODE_TYPE_PREFERRED; - mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; - drm_mode_probed_add(connector, mode); + drm_mode_probed_add(connector, mode); + } connector->display_info.bpc = 8; connector->display_info.width_mm = panel_info->width_mm; @@ -230,7 +238,7 @@ static int nt39016_get_modes(struct drm_panel *drm_panel, &panel_info->bus_format, 1); connector->display_info.bus_flags = 
panel_info->bus_flags; - return 1; + return panel_info->num_modes; } static const struct drm_panel_funcs nt39016_funcs = { @@ -316,8 +324,8 @@ static int nt39016_remove(struct spi_device *spi) return 0; } -static const struct nt39016_panel_info kd035g6_info = { - .display_mode = { +static const struct drm_display_mode kd035g6_display_modes[] = { + { /* 60 Hz */ .clock = 6000, .hdisplay = 320, .hsync_start = 320 + 10, @@ -330,6 +338,24 @@ static const struct nt39016_panel_info kd035g6_info = { .vrefresh = 60, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, }, + { /* 50 Hz */ + .clock = 5400, + .hdisplay = 320, + .hsync_start = 320 + 42, + .hsync_end = 320 + 42 + 50, + .htotal = 320 + 42 + 50 + 20, + .vdisplay = 240, + .vsync_start = 240 + 5, + .vsync_end = 240 + 5 + 1, + .vtotal = 240 + 5 + 1 + 4, + .vrefresh = 50, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, + }, +}; + +static const struct nt39016_panel_info kd035g6_info = { + .display_modes = kd035g6_display_modes, + .num_modes = ARRAY_SIZE(kd035g6_display_modes), .width_mm = 71, .height_mm = 53, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 0ce81b1f36af..003b54ea90d5 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -361,7 +361,6 @@ static int panel_dpi_probe(struct device *dev, struct panel_desc *desc; unsigned int bus_flags; struct videomode vm; - const char *mapping; int ret; np = dev->of_node; @@ -386,16 +385,6 @@ static int panel_dpi_probe(struct device *dev, of_property_read_u32(np, "width-mm", &desc->size.width); of_property_read_u32(np, "height-mm", &desc->size.height); - of_property_read_string(np, "data-mapping", &mapping); - if (!strcmp(mapping, "rgb24")) - desc->bus_format = MEDIA_BUS_FMT_RGB888_1X24; - else if (!strcmp(mapping, "rgb565")) - desc->bus_format = MEDIA_BUS_FMT_RGB565_1X16; - else if (!strcmp(mapping, "bgr666")) - desc->bus_format = MEDIA_BUS_FMT_RGB666_1X18; - else if (!strcmp(mapping, "lvds666")) - desc->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI; - /* Extract bus_flags from display_timing */ bus_flags = 0; vm.flags = timing->flags; @@ -3076,6 +3065,32 @@ static const struct panel_desc shelly_sca07010_bfn_lnn = { .bus_format = MEDIA_BUS_FMT_RGB666_1X18, }; +static const struct drm_display_mode starry_kr070pe2t_mode = { + .clock = 33000, + .hdisplay = 800, + .hsync_start = 800 + 209, + .hsync_end = 800 + 209 + 1, + .htotal = 800 + 209 + 1 + 45, + .vdisplay = 480, + .vsync_start = 480 + 22, + .vsync_end = 480 + 22 + 1, + .vtotal = 480 + 22 + 1 + 22, + .vrefresh = 60, +}; + +static const struct panel_desc starry_kr070pe2t = { + .modes = &starry_kr070pe2t_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 152, + .height = 86, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, + .connector_type = DRM_MODE_CONNECTOR_LVDS, +}; + static const struct drm_display_mode starry_kr122ea0sra_mode = { .clock = 147000, .hdisplay = 1920, @@ -3727,6 +3742,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "shelly,sca07010-bfn-lnn", .data = &shelly_sca07010_bfn_lnn, }, { + .compatible = "starry,kr070pe2t", + .data = &starry_kr070pe2t, + }, { .compatible = "starry,kr122ea0sra", .data = &starry_kr122ea0sra, }, { diff --git a/drivers/gpu/drm/pl111/pl111_debugfs.c b/drivers/gpu/drm/pl111/pl111_debugfs.c index 3c8e82016854..26ca8cdf3e60 100644 --- 
a/drivers/gpu/drm/pl111/pl111_debugfs.c +++ b/drivers/gpu/drm/pl111/pl111_debugfs.c @@ -51,10 +51,10 @@ static const struct drm_info_list pl111_debugfs_list[] = { {"regs", pl111_debugfs_regs, 0}, }; -int +void pl111_debugfs_init(struct drm_minor *minor) { - return drm_debugfs_create_files(pl111_debugfs_list, - ARRAY_SIZE(pl111_debugfs_list), - minor->debugfs_root, minor); + drm_debugfs_create_files(pl111_debugfs_list, + ARRAY_SIZE(pl111_debugfs_list), + minor->debugfs_root, minor); } diff --git a/drivers/gpu/drm/pl111/pl111_drm.h b/drivers/gpu/drm/pl111/pl111_drm.h index 77d2da9a8a7c..ba399bcb792f 100644 --- a/drivers/gpu/drm/pl111/pl111_drm.h +++ b/drivers/gpu/drm/pl111/pl111_drm.h @@ -84,6 +84,6 @@ struct pl111_drm_dev_private { int pl111_display_init(struct drm_device *dev); irqreturn_t pl111_irq(int irq, void *data); -int pl111_debugfs_init(struct drm_minor *minor); +void pl111_debugfs_init(struct drm_minor *minor); #endif /* _PL111_DRM_H_ */ diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c index aa8aa8d9e405..f9ca0f3edbbb 100644 --- a/drivers/gpu/drm/pl111/pl111_drv.c +++ b/drivers/gpu/drm/pl111/pl111_drv.c @@ -90,10 +90,13 @@ static int pl111_modeset_init(struct drm_device *dev) struct drm_panel *panel = NULL; struct drm_bridge *bridge = NULL; bool defer = false; - int ret = 0; + int ret; int i; - drm_mode_config_init(dev); + ret = drmm_mode_config_init(dev); + if (ret) + return ret; + mode_config = &dev->mode_config; mode_config->funcs = &mode_config_funcs; mode_config->min_width = 1; @@ -154,7 +157,7 @@ static int pl111_modeset_init(struct drm_device *dev) DRM_MODE_CONNECTOR_Unknown); if (IS_ERR(bridge)) { ret = PTR_ERR(bridge); - goto out_config; + goto finish; } } else if (bridge) { dev_info(dev->dev, "Using non-panel bridge\n"); @@ -197,8 +200,6 @@ static int pl111_modeset_init(struct drm_device *dev) out_bridge: if (panel) drm_panel_bridge_remove(bridge); -out_config: - drm_mode_config_cleanup(dev); finish: return ret; } @@ -343,7 +344,6 @@ static int pl111_amba_remove(struct amba_device *amba_dev) drm_dev_unregister(drm); if (priv->panel) drm_panel_bridge_remove(priv->bridge); - drm_mode_config_cleanup(drm); drm_dev_put(drm); of_reserved_mem_device_release(dev); diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c index a4f4175bbdbe..88123047fdd4 100644 --- a/drivers/gpu/drm/qxl/qxl_debugfs.c +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c @@ -79,36 +79,30 @@ static struct drm_info_list qxl_debugfs_list[] = { #define QXL_DEBUGFS_ENTRIES ARRAY_SIZE(qxl_debugfs_list) #endif -int +void qxl_debugfs_init(struct drm_minor *minor) { #if defined(CONFIG_DEBUG_FS) - int r; struct qxl_device *dev = (struct qxl_device *) minor->dev->dev_private; drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES, minor->debugfs_root, minor); - r = qxl_ttm_debugfs_init(dev); - if (r) { - DRM_ERROR("Failed to init TTM debugfs\n"); - return r; - } + qxl_ttm_debugfs_init(dev); #endif - return 0; } -int qxl_debugfs_add_files(struct qxl_device *qdev, - struct drm_info_list *files, - unsigned int nfiles) +void qxl_debugfs_add_files(struct qxl_device *qdev, + struct drm_info_list *files, + unsigned int nfiles) { unsigned int i; for (i = 0; i < qdev->debugfs_count; i++) { if (qdev->debugfs[i].files == files) { /* Already registered */ - return 0; + return; } } @@ -116,7 +110,7 @@ int qxl_debugfs_add_files(struct qxl_device *qdev, if (i > QXL_DEBUGFS_MAX_COMPONENTS) { DRM_ERROR("Reached maximum number of debugfs components.\n"); DRM_ERROR("Report 
so we increase QXL_DEBUGFS_MAX_COMPONENTS.\n"); - return -EINVAL; + return; } qdev->debugfs[qdev->debugfs_count].files = files; qdev->debugfs[qdev->debugfs_count].num_files = nfiles; @@ -126,5 +120,4 @@ int qxl_debugfs_add_files(struct qxl_device *qdev, qdev->ddev.primary->debugfs_root, qdev->ddev.primary); #endif - return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 4fda3f9b29f4..09102e2efabc 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -144,8 +144,6 @@ static void qxl_drm_release(struct drm_device *dev) */ qxl_modeset_fini(qdev); qxl_device_fini(qdev); - dev->dev_private = NULL; - kfree(qdev); } static void diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 27e45a2d6b52..435126facc9b 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -190,9 +190,6 @@ struct qxl_debugfs { unsigned int num_files; }; -int qxl_debugfs_add_files(struct qxl_device *rdev, - struct drm_info_list *files, - unsigned int nfiles); int qxl_debugfs_fence_init(struct qxl_device *rdev); struct qxl_device; @@ -442,8 +439,8 @@ int qxl_garbage_collect(struct qxl_device *qdev); /* debugfs */ -int qxl_debugfs_init(struct drm_minor *minor); -int qxl_ttm_debugfs_init(struct qxl_device *qdev); +void qxl_debugfs_init(struct drm_minor *minor); +void qxl_ttm_debugfs_init(struct qxl_device *qdev); /* qxl_prime.c */ int qxl_gem_prime_pin(struct drm_gem_object *obj); @@ -461,9 +458,9 @@ int qxl_gem_prime_mmap(struct drm_gem_object *obj, int qxl_irq_init(struct qxl_device *qdev); irqreturn_t qxl_irq_handler(int irq, void *arg); -int qxl_debugfs_add_files(struct qxl_device *qdev, - struct drm_info_list *files, - unsigned int nfiles); +void qxl_debugfs_add_files(struct qxl_device *qdev, + struct drm_info_list *files, + unsigned int nfiles); int qxl_surface_id_alloc(struct qxl_device *qdev, struct qxl_bo *surf); diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c index 70b20ee4741a..9eed1a375f24 100644 --- a/drivers/gpu/drm/qxl/qxl_kms.c +++ b/drivers/gpu/drm/qxl/qxl_kms.c @@ -27,6 +27,7 @@ #include <linux/pci.h> #include <drm/drm_drv.h> +#include <drm/drm_managed.h> #include <drm/drm_probe_helper.h> #include "qxl_drv.h" @@ -121,6 +122,7 @@ int qxl_device_init(struct qxl_device *qdev, qdev->ddev.pdev = pdev; pci_set_drvdata(pdev, &qdev->ddev); qdev->ddev.dev_private = qdev; + drmm_add_final_kfree(&qdev->ddev, qdev); mutex_init(&qdev->gem.mutex); mutex_init(&qdev->update_area_mutex); @@ -218,7 +220,7 @@ int qxl_device_init(struct qxl_device *qdev, &(qdev->ram_header->cursor_ring_hdr), sizeof(struct qxl_command), QXL_CURSOR_RING_SIZE, - qdev->io_base + QXL_IO_NOTIFY_CMD, + qdev->io_base + QXL_IO_NOTIFY_CURSOR, false, &qdev->cursor_event); diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 62a5e424971b..93a2eb14844b 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -322,7 +322,7 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data) } #endif -int qxl_ttm_debugfs_init(struct qxl_device *qdev) +void qxl_ttm_debugfs_init(struct qxl_device *qdev) { #if defined(CONFIG_DEBUG_FS) static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES]; @@ -343,8 +343,6 @@ int qxl_ttm_debugfs_init(struct qxl_device *qdev) qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV].priv; } - return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i); -#else - return 0; + qxl_debugfs_add_files(qdev, qxl_mem_types_list, i); 
#endif } diff --git a/drivers/gpu/drm/r128/ati_pcigart.c b/drivers/gpu/drm/r128/ati_pcigart.c index 9b4072f97215..3e76ae5a17ee 100644 --- a/drivers/gpu/drm/r128/ati_pcigart.c +++ b/drivers/gpu/drm/r128/ati_pcigart.c @@ -32,9 +32,10 @@ */ #include <linux/export.h> +#include <linux/pci.h> #include <drm/drm_device.h> -#include <drm/drm_pci.h> +#include <drm/drm_legacy.h> #include <drm/drm_print.h> #include "ati_pcigart.h" diff --git a/drivers/gpu/drm/radeon/.gitignore b/drivers/gpu/drm/radeon/.gitignore index 403eb3a5891f..9c1a94153983 100644 --- a/drivers/gpu/drm/radeon/.gitignore +++ b/drivers/gpu/drm/radeon/.gitignore @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only mkregtable *_reg_safe.h diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 848ef68d9086..5d2591725189 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -2111,7 +2111,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) ucOverdriveThermalController]; info.addr = power_info->info.ucOverdriveControllerAddress >> 1; strlcpy(info.type, name, sizeof(info.type)); - i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); + i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info); } } num_modes = power_info->info.ucNumOfPowerModeEntries; @@ -2351,7 +2351,7 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r const char *name = pp_lib_thermal_controller_names[controller->ucType]; info.addr = controller->ucI2cAddress >> 1; strlcpy(info.type, name, sizeof(info.type)); - i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); + i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info); } } else { DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n", diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index c42f73fad3e3..bb29cf02974d 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -108,25 +108,33 @@ static bool radeon_read_bios(struct radeon_device *rdev) static bool radeon_read_platform_bios(struct radeon_device *rdev) { - uint8_t __iomem *bios; - size_t size; + phys_addr_t rom = rdev->pdev->rom; + size_t romlen = rdev->pdev->romlen; + void __iomem *bios; rdev->bios = NULL; - bios = pci_platform_rom(rdev->pdev, &size); - if (!bios) { + if (!rom || romlen == 0) return false; - } - if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { + rdev->bios = kzalloc(romlen, GFP_KERNEL); + if (!rdev->bios) return false; - } - rdev->bios = kmemdup(bios, size, GFP_KERNEL); - if (rdev->bios == NULL) { - return false; - } + + bios = ioremap(rom, romlen); + if (!bios) + goto free_bios; + + memcpy_fromio(rdev->bios, bios, romlen); + iounmap(bios); + + if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) + goto free_bios; return true; +free_bios: + kfree(rdev->bios); + return false; } #ifdef CONFIG_ACPI diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index c3e49c973812..d3c04df7e75d 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -2704,7 +2704,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) const char *name = thermal_controller_names[thermal_controller]; info.addr = i2c_addr >> 1; strlcpy(info.type, name, sizeof(info.type)); - i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); + i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info); } } } else { @@ -2721,7 +2721,7 @@ void 
radeon_combios_get_power_modes(struct radeon_device *rdev) const char *name = "f75375"; info.addr = 0x28; strlcpy(info.type, name, sizeof(info.type)); - i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); + i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info); DRM_INFO("Possible %s thermal controller at 0x%02x\n", name, info.addr); } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 59f8186a2415..bbb0883e8ce6 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -36,6 +36,7 @@ #include <linux/pm_runtime.h> #include <linux/vga_switcheroo.h> #include <linux/mmu_notifier.h> +#include <linux/pci.h> #include <drm/drm_agpsupport.h> #include <drm/drm_crtc_helper.h> @@ -44,7 +45,6 @@ #include <drm/drm_file.h> #include <drm/drm_gem.h> #include <drm/drm_ioctl.h> -#include <drm/drm_pci.h> #include <drm/drm_pciids.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 654e2dd08146..3e67cf70f040 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -530,7 +530,6 @@ static int rcar_du_remove(struct platform_device *pdev) drm_dev_unregister(ddev); drm_kms_helper_poll_fini(ddev); - drm_mode_config_cleanup(ddev); drm_dev_put(ddev); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c index c07c6a88aff0..b0335da0c161 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -13,6 +13,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_panel.h> +#include <drm/drm_simple_kms_helper.h> #include "rcar_du_drv.h" #include "rcar_du_encoder.h" @@ -23,13 +24,6 @@ * Encoder */ -static const struct drm_encoder_helper_funcs encoder_helper_funcs = { -}; - -static const struct drm_encoder_funcs encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static unsigned int rcar_du_encoder_count_ports(struct device_node *node) { struct device_node *ports; @@ -110,13 +104,11 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu, } } - ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs, - DRM_MODE_ENCODER_NONE, NULL); + ret = drm_simple_encoder_init(rcdu->ddev, encoder, + DRM_MODE_ENCODER_NONE); if (ret < 0) goto done; - drm_encoder_helper_add(encoder, &encoder_helper_funcs); - /* * Attach the bridge to the encoder. The bridge will create the * connector. 
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index fcfd916227d1..482329102f19 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -712,7 +712,9 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) unsigned int i; int ret; - drm_mode_config_init(dev); + ret = drmm_mode_config_init(dev); + if (ret) + return ret; dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c index f38f5e113c6b..ade2327a10e2 100644 --- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c @@ -26,6 +26,7 @@ #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "rockchip_drm_drv.h" #include "rockchip_drm_vop.h" @@ -258,10 +259,6 @@ static struct drm_encoder_helper_funcs rockchip_dp_encoder_helper_funcs = { .atomic_check = rockchip_dp_drm_encoder_atomic_check, }; -static struct drm_encoder_funcs rockchip_dp_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static int rockchip_dp_of_probe(struct rockchip_dp_device *dp) { struct device *dev = dp->dev; @@ -309,8 +306,8 @@ static int rockchip_dp_drm_create_encoder(struct rockchip_dp_device *dp) dev->of_node); DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); - ret = drm_encoder_init(drm_dev, encoder, &rockchip_dp_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + ret = drm_simple_encoder_init(drm_dev, encoder, + DRM_MODE_ENCODER_TMDS); if (ret) { DRM_ERROR("failed to initialize encoder with drm\n"); return ret; @@ -325,15 +322,9 @@ static int rockchip_dp_bind(struct device *dev, struct device *master, void *data) { struct rockchip_dp_device *dp = dev_get_drvdata(dev); - const struct rockchip_dp_chip_data *dp_data; struct drm_device *drm_dev = data; int ret; - dp_data = of_device_get_match_data(dev); - if (!dp_data) - return -ENODEV; - - dp->data = dp_data; dp->drm_dev = drm_dev; ret = rockchip_dp_drm_create_encoder(dp); @@ -344,16 +335,9 @@ static int rockchip_dp_bind(struct device *dev, struct device *master, dp->plat_data.encoder = &dp->encoder; - dp->plat_data.dev_type = dp->data->chip_type; - dp->plat_data.power_on_start = rockchip_dp_poweron_start; - dp->plat_data.power_off = rockchip_dp_powerdown; - dp->plat_data.get_modes = rockchip_dp_get_modes; - - dp->adp = analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data); - if (IS_ERR(dp->adp)) { - ret = PTR_ERR(dp->adp); + ret = analogix_dp_bind(dp->adp, drm_dev); + if (ret) goto err_cleanup_encoder; - } return 0; err_cleanup_encoder: @@ -368,8 +352,6 @@ static void rockchip_dp_unbind(struct device *dev, struct device *master, analogix_dp_unbind(dp->adp); dp->encoder.funcs->destroy(&dp->encoder); - - dp->adp = ERR_PTR(-ENODEV); } static const struct component_ops rockchip_dp_component_ops = { @@ -380,10 +362,15 @@ static const struct component_ops rockchip_dp_component_ops = { static int rockchip_dp_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + const struct rockchip_dp_chip_data *dp_data; struct drm_panel *panel = NULL; struct rockchip_dp_device *dp; int ret; + dp_data = of_device_get_match_data(dev); + if (!dp_data) + return -ENODEV; + ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL); if (ret < 0) return ret; @@ -394,7 +381,12 @@ static int rockchip_dp_probe(struct platform_device *pdev) dp->dev = dev; dp->adp = 
ERR_PTR(-ENODEV); + dp->data = dp_data; dp->plat_data.panel = panel; + dp->plat_data.dev_type = dp->data->chip_type; + dp->plat_data.power_on_start = rockchip_dp_poweron_start; + dp->plat_data.power_off = rockchip_dp_powerdown; + dp->plat_data.get_modes = rockchip_dp_get_modes; ret = rockchip_dp_of_probe(dp); if (ret < 0) @@ -402,12 +394,19 @@ static int rockchip_dp_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dp); + dp->adp = analogix_dp_probe(dev, &dp->plat_data); + if (IS_ERR(dp->adp)) + return PTR_ERR(dp->adp); + return component_add(dev, &rockchip_dp_component_ops); } static int rockchip_dp_remove(struct platform_device *pdev) { + struct rockchip_dp_device *dp = platform_get_drvdata(pdev); + component_del(&pdev->dev, &rockchip_dp_component_ops); + analogix_dp_remove(dp->adp); return 0; } diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index eed594bd38d3..06f85138b51b 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -20,6 +20,7 @@ #include <drm/drm_edid.h> #include <drm/drm_of.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "cdn-dp-core.h" #include "cdn-dp-reg.h" @@ -689,10 +690,6 @@ static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = { .atomic_check = cdn_dp_encoder_atomic_check, }; -static const struct drm_encoder_funcs cdn_dp_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static int cdn_dp_parse_dt(struct cdn_dp_device *dp) { struct device *dev = dp->dev; @@ -1030,8 +1027,8 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data) dev->of_node); DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); - ret = drm_encoder_init(drm_dev, encoder, &cdn_dp_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + ret = drm_simple_encoder_init(drm_dev, encoder, + DRM_MODE_ENCODER_TMDS); if (ret) { DRM_ERROR("failed to initialize encoder with drm\n"); return ret; diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c index 6e1270e45f97..3feff0c45b3f 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c @@ -21,6 +21,7 @@ #include <drm/bridge/dw_mipi_dsi.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> +#include <drm/drm_simple_kms_helper.h> #include "rockchip_drm_drv.h" #include "rockchip_drm_vop.h" @@ -789,10 +790,6 @@ dw_mipi_dsi_encoder_helper_funcs = { .disable = dw_mipi_dsi_encoder_disable, }; -static const struct drm_encoder_funcs dw_mipi_dsi_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi, struct drm_device *drm_dev) { @@ -802,8 +799,7 @@ static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi, encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev, dsi->dev->of_node); - ret = drm_encoder_init(drm_dev, encoder, &dw_mipi_dsi_encoder_funcs, - DRM_MODE_ENCODER_DSI, NULL); + ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_DSI); if (ret) { DRM_ERROR("Failed to initialize encoder with drm\n"); return ret; diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 7f56d8c3491d..121aa8a63a76 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -14,6 +14,7 @@ #include <drm/drm_edid.h> #include <drm/drm_of.h> #include 
<drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "rockchip_drm_drv.h" #include "rockchip_drm_vop.h" @@ -237,10 +238,6 @@ dw_hdmi_rockchip_mode_valid(struct drm_connector *connector, return (valid) ? MODE_OK : MODE_BAD; } -static const struct drm_encoder_funcs dw_hdmi_rockchip_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static void dw_hdmi_rockchip_encoder_disable(struct drm_encoder *encoder) { } @@ -546,8 +543,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, } drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs); - drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); platform_set_drvdata(pdev, hdmi); diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c index e5864e823020..7afdc54eb3ec 100644 --- a/drivers/gpu/drm/rockchip/inno_hdmi.c +++ b/drivers/gpu/drm/rockchip/inno_hdmi.c @@ -19,6 +19,7 @@ #include <drm/drm_edid.h> #include <drm/drm_of.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "rockchip_drm_drv.h" #include "rockchip_drm_vop.h" @@ -532,10 +533,6 @@ static struct drm_encoder_helper_funcs inno_hdmi_encoder_helper_funcs = { .atomic_check = inno_hdmi_encoder_atomic_check, }; -static struct drm_encoder_funcs inno_hdmi_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static enum drm_connector_status inno_hdmi_connector_detect(struct drm_connector *connector, bool force) { @@ -617,8 +614,7 @@ static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi) return -EPROBE_DEFER; drm_encoder_helper_add(encoder, &inno_hdmi_encoder_helper_funcs); - drm_encoder_init(drm, encoder, &inno_hdmi_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD; diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c index fe203d38664e..1c546c3a8998 100644 --- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c +++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c @@ -6,6 +6,7 @@ #include <drm/drm_of.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include <linux/clk.h> #include <linux/mfd/syscon.h> @@ -451,10 +452,6 @@ struct drm_encoder_helper_funcs rk3066_hdmi_encoder_helper_funcs = { .atomic_check = rk3066_hdmi_encoder_atomic_check, }; -static const struct drm_encoder_funcs rk3066_hdmi_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static enum drm_connector_status rk3066_hdmi_connector_detect(struct drm_connector *connector, bool force) { @@ -557,8 +554,7 @@ rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi) return -EPROBE_DEFER; drm_encoder_helper_add(encoder, &rk3066_hdmi_encoder_helper_funcs); - drm_encoder_init(drm, encoder, &rk3066_hdmi_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 20ecb1508a22..0f3eb392fe39 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -135,14 +135,16 @@ static int rockchip_drm_bind(struct device *dev) if (ret) goto err_free; - drm_mode_config_init(drm_dev); + ret = drmm_mode_config_init(drm_dev); + if (ret) + goto err_iommu_cleanup; 
rockchip_drm_mode_config_init(drm_dev); /* Try to bind all sub drivers. */ ret = component_bind_all(dev, drm_dev); if (ret) - goto err_mode_config_cleanup; + goto err_iommu_cleanup; ret = drm_vblank_init(drm_dev, drm_dev->mode_config.num_crtc); if (ret) @@ -173,12 +175,9 @@ err_kms_helper_poll_fini: rockchip_drm_fbdev_fini(drm_dev); err_unbind_all: component_unbind_all(dev, drm_dev); -err_mode_config_cleanup: - drm_mode_config_cleanup(drm_dev); +err_iommu_cleanup: rockchip_iommu_cleanup(drm_dev); err_free: - drm_dev->dev_private = NULL; - dev_set_drvdata(dev, NULL); drm_dev_put(drm_dev); return ret; } @@ -194,11 +193,8 @@ static void rockchip_drm_unbind(struct device *dev) drm_atomic_helper_shutdown(drm_dev); component_unbind_all(dev, drm_dev); - drm_mode_config_cleanup(drm_dev); rockchip_iommu_cleanup(drm_dev); - drm_dev->dev_private = NULL; - dev_set_drvdata(dev, NULL); drm_dev_put(drm_dev); } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index c5b06048124e..e33c2dcd0d4b 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -30,6 +30,7 @@ struct rockchip_crtc_state { int output_mode; int output_bpc; int output_flags; + bool enable_afbc; }; #define to_rockchip_crtc_state(s) \ container_of(s, struct rockchip_crtc_state, base) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c index 221e72e71432..9b13c784b347 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c @@ -57,8 +57,49 @@ static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, }; +static struct drm_framebuffer * +rockchip_fb_create(struct drm_device *dev, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct drm_afbc_framebuffer *afbc_fb; + const struct drm_format_info *info; + int ret; + + info = drm_get_format_info(dev, mode_cmd); + if (!info) + return ERR_PTR(-ENOMEM); + + afbc_fb = kzalloc(sizeof(*afbc_fb), GFP_KERNEL); + if (!afbc_fb) + return ERR_PTR(-ENOMEM); + + ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, file, mode_cmd, + &rockchip_drm_fb_funcs); + if (ret) { + kfree(afbc_fb); + return ERR_PTR(ret); + } + + if (drm_is_afbc(mode_cmd->modifier[0])) { + int ret, i; + + ret = drm_gem_fb_afbc_init(dev, mode_cmd, afbc_fb); + if (ret) { + struct drm_gem_object **obj = afbc_fb->base.obj; + + for (i = 0; i < info->num_planes; ++i) + drm_gem_object_put_unlocked(obj[i]); + + kfree(afbc_fb); + return ERR_PTR(ret); + } + } + + return &afbc_fb->base; +} + static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = { - .fb_create = drm_gem_fb_create_with_dirty, + .fb_create = rockchip_fb_create, .output_poll_changed = drm_fb_helper_output_poll_changed, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index cecb2cc781f5..33463b79a37b 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -91,9 +91,22 @@ #define VOP_WIN_TO_INDEX(vop_win) \ ((vop_win) - (vop_win)->vop->win) +#define VOP_AFBC_SET(vop, name, v) \ + do { \ + if ((vop)->data->afbc) \ + vop_reg_set((vop), &(vop)->data->afbc->name, \ + 0, ~0, v, #name); \ + } while (0) + #define to_vop(x) container_of(x, struct vop, crtc) #define to_vop_win(x) 
container_of(x, struct vop_win, base) +#define AFBC_FMT_RGB565 0x0 +#define AFBC_FMT_U8U8U8U8 0x5 +#define AFBC_FMT_U8U8U8 0x4 + +#define AFBC_TILE_16x16 BIT(4) + /* * The coefficients of the following matrix are all fixed points. * The format is S2.10 for the 3x3 part of the matrix, and S9.12 for the offsets. @@ -274,6 +287,29 @@ static enum vop_data_format vop_convert_format(uint32_t format) } } +static int vop_convert_afbc_format(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + return AFBC_FMT_U8U8U8U8; + case DRM_FORMAT_RGB888: + case DRM_FORMAT_BGR888: + return AFBC_FMT_U8U8U8; + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + return AFBC_FMT_RGB565; + /* either of the below should not be reachable */ + default: + DRM_WARN_ONCE("unsupported AFBC format[%08x]\n", format); + return -EINVAL; + } + + return -EINVAL; +} + static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src, uint32_t dst, bool is_horizontal, int vsu_mode, int *vskiplines) @@ -598,6 +634,17 @@ static int vop_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) vop_win_disable(vop, vop_win); } } + + if (vop->data->afbc) { + struct rockchip_crtc_state *s; + /* + * Disable AFBC and forget there was a vop window with AFBC + */ + VOP_AFBC_SET(vop, enable, 0); + s = to_rockchip_crtc_state(crtc->state); + s->enable_afbc = false; + } + spin_unlock(&vop->reg_lock); vop_cfg_done(vop); @@ -710,6 +757,26 @@ static void vop_plane_destroy(struct drm_plane *plane) drm_plane_cleanup(plane); } +static inline bool rockchip_afbc(u64 modifier) +{ + return modifier == ROCKCHIP_AFBC_MOD; +} + +static bool rockchip_mod_supported(struct drm_plane *plane, + u32 format, u64 modifier) +{ + if (modifier == DRM_FORMAT_MOD_LINEAR) + return true; + + if (!rockchip_afbc(modifier)) { + DRM_DEBUG_KMS("Unsupported format modifier 0x%llx\n", modifier); + + return false; + } + + return vop_convert_afbc_format(format) >= 0; +} + static int vop_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) { @@ -758,6 +825,30 @@ static int vop_plane_atomic_check(struct drm_plane *plane, return -EINVAL; } + if (rockchip_afbc(fb->modifier)) { + struct vop *vop = to_vop(crtc); + + if (!vop->data->afbc) { + DRM_ERROR("vop does not support AFBC\n"); + return -EINVAL; + } + + ret = vop_convert_afbc_format(fb->format->format); + if (ret < 0) + return ret; + + if (state->src.x1 || state->src.y1) { + DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n", state->src.x1, state->src.y1, fb->offsets[0]); + return -EINVAL; + } + + if (state->rotation && state->rotation != DRM_MODE_ROTATE_0) { + DRM_ERROR("No rotation support in AFBC, rotation=%d\n", + state->rotation); + return -EINVAL; + } + } + return 0; } @@ -846,6 +937,16 @@ static void vop_plane_atomic_update(struct drm_plane *plane, spin_lock(&vop->reg_lock); + if (rockchip_afbc(fb->modifier)) { + int afbc_format = vop_convert_afbc_format(fb->format->format); + + VOP_AFBC_SET(vop, format, afbc_format | AFBC_TILE_16x16); + VOP_AFBC_SET(vop, hreg_block_split, 0); + VOP_AFBC_SET(vop, win_sel, VOP_WIN_TO_INDEX(vop_win)); + VOP_AFBC_SET(vop, hdr_ptr, dma_addr); + VOP_AFBC_SET(vop, pic_size, act_info); + } + VOP_WIN_SET(vop, win, format, format); VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4)); VOP_WIN_SET(vop, win, yrgb_mst, dma_addr); @@ -1001,6 +1102,7 @@ static const struct drm_plane_funcs vop_plane_funcs = { .reset = 
drm_atomic_helper_plane_reset, .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + .format_mod_supported = rockchip_mod_supported, }; static int vop_crtc_enable_vblank(struct drm_crtc *crtc) @@ -1310,6 +1412,10 @@ static int vop_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state) { struct vop *vop = to_vop(crtc); + struct drm_plane *plane; + struct drm_plane_state *plane_state; + struct rockchip_crtc_state *s; + int afbc_planes = 0; if (vop->lut_regs && crtc_state->color_mgmt_changed && crtc_state->gamma_lut) { @@ -1323,6 +1429,27 @@ static int vop_crtc_atomic_check(struct drm_crtc *crtc, } } + drm_atomic_crtc_state_for_each_plane(plane, crtc_state) { + plane_state = + drm_atomic_get_plane_state(crtc_state->state, plane); + if (IS_ERR(plane_state)) { + DRM_DEBUG_KMS("Cannot get plane state for plane %s\n", + plane->name); + return PTR_ERR(plane_state); + } + + if (drm_is_afbc(plane_state->fb->modifier)) + ++afbc_planes; + } + + if (afbc_planes > 1) { + DRM_DEBUG_KMS("Invalid number of AFBC planes; got %d, expected at most 1\n", afbc_planes); + return -EINVAL; + } + + s = to_rockchip_crtc_state(crtc_state); + s->enable_afbc = afbc_planes > 0; + return 0; } @@ -1333,6 +1460,7 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_plane_state *old_plane_state, *new_plane_state; struct vop *vop = to_vop(crtc); struct drm_plane *plane; + struct rockchip_crtc_state *s; int i; if (WARN_ON(!vop->is_enabled)) @@ -1340,6 +1468,9 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc, spin_lock(&vop->reg_lock); + /* Enable AFBC if there is some AFBC window, disable otherwise. */ + s = to_rockchip_crtc_state(crtc->state); + VOP_AFBC_SET(vop, enable, s->enable_afbc); vop_cfg_done(vop); spin_unlock(&vop->reg_lock); @@ -1634,7 +1765,8 @@ static int vop_create_crtc(struct vop *vop) 0, &vop_plane_funcs, win_data->phy->data_formats, win_data->phy->nformats, - NULL, win_data->type, NULL); + win_data->phy->format_modifiers, + win_data->type, NULL); if (ret) { DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n", ret); @@ -1678,7 +1810,8 @@ static int vop_create_crtc(struct vop *vop) &vop_plane_funcs, win_data->phy->data_formats, win_data->phy->nformats, - NULL, win_data->type, NULL); + win_data->phy->format_modifiers, + win_data->type, NULL); if (ret) { DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n", ret); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h index cc672620d6e0..d03bdb531ef2 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h @@ -17,6 +17,11 @@ #define NUM_YUV2YUV_COEFFICIENTS 12 +#define ROCKCHIP_AFBC_MOD \ + DRM_FORMAT_MOD_ARM_AFBC( \ + AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | AFBC_FORMAT_MOD_SPARSE \ + ) + enum vop_data_format { VOP_FMT_ARGB8888 = 0, VOP_FMT_RGB888, @@ -34,6 +39,16 @@ struct vop_reg { bool relaxed; }; +struct vop_afbc { + struct vop_reg enable; + struct vop_reg win_sel; + struct vop_reg format; + struct vop_reg hreg_block_split; + struct vop_reg pic_size; + struct vop_reg hdr_ptr; + struct vop_reg rstn; +}; + struct vop_modeset { struct vop_reg htotal_pw; struct vop_reg hact_st_end; @@ -134,6 +149,7 @@ struct vop_win_phy { const struct vop_scl_regs *scl; const uint32_t *data_formats; uint32_t nformats; + const uint64_t *format_modifiers; struct vop_reg enable; struct vop_reg gate; @@ -173,6 +189,7 @@ struct vop_data { const struct vop_misc *misc; 
const struct vop_modeset *modeset; const struct vop_output *output; + const struct vop_afbc *afbc; const struct vop_win_yuv2yuv_data *win_yuv2yuv; const struct vop_win_data *win; unsigned int win_size; diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c index 449a62908d21..63f967902c2d 100644 --- a/drivers/gpu/drm/rockchip/rockchip_lvds.c +++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c @@ -16,13 +16,14 @@ #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/reset.h> + #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> - #include <drm/drm_dp_helper.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "rockchip_drm_drv.h" #include "rockchip_drm_vop.h" @@ -435,10 +436,6 @@ struct drm_encoder_helper_funcs px30_lvds_encoder_helper_funcs = { .atomic_check = rockchip_lvds_encoder_atomic_check, }; -static const struct drm_encoder_funcs rockchip_lvds_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static int rk3288_lvds_probe(struct platform_device *pdev, struct rockchip_lvds *lvds) { @@ -607,8 +604,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master, encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev, dev->of_node); - ret = drm_encoder_init(drm_dev, encoder, &rockchip_lvds_encoder_funcs, - DRM_MODE_ENCODER_LVDS, NULL); + ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_LVDS); if (ret < 0) { DRM_DEV_ERROR(drm_dev->dev, "failed to initialize encoder: %d\n", ret); diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c index 90784781e515..9a771af5d0c9 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.c +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c @@ -14,6 +14,7 @@ #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "rockchip_drm_drv.h" #include "rockchip_drm_vop.h" @@ -67,10 +68,6 @@ struct drm_encoder_helper_funcs rockchip_rgb_encoder_helper_funcs = { .atomic_check = rockchip_rgb_encoder_atomic_check, }; -static const struct drm_encoder_funcs rockchip_rgb_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - struct rockchip_rgb *rockchip_rgb_init(struct device *dev, struct drm_crtc *crtc, struct drm_device *drm_dev) @@ -126,8 +123,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev, encoder = &rgb->encoder; encoder->possible_crtcs = drm_crtc_mask(crtc); - ret = drm_encoder_init(drm_dev, encoder, &rockchip_rgb_encoder_funcs, - DRM_MODE_ENCODER_NONE, NULL); + ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_NONE); if (ret < 0) { DRM_DEV_ERROR(drm_dev->dev, "failed to initialize encoder: %d\n", ret); diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c index 7a9d979c8d5d..2413deded22c 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c @@ -50,6 +50,17 @@ static const uint32_t formats_win_full[] = { DRM_FORMAT_NV24, }; +static const uint64_t format_modifiers_win_full[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID, +}; + +static const uint64_t format_modifiers_win_full_afbc[] = { + ROCKCHIP_AFBC_MOD, + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID, +}; + static const uint32_t formats_win_lite[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, @@ -61,6 +72,11 @@ static const uint32_t formats_win_lite[] = { DRM_FORMAT_BGR565, }; 
+static const uint64_t format_modifiers_win_lite[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID, +}; + static const struct vop_scl_regs rk3036_win_scl = { .scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0), .scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16), @@ -72,6 +88,7 @@ static const struct vop_win_phy rk3036_win0_data = { .scl = &rk3036_win_scl, .data_formats = formats_win_full, .nformats = ARRAY_SIZE(formats_win_full), + .format_modifiers = format_modifiers_win_full, .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0), .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3), .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15), @@ -87,6 +104,7 @@ static const struct vop_win_phy rk3036_win0_data = { static const struct vop_win_phy rk3036_win1_data = { .data_formats = formats_win_lite, .nformats = ARRAY_SIZE(formats_win_lite), + .format_modifiers = format_modifiers_win_lite, .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1), .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6), .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19), @@ -153,6 +171,7 @@ static const struct vop_data rk3036_vop = { static const struct vop_win_phy rk3126_win1_data = { .data_formats = formats_win_lite, .nformats = ARRAY_SIZE(formats_win_lite), + .format_modifiers = format_modifiers_win_lite, .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1), .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6), .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19), @@ -234,6 +253,7 @@ static const struct vop_win_phy px30_win0_data = { .scl = &px30_win_scl, .data_formats = formats_win_full, .nformats = ARRAY_SIZE(formats_win_full), + .format_modifiers = format_modifiers_win_full, .enable = VOP_REG(PX30_WIN0_CTRL0, 0x1, 0), .format = VOP_REG(PX30_WIN0_CTRL0, 0x7, 1), .rb_swap = VOP_REG(PX30_WIN0_CTRL0, 0x1, 12), @@ -249,6 +269,7 @@ static const struct vop_win_phy px30_win0_data = { static const struct vop_win_phy px30_win1_data = { .data_formats = formats_win_lite, .nformats = ARRAY_SIZE(formats_win_lite), + .format_modifiers = format_modifiers_win_lite, .enable = VOP_REG(PX30_WIN1_CTRL0, 0x1, 0), .format = VOP_REG(PX30_WIN1_CTRL0, 0x7, 4), .rb_swap = VOP_REG(PX30_WIN1_CTRL0, 0x1, 12), @@ -261,6 +282,7 @@ static const struct vop_win_phy px30_win1_data = { static const struct vop_win_phy px30_win2_data = { .data_formats = formats_win_lite, .nformats = ARRAY_SIZE(formats_win_lite), + .format_modifiers = format_modifiers_win_lite, .gate = VOP_REG(PX30_WIN2_CTRL0, 0x1, 4), .enable = VOP_REG(PX30_WIN2_CTRL0, 0x1, 0), .format = VOP_REG(PX30_WIN2_CTRL0, 0x3, 5), @@ -316,6 +338,7 @@ static const struct vop_win_phy rk3066_win0_data = { .scl = &rk3066_win_scl, .data_formats = formats_win_full, .nformats = ARRAY_SIZE(formats_win_full), + .format_modifiers = format_modifiers_win_full, .enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 0), .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 4), .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 19), @@ -332,6 +355,7 @@ static const struct vop_win_phy rk3066_win1_data = { .scl = &rk3066_win_scl, .data_formats = formats_win_full, .nformats = ARRAY_SIZE(formats_win_full), + .format_modifiers = format_modifiers_win_full, .enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 1), .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 7), .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 23), @@ -347,6 +371,7 @@ static const struct vop_win_phy rk3066_win1_data = { static const struct vop_win_phy rk3066_win2_data = { .data_formats = formats_win_lite, .nformats = ARRAY_SIZE(formats_win_lite), + .format_modifiers = format_modifiers_win_lite, .enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 2), .format = 
VOP_REG(RK3066_SYS_CTRL0, 0x7, 10), .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 27), @@ -426,6 +451,7 @@ static const struct vop_win_phy rk3188_win0_data = { .scl = &rk3188_win_scl, .data_formats = formats_win_full, .nformats = ARRAY_SIZE(formats_win_full), + .format_modifiers = format_modifiers_win_full, .enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 0), .format = VOP_REG(RK3188_SYS_CTRL, 0x7, 3), .rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 15), @@ -440,6 +466,7 @@ static const struct vop_win_phy rk3188_win0_data = { static const struct vop_win_phy rk3188_win1_data = { .data_formats = formats_win_lite, .nformats = ARRAY_SIZE(formats_win_lite), + .format_modifiers = format_modifiers_win_lite, .enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 1), .format = VOP_REG(RK3188_SYS_CTRL, 0x7, 6), .rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 19), @@ -545,6 +572,7 @@ static const struct vop_win_phy rk3288_win01_data = { .scl = &rk3288_win_full_scl, .data_formats = formats_win_full, .nformats = ARRAY_SIZE(formats_win_full), + .format_modifiers = format_modifiers_win_full, .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0), .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1), .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12), @@ -563,6 +591,7 @@ static const struct vop_win_phy rk3288_win01_data = { static const struct vop_win_phy rk3288_win23_data = { .data_formats = formats_win_lite, .nformats = ARRAY_SIZE(formats_win_lite), + .format_modifiers = format_modifiers_win_lite, .enable = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 4), .gate = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 0), .format = VOP_REG(RK3288_WIN2_CTRL0, 0x7, 1), @@ -677,6 +706,7 @@ static const struct vop_win_phy rk3368_win01_data = { .scl = &rk3288_win_full_scl, .data_formats = formats_win_full, .nformats = ARRAY_SIZE(formats_win_full), + .format_modifiers = format_modifiers_win_full, .enable = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 0), .format = VOP_REG(RK3368_WIN0_CTRL0, 0x7, 1), .rb_swap = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 12), @@ -697,6 +727,7 @@ static const struct vop_win_phy rk3368_win01_data = { static const struct vop_win_phy rk3368_win23_data = { .data_formats = formats_win_lite, .nformats = ARRAY_SIZE(formats_win_lite), + .format_modifiers = format_modifiers_win_lite, .gate = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 0), .enable = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 4), .format = VOP_REG(RK3368_WIN2_CTRL0, 0x3, 5), @@ -817,6 +848,53 @@ static const struct vop_win_yuv2yuv_data rk3399_vop_big_win_yuv2yuv_data[] = { .y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 9) }, { .base = 0xC0, .phy = &rk3399_yuv2yuv_win23_data }, { .base = 0x120, .phy = &rk3399_yuv2yuv_win23_data }, + +}; + +static const struct vop_win_phy rk3399_win01_data = { + .scl = &rk3288_win_full_scl, + .data_formats = formats_win_full, + .nformats = ARRAY_SIZE(formats_win_full), + .format_modifiers = format_modifiers_win_full_afbc, + .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0), + .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1), + .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12), + .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22), + .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0), + .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0), + .dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0), + .yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0), + .uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0), + .yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0), + .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16), + .src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0), + .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0), +}; + +/* + 
* rk3399 vop big windows register layout is same as rk3288, but we + * have a separate rk3399 win data array here so that we can advertise + * AFBC on the primary plane. + */ +static const struct vop_win_data rk3399_vop_win_data[] = { + { .base = 0x00, .phy = &rk3399_win01_data, + .type = DRM_PLANE_TYPE_PRIMARY }, + { .base = 0x40, .phy = &rk3288_win01_data, + .type = DRM_PLANE_TYPE_OVERLAY }, + { .base = 0x00, .phy = &rk3288_win23_data, + .type = DRM_PLANE_TYPE_OVERLAY }, + { .base = 0x50, .phy = &rk3288_win23_data, + .type = DRM_PLANE_TYPE_CURSOR }, +}; + +static const struct vop_afbc rk3399_vop_afbc = { + .rstn = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 3), + .enable = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 0), + .win_sel = VOP_REG(RK3399_AFBCD0_CTRL, 0x3, 1), + .format = VOP_REG(RK3399_AFBCD0_CTRL, 0x1f, 16), + .hreg_block_split = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 21), + .hdr_ptr = VOP_REG(RK3399_AFBCD0_HDR_PTR, 0xffffffff, 0), + .pic_size = VOP_REG(RK3399_AFBCD0_PIC_SIZE, 0xffffffff, 0), }; static const struct vop_data rk3399_vop_big = { @@ -826,9 +904,10 @@ static const struct vop_data rk3399_vop_big = { .common = &rk3288_common, .modeset = &rk3288_modeset, .output = &rk3399_output, + .afbc = &rk3399_vop_afbc, .misc = &rk3368_misc, - .win = rk3368_vop_win_data, - .win_size = ARRAY_SIZE(rk3368_vop_win_data), + .win = rk3399_vop_win_data, + .win_size = ARRAY_SIZE(rk3399_vop_win_data), .win_yuv2yuv = rk3399_vop_big_win_yuv2yuv_data, }; diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c index 75a752d59ef1..03556dbfcafb 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c @@ -17,6 +17,7 @@ #include <drm/drm_gem_cma_helper.h> #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include <drm/drm_vblank.h> #include "shmob_drm_backlight.h" @@ -558,15 +559,6 @@ static const struct drm_encoder_helper_funcs encoder_helper_funcs = { .mode_set = shmob_drm_encoder_mode_set, }; -static void shmob_drm_encoder_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -static const struct drm_encoder_funcs encoder_funcs = { - .destroy = shmob_drm_encoder_destroy, -}; - int shmob_drm_encoder_create(struct shmob_drm_device *sdev) { struct drm_encoder *encoder = &sdev->encoder.encoder; @@ -576,8 +568,8 @@ int shmob_drm_encoder_create(struct shmob_drm_device *sdev) encoder->possible_crtcs = 1; - ret = drm_encoder_init(sdev->ddev, encoder, &encoder_funcs, - DRM_MODE_ENCODER_LVDS, NULL); + ret = drm_simple_encoder_init(sdev->ddev, encoder, + DRM_MODE_ENCODER_LVDS); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index b8c0930959c7..ae9d6b8d3ca8 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c @@ -192,7 +192,6 @@ static int shmob_drm_remove(struct platform_device *pdev) drm_dev_unregister(ddev); drm_kms_helper_poll_fini(ddev); - drm_mode_config_cleanup(ddev); drm_irq_uninstall(ddev); drm_dev_put(ddev); @@ -288,7 +287,6 @@ err_irq_uninstall: drm_irq_uninstall(ddev); err_modeset_cleanup: drm_kms_helper_poll_fini(ddev); - drm_mode_config_cleanup(ddev); err_free_drm_dev: drm_dev_put(ddev); diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c index c51197b6fd85..7a866d6ce6bb 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_kms.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c @@ -126,7 +126,11 
@@ static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = { int shmob_drm_modeset_init(struct shmob_drm_device *sdev) { - drm_mode_config_init(sdev->ddev); + int ret; + + ret = drmm_mode_config_init(sdev->ddev); + if (ret) + return ret; shmob_drm_crtc_create(sdev); shmob_drm_encoder_create(sdev); diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c index c7652584255d..319962a2c17b 100644 --- a/drivers/gpu/drm/sti/sti_compositor.c +++ b/drivers/gpu/drm/sti/sti_compositor.c @@ -42,8 +42,8 @@ static const struct sti_compositor_data stih407_compositor_data = { }, }; -int sti_compositor_debugfs_init(struct sti_compositor *compo, - struct drm_minor *minor) +void sti_compositor_debugfs_init(struct sti_compositor *compo, + struct drm_minor *minor) { unsigned int i; @@ -54,8 +54,6 @@ int sti_compositor_debugfs_init(struct sti_compositor *compo, for (i = 0; i < STI_MAX_MIXER; i++) if (compo->mixer[i]) sti_mixer_debugfs_init(compo->mixer[i], minor); - - return 0; } static int sti_compositor_bind(struct device *dev, diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h index ac4bb3834810..25bb01bdd013 100644 --- a/drivers/gpu/drm/sti/sti_compositor.h +++ b/drivers/gpu/drm/sti/sti_compositor.h @@ -79,7 +79,7 @@ struct sti_compositor { struct notifier_block vtg_vblank_nb[STI_MAX_MIXER]; }; -int sti_compositor_debugfs_init(struct sti_compositor *compo, - struct drm_minor *minor); +void sti_compositor_debugfs_init(struct sti_compositor *compo, + struct drm_minor *minor); #endif diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c index 49e6cb8f5836..6f37c104c46f 100644 --- a/drivers/gpu/drm/sti/sti_crtc.c +++ b/drivers/gpu/drm/sti/sti_crtc.c @@ -319,7 +319,7 @@ static int sti_crtc_late_register(struct drm_crtc *crtc) struct sti_compositor *compo = dev_get_drvdata(mixer->dev); if (drm_crtc_index(crtc) == 0) - return sti_compositor_debugfs_init(compo, crtc->dev->primary); + sti_compositor_debugfs_init(compo, crtc->dev->primary); return 0; } diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c index ea64c1dcaf63..a98057431023 100644 --- a/drivers/gpu/drm/sti/sti_cursor.c +++ b/drivers/gpu/drm/sti/sti_cursor.c @@ -131,17 +131,17 @@ static struct drm_info_list cursor_debugfs_files[] = { { "cursor", cursor_dbg_show, 0, NULL }, }; -static int cursor_debugfs_init(struct sti_cursor *cursor, - struct drm_minor *minor) +static void cursor_debugfs_init(struct sti_cursor *cursor, + struct drm_minor *minor) { unsigned int i; for (i = 0; i < ARRAY_SIZE(cursor_debugfs_files); i++) cursor_debugfs_files[i].data = cursor; - return drm_debugfs_create_files(cursor_debugfs_files, - ARRAY_SIZE(cursor_debugfs_files), - minor->debugfs_root, minor); + drm_debugfs_create_files(cursor_debugfs_files, + ARRAY_SIZE(cursor_debugfs_files), + minor->debugfs_root, minor); } static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src) @@ -342,7 +342,9 @@ static int sti_cursor_late_register(struct drm_plane *drm_plane) struct sti_plane *plane = to_sti_plane(drm_plane); struct sti_cursor *cursor = to_sti_cursor(plane); - return cursor_debugfs_init(cursor, drm_plane->dev->primary); + cursor_debugfs_init(cursor, drm_plane->dev->primary); + + return 0; } static const struct drm_plane_funcs sti_cursor_plane_helpers_funcs = { diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 50870d8cbb76..3f9db3e3f397 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ 
b/drivers/gpu/drm/sti/sti_drv.c @@ -92,24 +92,16 @@ static struct drm_info_list sti_drm_dbg_list[] = { {"fps_get", sti_drm_fps_dbg_show, 0}, }; -static int sti_drm_dbg_init(struct drm_minor *minor) +static void sti_drm_dbg_init(struct drm_minor *minor) { - int ret; - - ret = drm_debugfs_create_files(sti_drm_dbg_list, - ARRAY_SIZE(sti_drm_dbg_list), - minor->debugfs_root, minor); - if (ret) - goto err; + drm_debugfs_create_files(sti_drm_dbg_list, + ARRAY_SIZE(sti_drm_dbg_list), + minor->debugfs_root, minor); debugfs_create_file("fps_show", S_IRUGO | S_IWUSR, minor->debugfs_root, minor->dev, &sti_drm_fps_fops); DRM_INFO("%s: debugfs installed\n", DRIVER_NAME); - return 0; -err: - DRM_ERROR("%s: cannot install debugfs\n", DRIVER_NAME); - return ret; } static const struct drm_mode_config_funcs sti_mode_config_funcs = { diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index 3d04bfca21a0..de4af7735c46 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -196,16 +196,16 @@ static struct drm_info_list dvo_debugfs_files[] = { { "dvo", dvo_dbg_show, 0, NULL }, }; -static int dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor) +static void dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor) { unsigned int i; for (i = 0; i < ARRAY_SIZE(dvo_debugfs_files); i++) dvo_debugfs_files[i].data = dvo; - return drm_debugfs_create_files(dvo_debugfs_files, - ARRAY_SIZE(dvo_debugfs_files), - minor->debugfs_root, minor); + drm_debugfs_create_files(dvo_debugfs_files, + ARRAY_SIZE(dvo_debugfs_files), + minor->debugfs_root, minor); } static void sti_dvo_disable(struct drm_bridge *bridge) @@ -405,10 +405,7 @@ static int sti_dvo_late_register(struct drm_connector *connector) = to_sti_dvo_connector(connector); struct sti_dvo *dvo = dvo_connector->dvo; - if (dvo_debugfs_init(dvo, dvo->drm_dev->primary)) { - DRM_ERROR("DVO debugfs setup failed\n"); - return -EINVAL; - } + dvo_debugfs_init(dvo, dvo->drm_dev->primary); return 0; } diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index 11595c748844..2d5a2b5b78b8 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -343,9 +343,10 @@ static int gdp_debugfs_init(struct sti_gdp *gdp, struct drm_minor *minor) for (i = 0; i < nb_files; i++) gdp_debugfs_files[i].data = gdp; - return drm_debugfs_create_files(gdp_debugfs_files, - nb_files, - minor->debugfs_root, minor); + drm_debugfs_create_files(gdp_debugfs_files, + nb_files, + minor->debugfs_root, minor); + return 0; } static int sti_gdp_fourcc2format(int fourcc) diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index f3f28d79b0e4..a1ec891eaf3a 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -367,16 +367,16 @@ static struct drm_info_list hda_debugfs_files[] = { { "hda", hda_dbg_show, 0, NULL }, }; -static int hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor) +static void hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor) { unsigned int i; for (i = 0; i < ARRAY_SIZE(hda_debugfs_files); i++) hda_debugfs_files[i].data = hda; - return drm_debugfs_create_files(hda_debugfs_files, - ARRAY_SIZE(hda_debugfs_files), - minor->debugfs_root, minor); + drm_debugfs_create_files(hda_debugfs_files, + ARRAY_SIZE(hda_debugfs_files), + minor->debugfs_root, minor); } /** @@ -643,10 +643,7 @@ static int sti_hda_late_register(struct drm_connector *connector) = to_sti_hda_connector(connector); struct sti_hda *hda = hda_connector->hda; - if 
(hda_debugfs_init(hda, hda->drm_dev->primary)) { - DRM_ERROR("HDA debugfs setup failed\n"); - return -EINVAL; - } + hda_debugfs_init(hda, hda->drm_dev->primary); return 0; } diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 18eaf786ffa4..5b15c4974e6b 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -727,16 +727,16 @@ static struct drm_info_list hdmi_debugfs_files[] = { { "hdmi", hdmi_dbg_show, 0, NULL }, }; -static int hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor) +static void hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor) { unsigned int i; for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_files); i++) hdmi_debugfs_files[i].data = hdmi; - return drm_debugfs_create_files(hdmi_debugfs_files, - ARRAY_SIZE(hdmi_debugfs_files), - minor->debugfs_root, minor); + drm_debugfs_create_files(hdmi_debugfs_files, + ARRAY_SIZE(hdmi_debugfs_files), + minor->debugfs_root, minor); } static void sti_hdmi_disable(struct drm_bridge *bridge) @@ -1113,10 +1113,7 @@ static int sti_hdmi_late_register(struct drm_connector *connector) = to_sti_hdmi_connector(connector); struct sti_hdmi *hdmi = hdmi_connector->hdmi; - if (hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary)) { - DRM_ERROR("HDMI debugfs setup failed\n"); - return -EINVAL; - } + hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary); return 0; } diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index 1015abe0ce08..5a4e12194a77 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -639,16 +639,16 @@ static struct drm_info_list hqvdp_debugfs_files[] = { { "hqvdp", hqvdp_dbg_show, 0, NULL }, }; -static int hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor) +static void hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor) { unsigned int i; for (i = 0; i < ARRAY_SIZE(hqvdp_debugfs_files); i++) hqvdp_debugfs_files[i].data = hqvdp; - return drm_debugfs_create_files(hqvdp_debugfs_files, - ARRAY_SIZE(hqvdp_debugfs_files), - minor->debugfs_root, minor); + drm_debugfs_create_files(hqvdp_debugfs_files, + ARRAY_SIZE(hqvdp_debugfs_files), + minor->debugfs_root, minor); } /** @@ -1274,7 +1274,9 @@ static int sti_hqvdp_late_register(struct drm_plane *drm_plane) struct sti_plane *plane = to_sti_plane(drm_plane); struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane); - return hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary); + hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary); + + return 0; } static const struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = { diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c index c3a3e1e5fc8a..7e5f14646625 100644 --- a/drivers/gpu/drm/sti/sti_mixer.c +++ b/drivers/gpu/drm/sti/sti_mixer.c @@ -178,7 +178,7 @@ static struct drm_info_list mixer1_debugfs_files[] = { { "mixer_aux", mixer_dbg_show, 0, NULL }, }; -int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor) +void sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor) { unsigned int i; struct drm_info_list *mixer_debugfs_files; @@ -194,15 +194,15 @@ int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor) nb_files = ARRAY_SIZE(mixer1_debugfs_files); break; default: - return -EINVAL; + return; } for (i = 0; i < nb_files; i++) mixer_debugfs_files[i].data = mixer; - return drm_debugfs_create_files(mixer_debugfs_files, - nb_files, - minor->debugfs_root, minor); + drm_debugfs_create_files(mixer_debugfs_files, + nb_files, + 
minor->debugfs_root, minor); } void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable) diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h index d9544246913a..ab06beb7b258 100644 --- a/drivers/gpu/drm/sti/sti_mixer.h +++ b/drivers/gpu/drm/sti/sti_mixer.h @@ -58,7 +58,7 @@ int sti_mixer_active_video_area(struct sti_mixer *mixer, void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable); -int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor); +void sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor); /* depth in Cross-bar control = z order */ #define GAM_MIXER_NB_DEPTH_LEVEL 6 diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c index c36a8da373cb..df3817f0fd30 100644 --- a/drivers/gpu/drm/sti/sti_tvout.c +++ b/drivers/gpu/drm/sti/sti_tvout.c @@ -570,16 +570,16 @@ static struct drm_info_list tvout_debugfs_files[] = { { "tvout", tvout_dbg_show, 0, NULL }, }; -static int tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor) +static void tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor) { unsigned int i; for (i = 0; i < ARRAY_SIZE(tvout_debugfs_files); i++) tvout_debugfs_files[i].data = tvout; - return drm_debugfs_create_files(tvout_debugfs_files, - ARRAY_SIZE(tvout_debugfs_files), - minor->debugfs_root, minor); + drm_debugfs_create_files(tvout_debugfs_files, + ARRAY_SIZE(tvout_debugfs_files), + minor->debugfs_root, minor); } static void sti_tvout_encoder_dpms(struct drm_encoder *encoder, int mode) @@ -603,14 +603,11 @@ static void sti_tvout_encoder_destroy(struct drm_encoder *encoder) static int sti_tvout_late_register(struct drm_encoder *encoder) { struct sti_tvout *tvout = to_sti_tvout(encoder); - int ret; if (tvout->debugfs_registered) return 0; - ret = tvout_debugfs_init(tvout, encoder->dev->primary); - if (ret) - return ret; + tvout_debugfs_init(tvout, encoder->dev->primary); tvout->debugfs_registered = true; return 0; diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c index 2d4230410464..2d818397918d 100644 --- a/drivers/gpu/drm/sti/sti_vid.c +++ b/drivers/gpu/drm/sti/sti_vid.c @@ -124,16 +124,16 @@ static struct drm_info_list vid_debugfs_files[] = { { "vid", vid_dbg_show, 0, NULL }, }; -int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor) +void vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor) { unsigned int i; for (i = 0; i < ARRAY_SIZE(vid_debugfs_files); i++) vid_debugfs_files[i].data = vid; - return drm_debugfs_create_files(vid_debugfs_files, - ARRAY_SIZE(vid_debugfs_files), - minor->debugfs_root, minor); + drm_debugfs_create_files(vid_debugfs_files, + ARRAY_SIZE(vid_debugfs_files), + minor->debugfs_root, minor); } void sti_vid_commit(struct sti_vid *vid, diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h index 9dbd78461de1..991849ba50b5 100644 --- a/drivers/gpu/drm/sti/sti_vid.h +++ b/drivers/gpu/drm/sti/sti_vid.h @@ -26,6 +26,6 @@ void sti_vid_disable(struct sti_vid *vid); struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev, int id, void __iomem *baseaddr); -int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor); +void vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor); #endif diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c index ea9fcbdc68b3..0f85dd86cafa 100644 --- a/drivers/gpu/drm/stm/drv.c +++ b/drivers/gpu/drm/stm/drv.c @@ -88,7 +88,9 @@ static int drv_load(struct 
drm_device *ddev) ddev->dev_private = (void *)ldev; - drm_mode_config_init(ddev); + ret = drmm_mode_config_init(ddev); + if (ret) + return ret; /* * set max width and height as default value. @@ -103,7 +105,7 @@ static int drv_load(struct drm_device *ddev) ret = ltdc_load(ddev); if (ret) - goto err; + return ret; drm_mode_config_reset(ddev); drm_kms_helper_poll_init(ddev); @@ -111,9 +113,6 @@ static int drv_load(struct drm_device *ddev) platform_set_drvdata(pdev, ddev); return 0; -err: - drm_mode_config_cleanup(ddev); - return ret; } static void drv_unload(struct drm_device *ddev) @@ -122,7 +121,6 @@ static void drv_unload(struct drm_device *ddev) drm_kms_helper_poll_fini(ddev); ltdc_unload(ddev); - drm_mode_config_cleanup(ddev); } static __maybe_unused int drv_suspend(struct device *dev) diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index 68d4644ac2dc..e324d7db7b7d 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -22,6 +22,7 @@ #include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "sun4i_backend.h" #include "sun4i_crtc.h" @@ -204,10 +205,6 @@ static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = { .mode_valid = sun4i_hdmi_mode_valid, }; -static const struct drm_encoder_funcs sun4i_hdmi_funcs = { - .destroy = drm_encoder_cleanup, -}; - static int sun4i_hdmi_get_modes(struct drm_connector *connector) { struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector); @@ -611,11 +608,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master, drm_encoder_helper_add(&hdmi->encoder, &sun4i_hdmi_helper_funcs); - ret = drm_encoder_init(drm, - &hdmi->encoder, - &sun4i_hdmi_funcs, - DRM_MODE_ENCODER_TMDS, - NULL); + ret = drm_simple_encoder_init(drm, &hdmi->encoder, + DRM_MODE_ENCODER_TMDS); if (ret) { dev_err(dev, "Couldn't initialise the HDMI encoder\n"); goto err_put_ddc_i2c; diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c index 26e5c7ceb8ff..ffda3184aa12 100644 --- a/drivers/gpu/drm/sun4i/sun4i_lvds.c +++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c @@ -12,6 +12,7 @@ #include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "sun4i_crtc.h" #include "sun4i_tcon.h" @@ -96,10 +97,6 @@ static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = { .enable = sun4i_lvds_encoder_enable, }; -static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = { - .destroy = drm_encoder_cleanup, -}; - int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon) { struct drm_encoder *encoder; @@ -121,11 +118,8 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon) drm_encoder_helper_add(&lvds->encoder, &sun4i_lvds_enc_helper_funcs); - ret = drm_encoder_init(drm, - &lvds->encoder, - &sun4i_lvds_enc_funcs, - DRM_MODE_ENCODER_LVDS, - NULL); + ret = drm_simple_encoder_init(drm, &lvds->encoder, + DRM_MODE_ENCODER_LVDS); if (ret) { dev_err(drm->dev, "Couldn't initialise the lvds encoder\n"); goto err_out; diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index 3b23d5be3cf3..5a7d43939ae6 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c @@ -14,6 +14,7 @@ #include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include 
"sun4i_crtc.h" #include "sun4i_tcon.h" @@ -188,15 +189,6 @@ static struct drm_encoder_helper_funcs sun4i_rgb_enc_helper_funcs = { .mode_valid = sun4i_rgb_mode_valid, }; -static void sun4i_rgb_enc_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -static struct drm_encoder_funcs sun4i_rgb_enc_funcs = { - .destroy = sun4i_rgb_enc_destroy, -}; - int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon) { struct drm_encoder *encoder; @@ -218,11 +210,8 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon) drm_encoder_helper_add(&rgb->encoder, &sun4i_rgb_enc_helper_funcs); - ret = drm_encoder_init(drm, - &rgb->encoder, - &sun4i_rgb_enc_funcs, - DRM_MODE_ENCODER_NONE, - NULL); + ret = drm_simple_encoder_init(drm, &rgb->encoder, + DRM_MODE_ENCODER_NONE); if (ret) { dev_err(drm->dev, "Couldn't initialise the rgb encoder\n"); goto err_out; diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 624437b27cdc..359b56e43b83 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -812,10 +812,8 @@ static int sun4i_tcon_init_irq(struct device *dev, int irq, ret; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "Couldn't retrieve the TCON interrupt\n"); + if (irq < 0) return irq; - } ret = devm_request_irq(dev, irq, sun4i_tcon_handler, 0, dev_name(dev), tcon); diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c index 39c15282e448..63f4428ac3bf 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tv.c +++ b/drivers/gpu/drm/sun4i/sun4i_tv.c @@ -19,6 +19,7 @@ #include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "sun4i_crtc.h" #include "sun4i_drv.h" @@ -473,15 +474,6 @@ static struct drm_encoder_helper_funcs sun4i_tv_helper_funcs = { .mode_set = sun4i_tv_mode_set, }; -static void sun4i_tv_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -static struct drm_encoder_funcs sun4i_tv_funcs = { - .destroy = sun4i_tv_destroy, -}; - static int sun4i_tv_comp_get_modes(struct drm_connector *connector) { int i; @@ -592,11 +584,8 @@ static int sun4i_tv_bind(struct device *dev, struct device *master, drm_encoder_helper_add(&tv->encoder, &sun4i_tv_helper_funcs); - ret = drm_encoder_init(drm, - &tv->encoder, - &sun4i_tv_funcs, - DRM_MODE_ENCODER_TVDAC, - NULL); + ret = drm_simple_encoder_init(drm, &tv->encoder, + DRM_MODE_ENCODER_TVDAC); if (ret) { dev_err(dev, "Couldn't initialise the TV encoder\n"); goto err_disable_clk; diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index 059939789730..f6c67dd87a05 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -24,6 +24,7 @@ #include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "sun4i_crtc.h" #include "sun4i_tcon.h" @@ -846,10 +847,6 @@ static const struct drm_encoder_helper_funcs sun6i_dsi_enc_helper_funcs = { .enable = sun6i_dsi_encoder_enable, }; -static const struct drm_encoder_funcs sun6i_dsi_enc_funcs = { - .destroy = drm_encoder_cleanup, -}; - static u32 sun6i_dsi_dcs_build_pkt_hdr(struct sun6i_dsi *dsi, const struct mipi_dsi_msg *msg) { @@ -1062,11 +1059,8 @@ static int sun6i_dsi_bind(struct device *dev, struct device *master, drm_encoder_helper_add(&dsi->encoder, &sun6i_dsi_enc_helper_funcs); - ret = drm_encoder_init(drm, - 
&dsi->encoder, - &sun6i_dsi_enc_funcs, - DRM_MODE_ENCODER_DSI, - NULL); + ret = drm_simple_encoder_init(drm, &dsi->encoder, + DRM_MODE_ENCODER_DSI); if (ret) { dev_err(dsi->dev, "Couldn't initialise the DSI encoder\n"); return ret; diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c index e8a317d5ba19..972682bb8000 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c @@ -10,6 +10,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_of.h> +#include <drm/drm_simple_kms_helper.h> #include "sun8i_dw_hdmi.h" #include "sun8i_tcon_top.h" @@ -29,10 +30,6 @@ sun8i_dw_hdmi_encoder_helper_funcs = { .mode_set = sun8i_dw_hdmi_encoder_mode_set, }; -static const struct drm_encoder_funcs sun8i_dw_hdmi_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static enum drm_mode_status sun8i_dw_hdmi_mode_valid_a83t(struct drm_connector *connector, const struct drm_display_mode *mode) @@ -220,8 +217,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master, } drm_encoder_helper_add(encoder, &sun8i_dw_hdmi_encoder_helper_funcs); - drm_encoder_init(drm, encoder, &sun8i_dw_hdmi_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); sun8i_hdmi_phy_init(hdmi->phy); diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c index 4a64f7ae437a..56cc037fd312 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.c +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c @@ -27,314 +27,225 @@ #include "sun8i_vi_layer.h" #include "sunxi_engine.h" +struct de2_fmt_info { + u32 drm_fmt; + u32 de2_fmt; +}; + static const struct de2_fmt_info de2_formats[] = { { .drm_fmt = DRM_FORMAT_ARGB8888, .de2_fmt = SUN8I_MIXER_FBFMT_ARGB8888, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_ABGR8888, .de2_fmt = SUN8I_MIXER_FBFMT_ABGR8888, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_RGBA8888, .de2_fmt = SUN8I_MIXER_FBFMT_RGBA8888, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_BGRA8888, .de2_fmt = SUN8I_MIXER_FBFMT_BGRA8888, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_XRGB8888, .de2_fmt = SUN8I_MIXER_FBFMT_XRGB8888, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_XBGR8888, .de2_fmt = SUN8I_MIXER_FBFMT_XBGR8888, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_RGBX8888, .de2_fmt = SUN8I_MIXER_FBFMT_RGBX8888, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_BGRX8888, .de2_fmt = SUN8I_MIXER_FBFMT_BGRX8888, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_RGB888, .de2_fmt = SUN8I_MIXER_FBFMT_RGB888, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_BGR888, .de2_fmt = SUN8I_MIXER_FBFMT_BGR888, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_RGB565, .de2_fmt = SUN8I_MIXER_FBFMT_RGB565, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_BGR565, .de2_fmt = SUN8I_MIXER_FBFMT_BGR565, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_ARGB4444, .de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { /* for DE2 VI layer which ignores alpha */ .drm_fmt = DRM_FORMAT_XRGB4444, .de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_ABGR4444, .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { /* for 
DE2 VI layer which ignores alpha */ .drm_fmt = DRM_FORMAT_XBGR4444, .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_RGBA4444, .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { /* for DE2 VI layer which ignores alpha */ .drm_fmt = DRM_FORMAT_RGBX4444, .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_BGRA4444, .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { /* for DE2 VI layer which ignores alpha */ .drm_fmt = DRM_FORMAT_BGRX4444, .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_ARGB1555, .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { /* for DE2 VI layer which ignores alpha */ .drm_fmt = DRM_FORMAT_XRGB1555, .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_ABGR1555, .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { /* for DE2 VI layer which ignores alpha */ .drm_fmt = DRM_FORMAT_XBGR1555, .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_RGBA5551, .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { /* for DE2 VI layer which ignores alpha */ .drm_fmt = DRM_FORMAT_RGBX5551, .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_BGRA5551, .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { /* for DE2 VI layer which ignores alpha */ .drm_fmt = DRM_FORMAT_BGRX5551, .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_ARGB2101010, .de2_fmt = SUN8I_MIXER_FBFMT_ARGB2101010, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_ABGR2101010, .de2_fmt = SUN8I_MIXER_FBFMT_ABGR2101010, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_RGBA1010102, .de2_fmt = SUN8I_MIXER_FBFMT_RGBA1010102, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_BGRA1010102, .de2_fmt = SUN8I_MIXER_FBFMT_BGRA1010102, - .rgb = true, - .csc = SUN8I_CSC_MODE_OFF, }, { .drm_fmt = DRM_FORMAT_UYVY, .de2_fmt = SUN8I_MIXER_FBFMT_UYVY, - .rgb = false, - .csc = SUN8I_CSC_MODE_YUV2RGB, }, { .drm_fmt = DRM_FORMAT_VYUY, .de2_fmt = SUN8I_MIXER_FBFMT_VYUY, - .rgb = false, - .csc = SUN8I_CSC_MODE_YUV2RGB, }, { .drm_fmt = DRM_FORMAT_YUYV, .de2_fmt = SUN8I_MIXER_FBFMT_YUYV, - .rgb = false, - .csc = SUN8I_CSC_MODE_YUV2RGB, }, { .drm_fmt = DRM_FORMAT_YVYU, .de2_fmt = SUN8I_MIXER_FBFMT_YVYU, - .rgb = false, - .csc = SUN8I_CSC_MODE_YUV2RGB, }, { .drm_fmt = DRM_FORMAT_NV16, .de2_fmt = SUN8I_MIXER_FBFMT_NV16, - .rgb = false, - .csc = SUN8I_CSC_MODE_YUV2RGB, }, { .drm_fmt = DRM_FORMAT_NV61, .de2_fmt = SUN8I_MIXER_FBFMT_NV61, - .rgb = false, - .csc = SUN8I_CSC_MODE_YUV2RGB, }, { .drm_fmt = DRM_FORMAT_NV12, .de2_fmt = SUN8I_MIXER_FBFMT_NV12, - .rgb = false, - .csc = SUN8I_CSC_MODE_YUV2RGB, }, { .drm_fmt = DRM_FORMAT_NV21, .de2_fmt = SUN8I_MIXER_FBFMT_NV21, - .rgb = false, - .csc = SUN8I_CSC_MODE_YUV2RGB, }, { .drm_fmt = DRM_FORMAT_YUV422, .de2_fmt = SUN8I_MIXER_FBFMT_YUV422, - .rgb = false, - .csc = SUN8I_CSC_MODE_YUV2RGB, }, { .drm_fmt = DRM_FORMAT_YUV420, .de2_fmt = SUN8I_MIXER_FBFMT_YUV420, - .rgb = false, - .csc = SUN8I_CSC_MODE_YUV2RGB, }, { .drm_fmt = 
DRM_FORMAT_YUV411, .de2_fmt = SUN8I_MIXER_FBFMT_YUV411, - .rgb = false, - .csc = SUN8I_CSC_MODE_YUV2RGB, }, { .drm_fmt = DRM_FORMAT_YVU422, .de2_fmt = SUN8I_MIXER_FBFMT_YUV422, - .rgb = false, - .csc = SUN8I_CSC_MODE_YVU2RGB, }, { .drm_fmt = DRM_FORMAT_YVU420, .de2_fmt = SUN8I_MIXER_FBFMT_YUV420, - .rgb = false, - .csc = SUN8I_CSC_MODE_YVU2RGB, }, { .drm_fmt = DRM_FORMAT_YVU411, .de2_fmt = SUN8I_MIXER_FBFMT_YUV411, - .rgb = false, - .csc = SUN8I_CSC_MODE_YVU2RGB, }, { .drm_fmt = DRM_FORMAT_P010, .de2_fmt = SUN8I_MIXER_FBFMT_P010_YUV, - .rgb = false, - .csc = SUN8I_CSC_MODE_YUV2RGB, }, { .drm_fmt = DRM_FORMAT_P210, .de2_fmt = SUN8I_MIXER_FBFMT_P210_YUV, - .rgb = false, - .csc = SUN8I_CSC_MODE_YUV2RGB, }, }; -const struct de2_fmt_info *sun8i_mixer_format_info(u32 format) +int sun8i_mixer_drm_format_to_hw(u32 format, u32 *hw_format) { unsigned int i; for (i = 0; i < ARRAY_SIZE(de2_formats); ++i) - if (de2_formats[i].drm_fmt == format) - return &de2_formats[i]; + if (de2_formats[i].drm_fmt == format) { + *hw_format = de2_formats[i].de2_fmt; + return 0; + } - return NULL; + return -EINVAL; } static void sun8i_mixer_commit(struct sunxi_engine *engine) diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h index 345b28b0a80a..7576b523fdbb 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.h +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h @@ -10,7 +10,6 @@ #include <linux/regmap.h> #include <linux/reset.h> -#include "sun8i_csc.h" #include "sunxi_engine.h" #define SUN8I_MIXER_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1)) @@ -144,13 +143,6 @@ #define SUN50I_MIXER_CDC0_EN 0xd0000 #define SUN50I_MIXER_CDC1_EN 0xd8000 -struct de2_fmt_info { - u32 drm_fmt; - u32 de2_fmt; - bool rgb; - enum sun8i_csc_mode csc; -}; - /** * struct sun8i_mixer_cfg - mixer HW configuration * @vi_num: number of VI channels @@ -210,5 +202,5 @@ sun8i_channel_base(struct sun8i_mixer *mixer, int channel) return DE2_CH_BASE + channel * DE2_CH_SIZE; } -const struct de2_fmt_info *sun8i_mixer_format_info(u32 format); +int sun8i_mixer_drm_format_to_hw(u32 format, u32 *hw_format); #endif /* _SUN8I_MIXER_H_ */ diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c index c87fd842918e..54f937a7d5e7 100644 --- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c +++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c @@ -19,8 +19,8 @@ #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> -#include "sun8i_ui_layer.h" #include "sun8i_mixer.h" +#include "sun8i_ui_layer.h" #include "sun8i_ui_scaler.h" static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel, @@ -174,18 +174,20 @@ static int sun8i_ui_layer_update_formats(struct sun8i_mixer *mixer, int channel, int overlay, struct drm_plane *plane) { struct drm_plane_state *state = plane->state; - const struct de2_fmt_info *fmt_info; - u32 val, ch_base; + const struct drm_format_info *fmt; + u32 val, ch_base, hw_fmt; + int ret; ch_base = sun8i_channel_base(mixer, channel); - fmt_info = sun8i_mixer_format_info(state->fb->format->format); - if (!fmt_info || !fmt_info->rgb) { + fmt = state->fb->format; + ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt); + if (ret || fmt->is_yuv) { DRM_DEBUG_DRIVER("Invalid format\n"); return -EINVAL; } - val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET; + val = hw_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET; regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay), SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK, val); diff --git 
a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c index b8398ca18b0f..22c8c5375d0d 100644 --- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c +++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c @@ -12,8 +12,9 @@ #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> -#include "sun8i_vi_layer.h" +#include "sun8i_csc.h" #include "sun8i_mixer.h" +#include "sun8i_vi_layer.h" #include "sun8i_vi_scaler.h" static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel, @@ -210,28 +211,47 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel, return 0; } +static u32 sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format) +{ + if (!format->is_yuv) + return SUN8I_CSC_MODE_OFF; + + switch (format->format) { + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YVU420: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YVU444: + return SUN8I_CSC_MODE_YVU2RGB; + default: + return SUN8I_CSC_MODE_YUV2RGB; + } +} + static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel, int overlay, struct drm_plane *plane) { struct drm_plane_state *state = plane->state; - const struct de2_fmt_info *fmt_info; - u32 val, ch_base; + u32 val, ch_base, csc_mode, hw_fmt; + const struct drm_format_info *fmt; + int ret; ch_base = sun8i_channel_base(mixer, channel); - fmt_info = sun8i_mixer_format_info(state->fb->format->format); - if (!fmt_info) { + fmt = state->fb->format; + ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt); + if (ret) { DRM_DEBUG_DRIVER("Invalid format\n"); - return -EINVAL; + return ret; } - val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET; + val = hw_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET; regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay), SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK, val); - if (fmt_info->csc != SUN8I_CSC_MODE_OFF) { - sun8i_csc_set_ccsc_coefficients(mixer, channel, fmt_info->csc, + csc_mode = sun8i_vi_layer_get_csc_mode(fmt); + if (csc_mode != SUN8I_CSC_MODE_OFF) { + sun8i_csc_set_ccsc_coefficients(mixer, channel, csc_mode, state->color_encoding, state->color_range); sun8i_csc_enable_ccsc(mixer, channel, true); @@ -239,7 +259,7 @@ static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel, sun8i_csc_enable_ccsc(mixer, channel, false); } - if (fmt_info->rgb) + if (!fmt->is_yuv) val = SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE; else val = 0; diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 1a7b08f35776..83f31c6e891c 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -1496,7 +1496,6 @@ static int tegra_dc_late_register(struct drm_crtc *crtc) struct drm_minor *minor = crtc->dev->primary; struct dentry *root; struct tegra_dc *dc = to_tegra_dc(crtc); - int err; #ifdef CONFIG_DEBUG_FS root = crtc->debugfs_entry; @@ -1512,17 +1511,9 @@ static int tegra_dc_late_register(struct drm_crtc *crtc) for (i = 0; i < count; i++) dc->debugfs_files[i].data = dc; - err = drm_debugfs_create_files(dc->debugfs_files, count, root, minor); - if (err < 0) - goto free; + drm_debugfs_create_files(dc->debugfs_files, count, root, minor); return 0; - -free: - kfree(dc->debugfs_files); - dc->debugfs_files = NULL; - - return err; } static void tegra_dc_early_unregister(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c index 7dfb50f65067..105fb9cdbb3b 100644 --- a/drivers/gpu/drm/tegra/dpaux.c +++ b/drivers/gpu/drm/tegra/dpaux.c @@ -5,12 +5,10 @@
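The tegra_dc_late_register() change just above is one instance of a pattern repeated throughout this series: debugfs file creation is treated as best-effort, so callers stop checking the result of drm_debugfs_create_files() and the late_register hooks simply return 0. A minimal, hypothetical sketch of the resulting shape (the example_* names are made up for illustration; the DRM calls are real):

#include <linux/kernel.h>
#include <linux/seq_file.h>

#include <drm/drm_connector.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>

/* Hypothetical show callback for the sketch below. */
static int example_state_show(struct seq_file *m, void *data)
{
	seq_puts(m, "ok\n");
	return 0;
}

static struct drm_info_list example_debugfs_files[] = {
	{ "state", example_state_show, 0, NULL },
};

/*
 * Debugfs registration is best-effort: create the files and return 0
 * regardless, so a debugfs problem can no longer fail device
 * registration.
 */
static int example_late_register(struct drm_connector *connector)
{
	struct drm_minor *minor = connector->dev->primary;

	drm_debugfs_create_files(example_debugfs_files,
				 ARRAY_SIZE(example_debugfs_files),
				 minor->debugfs_root, minor);

	return 0;
}
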
#include <linux/clk.h> #include <linux/delay.h> -#include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of_device.h> -#include <linux/of_gpio.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/pinctrl/pinctrl.h> #include <linux/pinctrl/pinmux.h> diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index bd268028fb3d..d4f51b5c7ee5 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -839,11 +839,11 @@ static struct drm_info_list tegra_debugfs_list[] = { { "iova", tegra_debugfs_iova, 0 }, }; -static int tegra_debugfs_init(struct drm_minor *minor) +static void tegra_debugfs_init(struct drm_minor *minor) { - return drm_debugfs_create_files(tegra_debugfs_list, - ARRAY_SIZE(tegra_debugfs_list), - minor->debugfs_root, minor); + drm_debugfs_create_files(tegra_debugfs_list, + ARRAY_SIZE(tegra_debugfs_list), + minor->debugfs_root, minor); } #endif diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index ed99b67deb29..b25443255be6 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -9,7 +9,7 @@ #include <linux/host1x.h> #include <linux/iova.h> -#include <linux/of_gpio.h> +#include <linux/gpio/consumer.h> #include <drm/drm_atomic.h> #include <drm/drm_edid.h> @@ -152,8 +152,6 @@ enum drm_connector_status tegra_output_connector_detect(struct drm_connector *connector, bool force); void tegra_output_connector_destroy(struct drm_connector *connector); -void tegra_output_encoder_destroy(struct drm_encoder *encoder); - /* from dpaux.c */ struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np); enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux); diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index 88b9d64c77bf..38beab9ab4f8 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -22,6 +22,7 @@ #include <drm/drm_file.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_panel.h> +#include <drm/drm_simple_kms_helper.h> #include "dc.h" #include "drm.h" @@ -234,7 +235,6 @@ static int tegra_dsi_late_register(struct drm_connector *connector) struct drm_minor *minor = connector->dev->primary; struct dentry *root = connector->debugfs_entry; struct tegra_dsi *dsi = to_dsi(output); - int err; dsi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files), GFP_KERNEL); @@ -244,17 +244,9 @@ static int tegra_dsi_late_register(struct drm_connector *connector) for (i = 0; i < count; i++) dsi->debugfs_files[i].data = dsi; - err = drm_debugfs_create_files(dsi->debugfs_files, count, root, minor); - if (err < 0) - goto free; + drm_debugfs_create_files(dsi->debugfs_files, count, root, minor); return 0; - -free: - kfree(dsi->debugfs_files); - dsi->debugfs_files = NULL; - - return err; } static void tegra_dsi_early_unregister(struct drm_connector *connector) @@ -824,10 +816,6 @@ static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs .mode_valid = tegra_dsi_connector_mode_valid, }; -static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = { - .destroy = tegra_output_encoder_destroy, -}; - static void tegra_dsi_unprepare(struct tegra_dsi *dsi) { int err; @@ -1058,9 +1046,8 @@ static int tegra_dsi_init(struct host1x_client *client) &tegra_dsi_connector_helper_funcs); dsi->output.connector.dpms = DRM_MODE_DPMS_OFF; - drm_encoder_init(drm, &dsi->output.encoder, - &tegra_dsi_encoder_funcs, - DRM_MODE_ENCODER_DSI, NULL); + drm_simple_encoder_init(drm, 
&dsi->output.encoder, + DRM_MODE_ENCODER_DSI); drm_encoder_helper_add(&dsi->output.encoder, &tegra_dsi_encoder_helper_funcs); diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index b8a328f53862..2b0666ac681b 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c @@ -4,7 +4,7 @@ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. * * Based on the KMS/FB CMA helpers - * Copyright (C) 2012 Analog Device Inc. + * Copyright (C) 2012 Analog Devices Inc. */ #include <linux/console.h> diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index 38252c0f068d..d09a24931c87 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -7,7 +7,6 @@ #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/delay.h> -#include <linux/gpio.h> #include <linux/hdmi.h> #include <linux/math64.h> #include <linux/module.h> @@ -22,6 +21,7 @@ #include <drm/drm_file.h> #include <drm/drm_fourcc.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "hda.h" #include "hdmi.h" @@ -1064,7 +1064,6 @@ static int tegra_hdmi_late_register(struct drm_connector *connector) struct drm_minor *minor = connector->dev->primary; struct dentry *root = connector->debugfs_entry; struct tegra_hdmi *hdmi = to_hdmi(output); - int err; hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files), GFP_KERNEL); @@ -1074,17 +1073,9 @@ static int tegra_hdmi_late_register(struct drm_connector *connector) for (i = 0; i < count; i++) hdmi->debugfs_files[i].data = hdmi; - err = drm_debugfs_create_files(hdmi->debugfs_files, count, root, minor); - if (err < 0) - goto free; + drm_debugfs_create_files(hdmi->debugfs_files, count, root, minor); return 0; - -free: - kfree(hdmi->debugfs_files); - hdmi->debugfs_files = NULL; - - return err; } static void tegra_hdmi_early_unregister(struct drm_connector *connector) @@ -1136,10 +1127,6 @@ tegra_hdmi_connector_helper_funcs = { .mode_valid = tegra_hdmi_connector_mode_valid, }; -static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = { - .destroy = tegra_output_encoder_destroy, -}; - static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder) { struct tegra_output *output = encoder_to_output(encoder); @@ -1445,8 +1432,8 @@ static int tegra_hdmi_init(struct host1x_client *client) &tegra_hdmi_connector_helper_funcs); hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF; - drm_encoder_init(drm, &hdmi->output.encoder, &tegra_hdmi_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + drm_simple_encoder_init(drm, &hdmi->output.encoder, + DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(&hdmi->output.encoder, &tegra_hdmi_encoder_helper_funcs); diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index a264259b97a2..e36e5e7c2f69 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -6,6 +6,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_panel.h> +#include <drm/drm_simple_kms_helper.h> #include "drm.h" #include "dc.h" @@ -79,11 +80,6 @@ void tegra_output_connector_destroy(struct drm_connector *connector) drm_connector_cleanup(connector); } -void tegra_output_encoder_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - static irqreturn_t hpd_irq(int irq, void *data) { struct tegra_output *output = data; diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c index 4be4dfd4a68a..0562a7eb793f 100644 --- a/drivers/gpu/drm/tegra/rgb.c +++ b/drivers/gpu/drm/tegra/rgb.c @@ -8,6 +8,7 @@ 
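The encoder changes in this series all lean on the same helper: drm_simple_encoder_init() installs a default .destroy callback that calls drm_encoder_cleanup(), which is why every driver-local drm_encoder_funcs that only provided .destroy can be removed. A minimal sketch of the resulting pattern, with hypothetical example_* names (only the embedded encoder and the two DRM calls matter):

#include <drm/drm_encoder.h>
#include <drm/drm_simple_kms_helper.h>

/* Hypothetical driver state; only the embedded encoder is relevant. */
struct example_output {
	struct drm_encoder encoder;
};

static int example_output_init(struct drm_device *drm,
			       struct example_output *out,
			       uint32_t possible_crtcs)
{
	out->encoder.possible_crtcs = possible_crtcs;

	/*
	 * No driver-specific drm_encoder_funcs needed: the helper wires
	 * up a .destroy callback that just calls drm_encoder_cleanup().
	 */
	return drm_simple_encoder_init(drm, &out->encoder,
				       DRM_MODE_ENCODER_TMDS);
}
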
#include <drm/drm_atomic_helper.h> #include <drm/drm_panel.h> +#include <drm/drm_simple_kms_helper.h> #include "drm.h" #include "dc.h" @@ -110,10 +111,6 @@ static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs .mode_valid = tegra_rgb_connector_mode_valid, }; -static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = { - .destroy = tegra_output_encoder_destroy, -}; - static void tegra_rgb_encoder_disable(struct drm_encoder *encoder) { struct tegra_output *output = encoder_to_output(encoder); @@ -281,8 +278,7 @@ int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc) &tegra_rgb_connector_helper_funcs); output->connector.dpms = DRM_MODE_DPMS_OFF; - drm_encoder_init(drm, &output->encoder, &tegra_rgb_encoder_funcs, - DRM_MODE_ENCODER_LVDS, NULL); + drm_simple_encoder_init(drm, &output->encoder, DRM_MODE_ENCODER_LVDS); drm_encoder_helper_add(&output->encoder, &tegra_rgb_encoder_helper_funcs); diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 81226a4953c1..7cbcf9617f5e 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -6,7 +6,6 @@ #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/debugfs.h> -#include <linux/gpio.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of_device.h> @@ -23,6 +22,7 @@ #include <drm/drm_file.h> #include <drm/drm_panel.h> #include <drm/drm_scdc_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "dc.h" #include "dp.h" @@ -1687,7 +1687,6 @@ static int tegra_sor_late_register(struct drm_connector *connector) struct drm_minor *minor = connector->dev->primary; struct dentry *root = connector->debugfs_entry; struct tegra_sor *sor = to_sor(output); - int err; sor->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files), GFP_KERNEL); @@ -1697,17 +1696,9 @@ static int tegra_sor_late_register(struct drm_connector *connector) for (i = 0; i < count; i++) sor->debugfs_files[i].data = sor; - err = drm_debugfs_create_files(sor->debugfs_files, count, root, minor); - if (err < 0) - goto free; + drm_debugfs_create_files(sor->debugfs_files, count, root, minor); return 0; - -free: - kfree(sor->debugfs_files); - sor->debugfs_files = NULL; - - return err; } static void tegra_sor_early_unregister(struct drm_connector *connector) @@ -1805,10 +1796,6 @@ static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs .mode_valid = tegra_sor_connector_mode_valid, }; -static const struct drm_encoder_funcs tegra_sor_encoder_funcs = { - .destroy = tegra_output_encoder_destroy, -}; - static int tegra_sor_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, @@ -3102,8 +3089,7 @@ static int tegra_sor_init(struct host1x_client *client) &tegra_sor_connector_helper_funcs); sor->output.connector.dpms = DRM_MODE_DPMS_OFF; - drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs, - encoder, NULL); + drm_simple_encoder_init(drm, &sor->output.encoder, encoder); drm_encoder_helper_add(&sor->output.encoder, helpers); drm_connector_attach_encoder(&sor->output.connector, diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c index d95e4be2c7b9..ad449d104306 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.c +++ b/drivers/gpu/drm/tidss/tidss_drv.c @@ -17,6 +17,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_irq.h> +#include <drm/drm_managed.h> #include <drm/drm_probe_helper.h> #include "tidss_dispc.h" @@ -102,15 +103,7 @@ static const struct 
dev_pm_ops tidss_pm_ops = { static void tidss_release(struct drm_device *ddev) { - struct tidss_device *tidss = ddev->dev_private; - drm_kms_helper_poll_fini(ddev); - - tidss_modeset_cleanup(tidss); - - drm_dev_fini(ddev); - - kfree(tidss); } DEFINE_DRM_GEM_CMA_FOPS(tidss_fops); @@ -154,6 +147,7 @@ static int tidss_probe(struct platform_device *pdev) kfree(ddev); return ret; } + drmm_add_final_kfree(ddev, tidss); tidss->dev = dev; tidss->feat = of_device_get_match_data(dev); diff --git a/drivers/gpu/drm/tidss/tidss_encoder.c b/drivers/gpu/drm/tidss/tidss_encoder.c index 83785b0a66a9..4c0558286f5e 100644 --- a/drivers/gpu/drm/tidss/tidss_encoder.c +++ b/drivers/gpu/drm/tidss/tidss_encoder.c @@ -8,8 +8,9 @@ #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> -#include <drm/drm_panel.h> #include <drm/drm_of.h> +#include <drm/drm_panel.h> +#include <drm/drm_simple_kms_helper.h> #include "tidss_crtc.h" #include "tidss_drv.h" @@ -59,10 +60,6 @@ static const struct drm_encoder_helper_funcs encoder_helper_funcs = { .atomic_check = tidss_encoder_atomic_check, }; -static const struct drm_encoder_funcs encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss, u32 encoder_type, u32 possible_crtcs) { @@ -75,8 +72,7 @@ struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss, enc->possible_crtcs = possible_crtcs; - ret = drm_encoder_init(&tidss->ddev, enc, &encoder_funcs, - encoder_type, NULL); + ret = drm_simple_encoder_init(&tidss->ddev, enc, encoder_type); if (ret < 0) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c index 7d419960b030..4bd339a467a4 100644 --- a/drivers/gpu/drm/tidss/tidss_kms.c +++ b/drivers/gpu/drm/tidss/tidss_kms.c @@ -258,7 +258,9 @@ int tidss_modeset_init(struct tidss_device *tidss) dev_dbg(tidss->dev, "%s\n", __func__); - drm_mode_config_init(ddev); + ret = drmm_mode_config_init(ddev); + if (ret) + return ret; ddev->mode_config.min_width = 8; ddev->mode_config.min_height = 8; @@ -270,11 +272,11 @@ int tidss_modeset_init(struct tidss_device *tidss) ret = tidss_dispc_modeset_init(tidss); if (ret) - goto err_mode_config_cleanup; + return ret; ret = drm_vblank_init(ddev, tidss->num_crtcs); if (ret) - goto err_mode_config_cleanup; + return ret; /* Start with vertical blanking interrupt reporting disabled. 
*/ for (i = 0; i < tidss->num_crtcs; ++i) @@ -285,15 +287,4 @@ int tidss_modeset_init(struct tidss_device *tidss) dev_dbg(tidss->dev, "%s done\n", __func__); return 0; - -err_mode_config_cleanup: - drm_mode_config_cleanup(ddev); - return ret; -} - -void tidss_modeset_cleanup(struct tidss_device *tidss) -{ - struct drm_device *ddev = &tidss->ddev; - - drm_mode_config_cleanup(ddev); } diff --git a/drivers/gpu/drm/tidss/tidss_kms.h b/drivers/gpu/drm/tidss/tidss_kms.h index dda5625d0128..99aaff099f22 100644 --- a/drivers/gpu/drm/tidss/tidss_kms.h +++ b/drivers/gpu/drm/tidss/tidss_kms.h @@ -10,6 +10,5 @@ struct tidss_device; int tidss_modeset_init(struct tidss_device *tidss); -void tidss_modeset_cleanup(struct tidss_device *tidss); #endif diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index 0791a0200cc3..a5e9ee4c7fbf 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -390,10 +390,9 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev) ret = drm_dev_register(ddev, 0); if (ret) goto init_failed; + priv->is_registered = true; drm_fbdev_generic_setup(ddev, bpp); - - priv->is_registered = true; return 0; init_failed: @@ -478,26 +477,17 @@ static struct drm_info_list tilcdc_debugfs_list[] = { { "mm", tilcdc_mm_show, 0 }, }; -static int tilcdc_debugfs_init(struct drm_minor *minor) +static void tilcdc_debugfs_init(struct drm_minor *minor) { - struct drm_device *dev = minor->dev; struct tilcdc_module *mod; - int ret; - ret = drm_debugfs_create_files(tilcdc_debugfs_list, - ARRAY_SIZE(tilcdc_debugfs_list), - minor->debugfs_root, minor); + drm_debugfs_create_files(tilcdc_debugfs_list, + ARRAY_SIZE(tilcdc_debugfs_list), + minor->debugfs_root, minor); list_for_each_entry(mod, &module_list, list) if (mod->funcs->debugfs_init) mod->funcs->debugfs_init(mod, minor); - - if (ret) { - dev_err(dev->dev, "could not install tilcdc_debugfs_list\n"); - return ret; - } - - return ret; } #endif diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c index 28b7f703236e..b177525588c1 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_external.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c @@ -10,6 +10,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_of.h> +#include <drm/drm_simple_kms_helper.h> #include "tilcdc_drv.h" #include "tilcdc_external.h" @@ -83,10 +84,6 @@ int tilcdc_add_component_encoder(struct drm_device *ddev) return 0; } -static const struct drm_encoder_funcs tilcdc_external_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge) { @@ -131,9 +128,8 @@ int tilcdc_attach_external_device(struct drm_device *ddev) if (!priv->external_encoder) return -ENOMEM; - ret = drm_encoder_init(ddev, priv->external_encoder, - &tilcdc_external_encoder_funcs, - DRM_MODE_ENCODER_NONE, NULL); + ret = drm_simple_encoder_init(ddev, priv->external_encoder, + DRM_MODE_ENCODER_NONE); if (ret) { dev_err(ddev->dev, "drm_encoder_init() failed %d\n", ret); return ret; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c index 5584e656b857..12823d60c4e8 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c @@ -16,6 +16,7 @@ #include <drm/drm_connector.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "tilcdc_drv.h" #include 
"tilcdc_panel.h" @@ -74,10 +75,6 @@ static void panel_encoder_mode_set(struct drm_encoder *encoder, /* nothing needed */ } -static const struct drm_encoder_funcs panel_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static const struct drm_encoder_helper_funcs panel_encoder_helper_funcs = { .dpms = panel_encoder_dpms, .prepare = panel_encoder_prepare, @@ -102,8 +99,7 @@ static struct drm_encoder *panel_encoder_create(struct drm_device *dev, encoder = &panel_encoder->base; encoder->possible_crtcs = 1; - ret = drm_encoder_init(dev, encoder, &panel_encoder_funcs, - DRM_MODE_ENCODER_LVDS, NULL); + ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS); if (ret < 0) goto fail; diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c index a48173441ae0..6f0ea2827d62 100644 --- a/drivers/gpu/drm/tiny/gm12u320.c +++ b/drivers/gpu/drm/tiny/gm12u320.c @@ -19,6 +19,7 @@ #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_ioctl.h> +#include <drm/drm_managed.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> @@ -87,15 +88,13 @@ struct gm12u320_device { struct usb_device *udev; unsigned char *cmd_buf; unsigned char *data_buf[GM12U320_BLOCK_COUNT]; - bool pipe_enabled; struct { - bool run; - struct workqueue_struct *workq; - struct work_struct work; - wait_queue_head_t waitq; + struct delayed_work work; struct mutex lock; struct drm_framebuffer *fb; struct drm_rect rect; + int frame; + int draw_status_timeout; } fb_update; }; @@ -159,7 +158,7 @@ static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320) int i, block_size; const char *hdr; - gm12u320->cmd_buf = kmalloc(CMD_SIZE, GFP_KERNEL); + gm12u320->cmd_buf = drmm_kmalloc(&gm12u320->dev, CMD_SIZE, GFP_KERNEL); if (!gm12u320->cmd_buf) return -ENOMEM; @@ -172,7 +171,8 @@ static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320) hdr = data_block_header; } - gm12u320->data_buf[i] = kzalloc(block_size, GFP_KERNEL); + gm12u320->data_buf[i] = drmm_kzalloc(&gm12u320->dev, + block_size, GFP_KERNEL); if (!gm12u320->data_buf[i]) return -ENOMEM; @@ -182,26 +182,9 @@ static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320) data_block_footer, DATA_BLOCK_FOOTER_SIZE); } - gm12u320->fb_update.workq = create_singlethread_workqueue(DRIVER_NAME); - if (!gm12u320->fb_update.workq) - return -ENOMEM; - return 0; } -static void gm12u320_usb_free(struct gm12u320_device *gm12u320) -{ - int i; - - if (gm12u320->fb_update.workq) - destroy_workqueue(gm12u320->fb_update.workq); - - for (i = 0; i < GM12U320_BLOCK_COUNT; i++) - kfree(gm12u320->data_buf[i]); - - kfree(gm12u320->cmd_buf); -} - static int gm12u320_misc_request(struct gm12u320_device *gm12u320, u8 req_a, u8 req_b, u8 arg_a, u8 arg_b, u8 arg_c, u8 arg_d) @@ -344,80 +327,77 @@ unlock: static void gm12u320_fb_update_work(struct work_struct *work) { struct gm12u320_device *gm12u320 = - container_of(work, struct gm12u320_device, fb_update.work); - int draw_status_timeout = FIRST_FRAME_TIMEOUT; + container_of(to_delayed_work(work), struct gm12u320_device, + fb_update.work); int block, block_size, len; - int frame = 0; int ret = 0; - while (gm12u320->fb_update.run) { - gm12u320_copy_fb_to_blocks(gm12u320); - - for (block = 0; block < GM12U320_BLOCK_COUNT; block++) { - if (block == GM12U320_BLOCK_COUNT - 1) - block_size = DATA_LAST_BLOCK_SIZE; - else - block_size = DATA_BLOCK_SIZE; - - /* Send data command to device */ - memcpy(gm12u320->cmd_buf, 
cmd_data, CMD_SIZE); - gm12u320->cmd_buf[8] = block_size & 0xff; - gm12u320->cmd_buf[9] = block_size >> 8; - gm12u320->cmd_buf[20] = 0xfc - block * 4; - gm12u320->cmd_buf[21] = block | (frame << 7); - - ret = usb_bulk_msg(gm12u320->udev, - usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT), - gm12u320->cmd_buf, CMD_SIZE, &len, - CMD_TIMEOUT); - if (ret || len != CMD_SIZE) - goto err; - - /* Send data block to device */ - ret = usb_bulk_msg(gm12u320->udev, - usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT), - gm12u320->data_buf[block], block_size, - &len, DATA_TIMEOUT); - if (ret || len != block_size) - goto err; - - /* Read status */ - ret = usb_bulk_msg(gm12u320->udev, - usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT), - gm12u320->cmd_buf, READ_STATUS_SIZE, &len, - CMD_TIMEOUT); - if (ret || len != READ_STATUS_SIZE) - goto err; - } + gm12u320_copy_fb_to_blocks(gm12u320); + + for (block = 0; block < GM12U320_BLOCK_COUNT; block++) { + if (block == GM12U320_BLOCK_COUNT - 1) + block_size = DATA_LAST_BLOCK_SIZE; + else + block_size = DATA_BLOCK_SIZE; + + /* Send data command to device */ + memcpy(gm12u320->cmd_buf, cmd_data, CMD_SIZE); + gm12u320->cmd_buf[8] = block_size & 0xff; + gm12u320->cmd_buf[9] = block_size >> 8; + gm12u320->cmd_buf[20] = 0xfc - block * 4; + gm12u320->cmd_buf[21] = + block | (gm12u320->fb_update.frame << 7); - /* Send draw command to device */ - memcpy(gm12u320->cmd_buf, cmd_draw, CMD_SIZE); ret = usb_bulk_msg(gm12u320->udev, usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT), - gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT); + gm12u320->cmd_buf, CMD_SIZE, &len, + CMD_TIMEOUT); if (ret || len != CMD_SIZE) goto err; + /* Send data block to device */ + ret = usb_bulk_msg(gm12u320->udev, + usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT), + gm12u320->data_buf[block], block_size, + &len, DATA_TIMEOUT); + if (ret || len != block_size) + goto err; + /* Read status */ ret = usb_bulk_msg(gm12u320->udev, usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT), gm12u320->cmd_buf, READ_STATUS_SIZE, &len, - draw_status_timeout); + CMD_TIMEOUT); if (ret || len != READ_STATUS_SIZE) goto err; - - draw_status_timeout = CMD_TIMEOUT; - frame = !frame; - - /* - * We must draw a frame every 2s otherwise the projector - * switches back to showing its logo. - */ - wait_event_timeout(gm12u320->fb_update.waitq, - !gm12u320->fb_update.run || - gm12u320->fb_update.fb != NULL, - IDLE_TIMEOUT); } + + /* Send draw command to device */ + memcpy(gm12u320->cmd_buf, cmd_draw, CMD_SIZE); + ret = usb_bulk_msg(gm12u320->udev, + usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT), + gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT); + if (ret || len != CMD_SIZE) + goto err; + + /* Read status */ + ret = usb_bulk_msg(gm12u320->udev, + usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT), + gm12u320->cmd_buf, READ_STATUS_SIZE, &len, + gm12u320->fb_update.draw_status_timeout); + if (ret || len != READ_STATUS_SIZE) + goto err; + + gm12u320->fb_update.draw_status_timeout = CMD_TIMEOUT; + gm12u320->fb_update.frame = !gm12u320->fb_update.frame; + + /* + * We must draw a frame every 2s otherwise the projector + * switches back to showing its logo. 
+ */ + queue_delayed_work(system_long_wq, &gm12u320->fb_update.work, + IDLE_TIMEOUT); + return; err: /* Do not log errors caused by module unload or device unplug */ @@ -452,36 +432,24 @@ static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb, mutex_unlock(&gm12u320->fb_update.lock); if (wakeup) - wake_up(&gm12u320->fb_update.waitq); + mod_delayed_work(system_long_wq, &gm12u320->fb_update.work, 0); if (old_fb) drm_framebuffer_put(old_fb); } -static void gm12u320_start_fb_update(struct gm12u320_device *gm12u320) -{ - mutex_lock(&gm12u320->fb_update.lock); - gm12u320->fb_update.run = true; - mutex_unlock(&gm12u320->fb_update.lock); - - queue_work(gm12u320->fb_update.workq, &gm12u320->fb_update.work); -} - static void gm12u320_stop_fb_update(struct gm12u320_device *gm12u320) { - mutex_lock(&gm12u320->fb_update.lock); - gm12u320->fb_update.run = false; - mutex_unlock(&gm12u320->fb_update.lock); + struct drm_framebuffer *old_fb; - wake_up(&gm12u320->fb_update.waitq); - cancel_work_sync(&gm12u320->fb_update.work); + cancel_delayed_work_sync(&gm12u320->fb_update.work); mutex_lock(&gm12u320->fb_update.lock); - if (gm12u320->fb_update.fb) { - drm_framebuffer_put(gm12u320->fb_update.fb); - gm12u320->fb_update.fb = NULL; - } + old_fb = gm12u320->fb_update.fb; + gm12u320->fb_update.fb = NULL; mutex_unlock(&gm12u320->fb_update.lock); + + drm_framebuffer_put(old_fb); } static int gm12u320_set_ecomode(struct gm12u320_device *gm12u320) @@ -589,12 +557,11 @@ static void gm12u320_pipe_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { - struct gm12u320_device *gm12u320 = pipe->crtc.dev->dev_private; struct drm_rect rect = { 0, 0, GM12U320_USER_WIDTH, GM12U320_HEIGHT }; + struct gm12u320_device *gm12u320 = pipe->crtc.dev->dev_private; + gm12u320->fb_update.draw_status_timeout = FIRST_FRAME_TIMEOUT; gm12u320_fb_mark_dirty(plane_state->fb, &rect); - gm12u320_start_fb_update(gm12u320); - gm12u320->pipe_enabled = true; } static void gm12u320_pipe_disable(struct drm_simple_display_pipe *pipe) @@ -602,7 +569,6 @@ static void gm12u320_pipe_disable(struct drm_simple_display_pipe *pipe) struct gm12u320_device *gm12u320 = pipe->crtc.dev->dev_private; gm12u320_stop_fb_update(gm12u320); - gm12u320->pipe_enabled = false; } static void gm12u320_pipe_update(struct drm_simple_display_pipe *pipe, @@ -630,16 +596,6 @@ static const uint64_t gm12u320_pipe_modifiers[] = { DRM_FORMAT_MOD_INVALID }; -static void gm12u320_driver_release(struct drm_device *dev) -{ - struct gm12u320_device *gm12u320 = dev->dev_private; - - gm12u320_usb_free(gm12u320); - drm_mode_config_cleanup(dev); - drm_dev_fini(dev); - kfree(gm12u320); -} - DEFINE_DRM_GEM_FOPS(gm12u320_fops); static struct drm_driver gm12u320_drm_driver = { @@ -651,7 +607,6 @@ static struct drm_driver gm12u320_drm_driver = { .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, - .release = gm12u320_driver_release, .fops = &gm12u320_fops, DRM_GEM_SHMEM_DRIVER_OPS, }; @@ -681,19 +636,22 @@ static int gm12u320_usb_probe(struct usb_interface *interface, return -ENOMEM; gm12u320->udev = interface_to_usbdev(interface); - INIT_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work); + INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work); mutex_init(&gm12u320->fb_update.lock); - init_waitqueue_head(&gm12u320->fb_update.waitq); dev = &gm12u320->dev; - ret = drm_dev_init(dev, &gm12u320_drm_driver, &interface->dev); + ret = devm_drm_dev_init(&interface->dev, dev, &gm12u320_drm_driver); if (ret) { 
kfree(gm12u320); return ret; } dev->dev_private = gm12u320; + drmm_add_final_kfree(dev, gm12u320); + + ret = drmm_mode_config_init(dev); + if (ret) + return ret; - drm_mode_config_init(dev); dev->mode_config.min_width = GM12U320_USER_WIDTH; dev->mode_config.max_width = GM12U320_USER_WIDTH; dev->mode_config.min_height = GM12U320_HEIGHT; @@ -702,15 +660,15 @@ static int gm12u320_usb_probe(struct usb_interface *interface, ret = gm12u320_usb_alloc(gm12u320); if (ret) - goto err_put; + return ret; ret = gm12u320_set_ecomode(gm12u320); if (ret) - goto err_put; + return ret; ret = gm12u320_conn_init(gm12u320); if (ret) - goto err_put; + return ret; ret = drm_simple_display_pipe_init(&gm12u320->dev, &gm12u320->pipe, @@ -720,44 +678,34 @@ static int gm12u320_usb_probe(struct usb_interface *interface, gm12u320_pipe_modifiers, &gm12u320->conn); if (ret) - goto err_put; + return ret; drm_mode_config_reset(dev); usb_set_intfdata(interface, dev); ret = drm_dev_register(dev, 0); if (ret) - goto err_put; + return ret; drm_fbdev_generic_setup(dev, 0); return 0; - -err_put: - drm_dev_put(dev); - return ret; } static void gm12u320_usb_disconnect(struct usb_interface *interface) { struct drm_device *dev = usb_get_intfdata(interface); - struct gm12u320_device *gm12u320 = dev->dev_private; - gm12u320_stop_fb_update(gm12u320); drm_dev_unplug(dev); - drm_dev_put(dev); + drm_atomic_helper_shutdown(dev); } static __maybe_unused int gm12u320_suspend(struct usb_interface *interface, pm_message_t message) { struct drm_device *dev = usb_get_intfdata(interface); - struct gm12u320_device *gm12u320 = dev->dev_private; - if (gm12u320->pipe_enabled) - gm12u320_stop_fb_update(gm12u320); - - return 0; + return drm_mode_config_helper_suspend(dev); } static __maybe_unused int gm12u320_resume(struct usb_interface *interface) @@ -766,10 +714,8 @@ static __maybe_unused int gm12u320_resume(struct usb_interface *interface) struct gm12u320_device *gm12u320 = dev->dev_private; gm12u320_set_ecomode(gm12u320); - if (gm12u320->pipe_enabled) - gm12u320_start_fb_update(gm12u320); - return 0; + return drm_mode_config_helper_resume(dev); } static const struct usb_device_id id_table[] = { diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c index 9af8ff84974f..af7f3d10aac3 100644 --- a/drivers/gpu/drm/tiny/hx8357d.c +++ b/drivers/gpu/drm/tiny/hx8357d.c @@ -21,6 +21,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> #include <drm/drm_modeset_helper.h> #include <video/mipi_display.h> @@ -195,7 +196,6 @@ DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops); static struct drm_driver hx8357d_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &hx8357d_fops, - .release = mipi_dbi_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "hx8357d", @@ -236,8 +236,7 @@ static int hx8357d_probe(struct spi_device *spi) kfree(dbidev); return ret; } - - drm_mode_config_init(drm); + drmm_add_final_kfree(drm, dbidev); dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW); if (IS_ERR(dc)) { diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c index 802fb8dde1b6..118477af4491 100644 --- a/drivers/gpu/drm/tiny/ili9225.c +++ b/drivers/gpu/drm/tiny/ili9225.c @@ -24,6 +24,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> #include 
<drm/drm_rect.h> @@ -345,7 +346,6 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9225_fops); static struct drm_driver ili9225_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ili9225_fops, - .release = mipi_dbi_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .name = "ili9225", .desc = "Ilitek ILI9225", @@ -387,8 +387,7 @@ static int ili9225_probe(struct spi_device *spi) kfree(dbidev); return ret; } - - drm_mode_config_init(drm); + drmm_add_final_kfree(drm, dbidev); dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(dbi->reset)) { diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c index 33b51dc7faa8..e152de369019 100644 --- a/drivers/gpu/drm/tiny/ili9341.c +++ b/drivers/gpu/drm/tiny/ili9341.c @@ -20,6 +20,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> #include <drm/drm_modeset_helper.h> #include <video/mipi_display.h> @@ -151,7 +152,6 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops); static struct drm_driver ili9341_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ili9341_fops, - .release = mipi_dbi_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "ili9341", @@ -194,8 +194,7 @@ static int ili9341_probe(struct spi_device *spi) kfree(dbidev); return ret; } - - drm_mode_config_init(drm); + drmm_add_final_kfree(drm, dbidev); dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(dbi->reset)) { diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c index 532560aebb1e..c4079bf9e2c8 100644 --- a/drivers/gpu/drm/tiny/ili9486.c +++ b/drivers/gpu/drm/tiny/ili9486.c @@ -19,6 +19,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> #include <drm/drm_modeset_helper.h> @@ -164,7 +165,6 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9486_fops); static struct drm_driver ili9486_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ili9486_fops, - .release = mipi_dbi_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "ili9486", @@ -208,8 +208,7 @@ static int ili9486_probe(struct spi_device *spi) kfree(dbidev); return ret; } - - drm_mode_config_init(drm); + drmm_add_final_kfree(drm, dbidev); dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(dbi->reset)) { diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c index e2cfd9a17143..decaf57053ff 100644 --- a/drivers/gpu/drm/tiny/mi0283qt.c +++ b/drivers/gpu/drm/tiny/mi0283qt.c @@ -18,6 +18,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> #include <drm/drm_modeset_helper.h> #include <video/mipi_display.h> @@ -155,7 +156,6 @@ DEFINE_DRM_GEM_CMA_FOPS(mi0283qt_fops); static struct drm_driver mi0283qt_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &mi0283qt_fops, - .release = mipi_dbi_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "mi0283qt", @@ -198,8 +198,7 @@ static int mi0283qt_probe(struct spi_device *spi) kfree(dbidev); return ret; } - - drm_mode_config_init(drm); + drmm_add_final_kfree(drm, dbidev); dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); 
if (IS_ERR(dbi->reset)) { diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c index f5ebcaf7ee3a..862c3ee6055d 100644 --- a/drivers/gpu/drm/tiny/repaper.c +++ b/drivers/gpu/drm/tiny/repaper.c @@ -31,6 +31,7 @@ #include <drm/drm_format_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_modes.h> #include <drm/drm_rect.h> #include <drm/drm_probe_helper.h> @@ -908,17 +909,6 @@ static const struct drm_mode_config_funcs repaper_mode_config_funcs = { .atomic_commit = drm_atomic_helper_commit, }; -static void repaper_release(struct drm_device *drm) -{ - struct repaper_epd *epd = drm_to_epd(drm); - - DRM_DEBUG_DRIVER("\n"); - - drm_mode_config_cleanup(drm); - drm_dev_fini(drm); - kfree(epd); -} - static const uint32_t repaper_formats[] = { DRM_FORMAT_XRGB8888, }; @@ -956,7 +946,6 @@ DEFINE_DRM_GEM_CMA_FOPS(repaper_fops); static struct drm_driver repaper_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &repaper_fops, - .release = repaper_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .name = "repaper", .desc = "Pervasive Displays RePaper e-ink panels", @@ -1024,8 +1013,11 @@ static int repaper_probe(struct spi_device *spi) kfree(epd); return ret; } + drmm_add_final_kfree(drm, epd); - drm_mode_config_init(drm); + ret = drmm_mode_config_init(drm); + if (ret) + return ret; drm->mode_config.funcs = &repaper_mode_config_funcs; epd->spi = spi; diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c index 9ef559dd3191..c3295c717ba6 100644 --- a/drivers/gpu/drm/tiny/st7586.c +++ b/drivers/gpu/drm/tiny/st7586.c @@ -21,6 +21,7 @@ #include <drm/drm_format_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> #include <drm/drm_rect.h> @@ -284,7 +285,6 @@ DEFINE_DRM_GEM_CMA_FOPS(st7586_fops); static struct drm_driver st7586_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &st7586_fops, - .release = mipi_dbi_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "st7586", @@ -328,8 +328,7 @@ static int st7586_probe(struct spi_device *spi) kfree(dbidev); return ret; } - - drm_mode_config_init(drm); + drmm_add_final_kfree(drm, dbidev); bufsize = (st7586_mode.vdisplay + 2) / 3 * st7586_mode.hdisplay; diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c index 3cd9b8d9888d..c2c7dc0224dd 100644 --- a/drivers/gpu/drm/tiny/st7735r.c +++ b/drivers/gpu/drm/tiny/st7735r.c @@ -21,6 +21,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> #define ST7735R_FRMCTR1 0xb1 @@ -156,7 +157,6 @@ DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops); static struct drm_driver st7735r_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &st7735r_fops, - .release = mipi_dbi_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "st7735r", @@ -209,8 +209,7 @@ static int st7735r_probe(struct spi_device *spi) kfree(dbidev); return ret; } - - drm_mode_config_init(drm); + drmm_add_final_kfree(drm, dbidev); dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(dbi->reset)) { diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 9e07c3f75156..f73b81c2576e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ 
b/drivers/gpu/drm/ttm/ttm_bo.c @@ -588,7 +588,8 @@ static void ttm_bo_release(struct kref *kref) ttm_mem_io_unlock(man); } - if (!dma_resv_test_signaled_rcu(bo->base.resv, true)) { + if (!dma_resv_test_signaled_rcu(bo->base.resv, true) || + !dma_resv_trylock(bo->base.resv)) { /* The BO is not idle, resurrect it for delayed destroy */ ttm_bo_flush_all_fences(bo); bo->deleted = true; @@ -621,6 +622,7 @@ static void ttm_bo_release(struct kref *kref) spin_unlock(&ttm_bo_glob.lru_lock); ttm_bo_cleanup_memtype_use(bo); + dma_resv_unlock(bo->base.resv); BUG_ON(bo->mem.mm_node != NULL); atomic_dec(&ttm_bo_glob.bo_count); diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 389128b8c4dd..0ad30b112982 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -59,9 +59,10 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, /* * If possible, avoid waiting for GPU with mmap_sem - * held. + * held. We only do this if the fault allows retry and this + * is the first attempt. */ - if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) { + if (fault_flag_allow_retry_first(vmf->flags)) { ret = VM_FAULT_RETRY; if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) goto out_unlock; @@ -135,7 +136,12 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, * for the buffer to become unreserved. */ if (unlikely(!dma_resv_trylock(bo->base.resv))) { - if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) { + /* + * If the fault allows retry and this is the first + * fault attempt, we try to release the mmap_sem + * before waiting + */ + if (fault_flag_allow_retry_first(vmf->flags)) { if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { ttm_bo_get(bo); up_read(&vmf->vma->vm_mm->mmap_sem); @@ -156,6 +162,89 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_vm_reserve); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +/** + * ttm_bo_vm_insert_huge - Insert a pfn for PUD or PMD faults + * @vmf: Fault data + * @bo: The buffer object + * @page_offset: Page offset from bo start + * @fault_page_size: The size of the fault in pages. + * @pgprot: The page protections. + * Does additional checking whether it's possible to insert a PUD or PMD + * pfn and performs the insertion. + * + * Return: VM_FAULT_NOPAGE on successful insertion, VM_FAULT_FALLBACK if + * a huge fault was not possible, or on insertion error. + */ +static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf, + struct ttm_buffer_object *bo, + pgoff_t page_offset, + pgoff_t fault_page_size, + pgprot_t pgprot) +{ + pgoff_t i; + vm_fault_t ret; + unsigned long pfn; + pfn_t pfnt; + struct ttm_tt *ttm = bo->ttm; + bool write = vmf->flags & FAULT_FLAG_WRITE; + + /* Fault should not cross bo boundary. */ + page_offset &= ~(fault_page_size - 1); + if (page_offset + fault_page_size > bo->num_pages) + goto out_fallback; + + if (bo->mem.bus.is_iomem) + pfn = ttm_bo_io_mem_pfn(bo, page_offset); + else + pfn = page_to_pfn(ttm->pages[page_offset]); + + /* pfn must be fault_page_size aligned. */ + if ((pfn & (fault_page_size - 1)) != 0) + goto out_fallback; + + /* Check that memory is contiguous. 
*/ + if (!bo->mem.bus.is_iomem) { + for (i = 1; i < fault_page_size; ++i) { + if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i) + goto out_fallback; + } + } else if (bo->bdev->driver->io_mem_pfn) { + for (i = 1; i < fault_page_size; ++i) { + if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i) + goto out_fallback; + } + } + + pfnt = __pfn_to_pfn_t(pfn, PFN_DEV); + if (fault_page_size == (HPAGE_PMD_SIZE >> PAGE_SHIFT)) + ret = vmf_insert_pfn_pmd_prot(vmf, pfnt, pgprot, write); +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD + else if (fault_page_size == (HPAGE_PUD_SIZE >> PAGE_SHIFT)) + ret = vmf_insert_pfn_pud_prot(vmf, pfnt, pgprot, write); +#endif + else + WARN_ON_ONCE(ret = VM_FAULT_FALLBACK); + + if (ret != VM_FAULT_NOPAGE) + goto out_fallback; + + return VM_FAULT_NOPAGE; +out_fallback: + count_vm_event(THP_FAULT_FALLBACK); + return VM_FAULT_FALLBACK; +} +#else +static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf, + struct ttm_buffer_object *bo, + pgoff_t page_offset, + pgoff_t fault_page_size, + pgprot_t pgprot) +{ + return VM_FAULT_FALLBACK; +} +#endif + /** * ttm_bo_vm_fault_reserved - TTM fault helper * @vmf: The struct vm_fault given as argument to the fault callback @@ -163,6 +252,7 @@ EXPORT_SYMBOL(ttm_bo_vm_reserve); * @num_prefault: Maximum number of prefault pages. The caller may want to * specify this based on madvice settings and the size of the GPU object * backed by the memory. + * @fault_page_size: The size of the fault in pages. * * This function inserts one or more page table entries pointing to the * memory backing the buffer object, and then returns a return code @@ -176,7 +266,8 @@ EXPORT_SYMBOL(ttm_bo_vm_reserve); */ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, pgprot_t prot, - pgoff_t num_prefault) + pgoff_t num_prefault, + pgoff_t fault_page_size) { struct vm_area_struct *vma = vmf->vma; struct ttm_buffer_object *bo = vma->vm_private_data; @@ -268,6 +359,13 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, prot = pgprot_decrypted(prot); } + /* We don't prefault on huge faults. Yet. */ + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) { + ret = ttm_bo_vm_insert_huge(vmf, bo, page_offset, + fault_page_size, prot); + goto out_io_unlock; + } + /* * Speculatively prefault a number of pages. Only error on * first page. 
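A minimal sketch (not part of this series) of how a driver-side huge_fault handler could use the fault_page_size argument added to ttm_bo_vm_fault_reserved() above; the function name my_bo_vm_huge_fault is hypothetical and driver-specific dirty tracking is omitted. The real user is vmw_bo_vm_huge_fault() in vmwgfx_page_dirty.c further down in this diff.

#include <linux/mm.h>           /* enum page_entry_size, vm_get_page_prot() */
#include <linux/huge_mm.h>      /* HPAGE_PMD_SIZE, HPAGE_PUD_SIZE */
#include <drm/ttm/ttm_bo_api.h> /* ttm_bo_vm_reserve(), ttm_bo_vm_fault_reserved() */

static vm_fault_t my_bo_vm_huge_fault(struct vm_fault *vmf,
				      enum page_entry_size pe_size)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	pgprot_t prot = vm_get_page_prot(vmf->vma->vm_flags);
	pgoff_t fault_page_size;
	vm_fault_t ret;

	switch (pe_size) {
	case PE_SIZE_PMD:
		fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT;
		break;
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	case PE_SIZE_PUD:
		fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT;
		break;
#endif
	default:
		/* Unknown fault size: let the core fall back to PTE mappings. */
		return VM_FAULT_FALLBACK;
	}

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	/*
	 * num_prefault is 1: huge faults are not prefaulted, and the helper
	 * itself returns VM_FAULT_FALLBACK if a huge entry cannot be inserted.
	 */
	ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);
	return ret;
}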
@@ -334,7 +432,7 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf) return ret; prot = vma->vm_page_prot; - ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT); + ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1); if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) return ret; @@ -445,7 +543,7 @@ static const struct vm_operations_struct ttm_bo_vm_ops = { .fault = ttm_bo_vm_fault, .open = ttm_bo_vm_open, .close = ttm_bo_vm_close, - .access = ttm_bo_vm_access + .access = ttm_bo_vm_access, }; static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev, diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index e6c1cd77d4d4..9cc6d075cb40 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -10,6 +10,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_file.h> #include <drm/drm_gem_shmem_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_ioctl.h> #include <drm/drm_probe_helper.h> #include <drm/drm_print.h> @@ -33,17 +34,8 @@ static int udl_usb_resume(struct usb_interface *interface) DEFINE_DRM_GEM_FOPS(udl_driver_fops); -static void udl_driver_release(struct drm_device *dev) -{ - udl_fini(dev); - udl_modeset_cleanup(dev); - drm_dev_fini(dev); - kfree(dev); -} - static struct drm_driver driver = { .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET, - .release = udl_driver_release, /* gem hooks */ .gem_create_object = udl_driver_gem_create_object, @@ -77,11 +69,11 @@ static struct udl_device *udl_driver_create(struct usb_interface *interface) udl->udev = udev; udl->drm.dev_private = udl; + drmm_add_final_kfree(&udl->drm, udl); r = udl_init(udl); if (r) { - drm_dev_fini(&udl->drm); - kfree(udl); + drm_dev_put(&udl->drm); return ERR_PTR(r); } @@ -105,14 +97,10 @@ static int udl_usb_probe(struct usb_interface *interface, DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index); - r = drm_fbdev_generic_setup(&udl->drm, 0); - if (r) - goto err_drm_dev_unregister; + drm_fbdev_generic_setup(&udl->drm, 0); return 0; -err_drm_dev_unregister: - drm_dev_unregister(&udl->drm); err_free: drm_dev_put(&udl->drm); return r; @@ -122,7 +110,7 @@ static void udl_usb_disconnect(struct usb_interface *interface) { struct drm_device *dev = usb_get_intfdata(interface); - drm_kms_helper_poll_disable(dev); + drm_kms_helper_poll_fini(dev); udl_drop_usb(dev); drm_dev_unplug(dev); drm_dev_put(dev); diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index e67227c44cc4..2642f94a63fc 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h @@ -68,7 +68,6 @@ struct udl_device { /* modeset */ int udl_modeset_init(struct drm_device *dev); -void udl_modeset_cleanup(struct drm_device *dev); struct drm_connector *udl_connector_init(struct drm_device *dev); struct urb *udl_get_urb(struct drm_device *dev); @@ -77,7 +76,6 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len); void udl_urb_completion(struct urb *urb); int udl_init(struct udl_device *udl); -void udl_fini(struct drm_device *dev); int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr, const char *front, char **urb_buf_ptr, diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index 538718919916..f5d27f2a5654 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -351,13 +351,3 @@ int udl_drop_usb(struct drm_device *dev) udl_free_urb_list(dev); return 0; } - -void udl_fini(struct 
drm_device *dev) -{ - struct udl_device *udl = to_udl(dev); - - drm_kms_helper_poll_fini(dev); - - if (udl->urbs.count) - udl_free_urb_list(dev); -} diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index d59ebac70b15..8cad01f3d163 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -468,7 +468,9 @@ int udl_modeset_init(struct drm_device *dev) struct drm_connector *connector; int ret; - drm_mode_config_init(dev); + ret = drmm_mode_config_init(dev); + if (ret) + return ret; dev->mode_config.min_width = 640; dev->mode_config.min_height = 480; @@ -482,10 +484,8 @@ int udl_modeset_init(struct drm_device *dev) dev->mode_config.funcs = &udl_mode_funcs; connector = udl_connector_init(dev); - if (IS_ERR(connector)) { - ret = PTR_ERR(connector); - goto err_drm_mode_config_cleanup; - } + if (IS_ERR(connector)) + return PTR_ERR(connector); format_count = ARRAY_SIZE(udl_simple_display_pipe_formats); @@ -494,18 +494,9 @@ int udl_modeset_init(struct drm_device *dev) udl_simple_display_pipe_formats, format_count, NULL, connector); if (ret) - goto err_drm_mode_config_cleanup; + return ret; drm_mode_config_reset(dev); return 0; - -err_drm_mode_config_cleanup: - drm_mode_config_cleanup(dev); - return ret; -} - -void udl_modeset_cleanup(struct drm_device *dev) -{ - drm_mode_config_cleanup(dev); } diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c index 9e953ce64ef7..2b0ea5f8febd 100644 --- a/drivers/gpu/drm/v3d/v3d_debugfs.c +++ b/drivers/gpu/drm/v3d/v3d_debugfs.c @@ -258,10 +258,10 @@ static const struct drm_info_list v3d_debugfs_list[] = { {"bo_stats", v3d_debugfs_bo_stats, 0}, }; -int +void v3d_debugfs_init(struct drm_minor *minor) { - return drm_debugfs_create_files(v3d_debugfs_list, - ARRAY_SIZE(v3d_debugfs_list), - minor->debugfs_root, minor); + drm_debugfs_create_files(v3d_debugfs_list, + ARRAY_SIZE(v3d_debugfs_list), + minor->debugfs_root, minor); } diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index eaa8e9682373..8d0c0daaac81 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -25,6 +25,7 @@ #include <drm/drm_drv.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_managed.h> #include <uapi/drm/v3d_drm.h> #include "v3d_drv.h" @@ -257,13 +258,23 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) v3d->pdev = pdev; drm = &v3d->drm; + ret = drm_dev_init(&v3d->drm, &v3d_drm_driver, dev); + if (ret) { + kfree(v3d); + return ret; + } + + platform_set_drvdata(pdev, drm); + drm->dev_private = v3d; + drmm_add_final_kfree(drm, v3d); + ret = map_regs(v3d, &v3d->hub_regs, "hub"); if (ret) - goto dev_free; + goto dev_destroy; ret = map_regs(v3d, &v3d->core_regs[0], "core0"); if (ret) - goto dev_free; + goto dev_destroy; mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO); dev->coherent_dma_mask = @@ -281,21 +292,21 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) ret = PTR_ERR(v3d->reset); if (ret == -EPROBE_DEFER) - goto dev_free; + goto dev_destroy; v3d->reset = NULL; ret = map_regs(v3d, &v3d->bridge_regs, "bridge"); if (ret) { dev_err(dev, "Failed to get reset control or bridge regs\n"); - goto dev_free; + goto dev_destroy; } } if (v3d->ver < 41) { ret = map_regs(v3d, &v3d->gca_regs, "gca"); if (ret) - goto dev_free; + goto dev_destroy; } v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr, @@ -303,23 +314,16 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) if 
(!v3d->mmu_scratch) { dev_err(dev, "Failed to allocate MMU scratch page\n"); ret = -ENOMEM; - goto dev_free; + goto dev_destroy; } pm_runtime_use_autosuspend(dev); pm_runtime_set_autosuspend_delay(dev, 50); pm_runtime_enable(dev); - ret = drm_dev_init(&v3d->drm, &v3d_drm_driver, dev); - if (ret) - goto dma_free; - - platform_set_drvdata(pdev, drm); - drm->dev_private = v3d; - ret = v3d_gem_init(drm); if (ret) - goto dev_destroy; + goto dma_free; ret = v3d_irq_init(v3d); if (ret) @@ -335,12 +339,10 @@ irq_disable: v3d_irq_disable(v3d); gem_destroy: v3d_gem_destroy(drm); -dev_destroy: - drm_dev_put(drm); dma_free: dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr); -dev_free: - kfree(v3d); +dev_destroy: + drm_dev_put(drm); return ret; } diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index ac2603334587..e0775c884553 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -316,7 +316,7 @@ struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev, struct sg_table *sgt); /* v3d_debugfs.c */ -int v3d_debugfs_init(struct drm_minor *minor); +void v3d_debugfs_init(struct drm_minor *minor); /* v3d_fence.c */ extern const struct dma_fence_ops v3d_fence_ops; diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c index 8512d970a09f..282348e071fe 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_drv.c +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c @@ -17,6 +17,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_file.h> #include <drm/drm_ioctl.h> +#include <drm/drm_managed.h> #include "vbox_drv.h" @@ -41,6 +42,10 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!vbox_check_supported(VBE_DISPI_ID_HGSMI)) return -ENODEV; + ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "vboxvideodrmfb"); + if (ret) + return ret; + vbox = kzalloc(sizeof(*vbox), GFP_KERNEL); if (!vbox) return -ENOMEM; @@ -54,6 +59,7 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) vbox->ddev.pdev = pdev; vbox->ddev.dev_private = vbox; pci_set_drvdata(pdev, vbox); + drmm_add_final_kfree(&vbox->ddev, vbox); mutex_init(&vbox->hw_mutex); ret = pci_enable_device(pdev); @@ -76,14 +82,12 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto err_mode_fini; - ret = drm_fbdev_generic_setup(&vbox->ddev, 32); - if (ret) - goto err_irq_fini; - ret = drm_dev_register(&vbox->ddev, 0); if (ret) goto err_irq_fini; + drm_fbdev_generic_setup(&vbox->ddev, 32); + return 0; err_irq_fini: diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c index b61b2d3407b5..4fbbf980a299 100644 --- a/drivers/gpu/drm/vc4/vc4_debugfs.c +++ b/drivers/gpu/drm/vc4/vc4_debugfs.c @@ -20,7 +20,7 @@ struct vc4_debugfs_info_entry { * Called at drm_dev_register() time on each of the minors registered * by the DRM device, to attach the debugfs files. 
*/ -int +void vc4_debugfs_init(struct drm_minor *minor) { struct vc4_dev *vc4 = to_vc4_dev(minor->dev); @@ -30,14 +30,9 @@ vc4_debugfs_init(struct drm_minor *minor) minor->debugfs_root, &vc4->load_tracker_enabled); list_for_each_entry(entry, &vc4->debugfs_list, link) { - int ret = drm_debugfs_create_files(&entry->info, 1, - minor->debugfs_root, minor); - - if (ret) - return ret; + drm_debugfs_create_files(&entry->info, 1, + minor->debugfs_root, minor); } - - return 0; } static int vc4_debugfs_regset32(struct seq_file *m, void *unused) diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c index 6dfede03396e..a90f2545baee 100644 --- a/drivers/gpu/drm/vc4/vc4_dpi.c +++ b/drivers/gpu/drm/vc4/vc4_dpi.c @@ -17,6 +17,7 @@ #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include <linux/clk.h> #include <linux/component.h> #include <linux/of_graph.h> @@ -114,10 +115,6 @@ static const struct debugfs_reg32 dpi_regs[] = { VC4_REG32(DPI_ID), }; -static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static void vc4_dpi_encoder_disable(struct drm_encoder *encoder) { struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder); @@ -309,8 +306,7 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data) if (ret) DRM_ERROR("Failed to turn on core clock: %d\n", ret); - drm_encoder_init(drm, dpi->encoder, &vc4_dpi_encoder_funcs, - DRM_MODE_ENCODER_DPI, NULL); + drm_simple_encoder_init(drm, dpi->encoder, DRM_MODE_ENCODER_DPI); drm_encoder_helper_add(dpi->encoder, &vc4_dpi_encoder_helper_funcs); ret = vc4_dpi_init_bridge(dpi); diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 139d25a8328e..3b1f02efefbe 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -759,7 +759,7 @@ void vc4_crtc_get_margins(struct drm_crtc_state *state, unsigned int *top, unsigned int *bottom); /* vc4_debugfs.c */ -int vc4_debugfs_init(struct drm_minor *minor); +void vc4_debugfs_init(struct drm_minor *minor); #ifdef CONFIG_DEBUG_FS void vc4_debugfs_add_file(struct drm_device *drm, const char *filename, diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index d99b1d526651..eaf276978ee7 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -37,6 +37,7 @@ #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "vc4_drv.h" #include "vc4_regs.h" @@ -652,15 +653,6 @@ static const struct debugfs_reg32 dsi1_regs[] = { VC4_REG32(DSI1_ID), }; -static void vc4_dsi_encoder_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -static const struct drm_encoder_funcs vc4_dsi_encoder_funcs = { - .destroy = vc4_dsi_encoder_destroy, -}; - static void vc4_dsi_latch_ulps(struct vc4_dsi *dsi, bool latch) { u32 afec0 = DSI_PORT_READ(PHY_AFEC0); @@ -1615,8 +1607,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) if (dsi->port == 1) vc4->dsi1 = dsi; - drm_encoder_init(drm, dsi->encoder, &vc4_dsi_encoder_funcs, - DRM_MODE_ENCODER_DSI, NULL); + drm_simple_encoder_init(drm, dsi->encoder, DRM_MODE_ENCODER_DSI); drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs); ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL, 0); @@ -1656,7 +1647,7 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master, * normally. 
*/ list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain); - vc4_dsi_encoder_destroy(dsi->encoder); + drm_encoder_cleanup(dsi->encoder); if (dsi->port == 1) vc4->dsi1 = NULL; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index cea18dc15f77..625bfcf52dc4 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -34,6 +34,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include <linux/clk.h> #include <linux/component.h> #include <linux/i2c.h> @@ -306,15 +307,6 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev, return connector; } -static void vc4_hdmi_encoder_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -static const struct drm_encoder_funcs vc4_hdmi_encoder_funcs = { - .destroy = vc4_hdmi_encoder_destroy, -}; - static int vc4_hdmi_stop_packet(struct drm_encoder *encoder, enum hdmi_infoframe_type type) { @@ -681,11 +673,23 @@ static enum drm_mode_status vc4_hdmi_encoder_mode_valid(struct drm_encoder *crtc, const struct drm_display_mode *mode) { - /* HSM clock must be 108% of the pixel clock. Additionally, - * the AXI clock needs to be at least 25% of pixel clock, but - * HSM ends up being the limiting factor. + /* + * As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must + * be faster than pixel clock, infinitesimally faster, tested in + * simulation. Otherwise, exact value is unimportant for HDMI + * operation." This conflicts with bcm2835's vc4 documentation, which + * states HSM's clock has to be at least 108% of the pixel clock. + * + * Real life tests reveal that vc4's firmware statement holds up, and + * users are able to use pixel clocks closer to HSM's, namely for + * 1920x1200@60Hz. So it was decided to have leave a 1% margin between + * both clocks. Which, for RPi0-3 implies a maximum pixel clock of + * 162MHz. + * + * Additionally, the AXI clock needs to be at least 25% of + * pixel clock, but HSM ends up being the limiting factor. 
*/ - if (mode->clock > HSM_CLOCK_FREQ / (1000 * 108 / 100)) + if (mode->clock > HSM_CLOCK_FREQ / (1000 * 101 / 100)) return MODE_CLOCK_HIGH; return MODE_OK; @@ -1394,8 +1398,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) } pm_runtime_enable(dev); - drm_encoder_init(drm, hdmi->encoder, &vc4_hdmi_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + drm_simple_encoder_init(drm, hdmi->encoder, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(hdmi->encoder, &vc4_hdmi_encoder_helper_funcs); hdmi->connector = @@ -1453,7 +1456,7 @@ err_destroy_conn: vc4_hdmi_connector_destroy(hdmi->connector); #endif err_destroy_encoder: - vc4_hdmi_encoder_destroy(hdmi->encoder); + drm_encoder_cleanup(hdmi->encoder); err_unprepare_hsm: clk_disable_unprepare(hdmi->hsm_clock); pm_runtime_disable(dev); @@ -1472,7 +1475,7 @@ static void vc4_hdmi_unbind(struct device *dev, struct device *master, cec_unregister_adapter(hdmi->cec_adap); vc4_hdmi_connector_destroy(hdmi->connector); - vc4_hdmi_encoder_destroy(hdmi->encoder); + drm_encoder_cleanup(hdmi->encoder); clk_disable_unprepare(hdmi->hsm_clock); pm_runtime_disable(dev); diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c index 7402bc768664..bd5b8eb58b18 100644 --- a/drivers/gpu/drm/vc4/vc4_vec.c +++ b/drivers/gpu/drm/vc4/vc4_vec.c @@ -17,6 +17,7 @@ #include <drm/drm_edid.h> #include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include <linux/clk.h> #include <linux/component.h> #include <linux/of_graph.h> @@ -374,10 +375,6 @@ static struct drm_connector *vc4_vec_connector_init(struct drm_device *dev, return connector; } -static const struct drm_encoder_funcs vc4_vec_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static void vc4_vec_encoder_disable(struct drm_encoder *encoder) { struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder); @@ -566,8 +563,7 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data) pm_runtime_enable(dev); - drm_encoder_init(drm, vec->encoder, &vc4_vec_encoder_funcs, - DRM_MODE_ENCODER_TVDAC, NULL); + drm_simple_encoder_init(drm, vec->encoder, DRM_MODE_ENCODER_TVDAC); drm_encoder_helper_add(vec->encoder, &vc4_vec_encoder_helper_funcs); vec->connector = vc4_vec_connector_init(drm, vec); diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 909eba43664a..ec1a8ebb6f1b 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -39,6 +39,7 @@ #include <drm/drm_drv.h> #include <drm/drm_file.h> #include <drm/drm_ioctl.h> +#include <drm/drm_managed.h> #include <drm/drm_prime.h> #include "vgem_drv.h" @@ -431,9 +432,6 @@ static void vgem_release(struct drm_device *dev) struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm); platform_device_unregister(vgem->platform); - drm_dev_fini(&vgem->drm); - - kfree(vgem); } static struct drm_driver vgem_driver = { @@ -489,16 +487,19 @@ static int __init vgem_init(void) &vgem_device->platform->dev); if (ret) goto out_unregister; + drmm_add_final_kfree(&vgem_device->drm, vgem_device); /* Final step: expose the device/driver to userspace */ - ret = drm_dev_register(&vgem_device->drm, 0); + ret = drm_dev_register(&vgem_device->drm, 0); if (ret) - goto out_fini; + goto out_put; return 0; -out_fini: - drm_dev_fini(&vgem_device->drm); +out_put: + drm_dev_put(&vgem_device->drm); + return ret; + out_unregister: platform_device_unregister(vgem_device->platform); out_free: diff --git 
a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c index e27120d512b0..3221520f61f0 100644 --- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c +++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c @@ -72,11 +72,10 @@ static struct drm_info_list virtio_gpu_debugfs_list[] = { #define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list) -int +void virtio_gpu_debugfs_init(struct drm_minor *minor) { drm_debugfs_create_files(virtio_gpu_debugfs_list, VIRTIO_GPU_DEBUGFS_ENTRIES, minor->debugfs_root, minor); - return 0; } diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index 2b7e6ae65546..cc7fd957a307 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -30,6 +30,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "virtgpu_drv.h" @@ -240,10 +241,6 @@ static const struct drm_connector_funcs virtio_gpu_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static const struct drm_encoder_funcs virtio_gpu_enc_funcs = { - .destroy = drm_encoder_cleanup, -}; - static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index) { struct drm_device *dev = vgdev->ddev; @@ -276,8 +273,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index) if (vgdev->has_edid) drm_connector_attach_edid_property(connector); - drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs, - DRM_MODE_ENCODER_VIRTUAL, NULL); + drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL); drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs); encoder->possible_crtcs = 1 << index; diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index c1824bdf2418..49bebdee6d91 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -218,26 +218,18 @@ struct virtio_gpu_fpriv { struct mutex context_lock; }; -/* virtio_ioctl.c */ +/* virtgpu_ioctl.c */ #define DRM_VIRTIO_NUM_IOCTLS 10 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS]; -/* virtio_kms.c */ +/* virtgpu_kms.c */ int virtio_gpu_init(struct drm_device *dev); void virtio_gpu_deinit(struct drm_device *dev); void virtio_gpu_release(struct drm_device *dev); int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file); void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file); -/* virtio_gem.c */ -void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj); -int virtio_gpu_gem_init(struct virtio_gpu_device *vgdev); -void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev); -int virtio_gpu_gem_create(struct drm_file *file, - struct drm_device *dev, - struct virtio_gpu_object_params *params, - struct drm_gem_object **obj_p, - uint32_t *handle_p); +/* virtgpu_gem.c */ int virtio_gpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file); void virtio_gpu_gem_object_close(struct drm_gem_object *obj, @@ -263,7 +255,7 @@ void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev, struct virtio_gpu_object_array *objs); void virtio_gpu_array_put_free_work(struct work_struct *work); -/* virtio vg */ +/* virtgpu_vq.c */ int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev); void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev); void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev, @@ -287,10 +279,10 @@ void 
virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev, uint32_t scanout_id, uint32_t resource_id, uint32_t width, uint32_t height, uint32_t x, uint32_t y); -int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev, - struct virtio_gpu_object *obj, - struct virtio_gpu_mem_entry *ents, - unsigned int nents); +void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *obj, + struct virtio_gpu_mem_entry *ents, + unsigned int nents); int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev); int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev); void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev, @@ -343,17 +335,17 @@ void virtio_gpu_dequeue_fence_func(struct work_struct *work); void virtio_gpu_notify(struct virtio_gpu_device *vgdev); -/* virtio_gpu_display.c */ +/* virtgpu_display.c */ void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev); void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev); -/* virtio_gpu_plane.c */ +/* virtgpu_plane.c */ uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc); struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, enum drm_plane_type type, int index); -/* virtio_gpu_fence.c */ +/* virtgpu_fence.c */ struct virtio_gpu_fence *virtio_gpu_fence_alloc( struct virtio_gpu_device *vgdev); void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, @@ -362,7 +354,7 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev, u64 last_seq); -/* virtio_gpu_object */ +/* virtgpu_object.c */ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo); struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev, size_t size); @@ -378,7 +370,7 @@ struct drm_gem_object *virtgpu_gem_prime_import_sg_table( struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); -/* virgl debugfs */ -int virtio_gpu_debugfs_init(struct drm_minor *minor); +/* virtgpu_debugfs.c */ +void virtio_gpu_debugfs_init(struct drm_minor *minor); #endif diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c index 0d6152c99a27..1025658be4df 100644 --- a/drivers/gpu/drm/virtio/virtgpu_gem.c +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c @@ -28,11 +28,11 @@ #include "virtgpu_drv.h" -int virtio_gpu_gem_create(struct drm_file *file, - struct drm_device *dev, - struct virtio_gpu_object_params *params, - struct drm_gem_object **obj_p, - uint32_t *handle_p) +static int virtio_gpu_gem_create(struct drm_file *file, + struct drm_device *dev, + struct virtio_gpu_object_params *params, + struct drm_gem_object **obj_p, + uint32_t *handle_p) { struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_object *obj; @@ -114,7 +114,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj, struct virtio_gpu_object_array *objs; if (!vgdev->has_virgl_3d) - return 0; + goto out_notify; objs = virtio_gpu_array_alloc(1); if (!objs) @@ -123,6 +123,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj, virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, objs); +out_notify: virtio_gpu_notify(vgdev); return 0; } diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 336cc9143205..867c5e239d55 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -47,7 +47,6 @@ static void virtio_gpu_create_context(struct drm_device *dev, get_task_comm(dbgname, current); 
virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id, strlen(dbgname), dbgname); - virtio_gpu_notify(vgdev); vfpriv->context_created = true; out_unlock: diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index 2bfb13d1932e..6ccbd01cd888 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -123,15 +123,17 @@ bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo) struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev, size_t size) { - struct virtio_gpu_object *bo; + struct virtio_gpu_object_shmem *shmem; + struct drm_gem_shmem_object *dshmem; - bo = kzalloc(sizeof(*bo), GFP_KERNEL); - if (!bo) + shmem = kzalloc(sizeof(*shmem), GFP_KERNEL); + if (!shmem) return NULL; - bo->base.base.funcs = &virtio_gpu_shmem_funcs; - bo->base.map_cached = true; - return &bo->base.base; + dshmem = &shmem->base.base; + dshmem->base.funcs = &virtio_gpu_shmem_funcs; + dshmem->map_cached = true; + return &dshmem->base; } static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, @@ -233,13 +235,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev, return ret; } - ret = virtio_gpu_object_attach(vgdev, bo, ents, nents); - if (ret != 0) { - virtio_gpu_free_object(&shmem_obj->base); - return ret; - } + virtio_gpu_object_attach(vgdev, bo, ents, nents); - virtio_gpu_notify(vgdev); *bo_ptr = bo; return 0; diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 73854915ec34..9e663a5d9952 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -1087,14 +1087,13 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev, virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); } -int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev, - struct virtio_gpu_object *obj, - struct virtio_gpu_mem_entry *ents, - unsigned int nents) +void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *obj, + struct virtio_gpu_mem_entry *ents, + unsigned int nents) { virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle, ents, nents, NULL); - return 0; } void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev, diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 860de052e820..eef85f1a0ce5 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -21,6 +21,7 @@ #include <drm/drm_file.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_ioctl.h> +#include <drm/drm_managed.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -63,7 +64,6 @@ static void vkms_release(struct drm_device *dev) platform_device_unregister(vkms->platform); drm_atomic_helper_shutdown(&vkms->drm); drm_mode_config_cleanup(&vkms->drm); - drm_dev_fini(&vkms->drm); destroy_workqueue(vkms->output.composer_workq); } @@ -158,13 +158,14 @@ static int __init vkms_init(void) &vkms_device->platform->dev); if (ret) goto out_unregister; + drmm_add_final_kfree(&vkms_device->drm, vkms_device); ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev, DMA_BIT_MASK(64)); if (ret) { DRM_ERROR("Could not initialize DMA support\n"); - goto out_fini; + goto out_put; } vkms_device->drm.irq_enabled = true; @@ -172,25 +173,25 @@ static int __init vkms_init(void) ret = drm_vblank_init(&vkms_device->drm, 1); if (ret) { DRM_ERROR("Failed to vblank\n"); - goto out_fini; + goto out_put; } ret = vkms_modeset_init(vkms_device); if (ret) - goto out_fini; + 
goto out_put; ret = drm_dev_register(&vkms_device->drm, 0); if (ret) - goto out_fini; + goto out_put; return 0; -out_fini: - drm_dev_fini(&vkms_device->drm); +out_put: + drm_dev_put(&vkms_device->drm); + return ret; out_unregister: platform_device_unregister(vkms_device->platform); - out_free: kfree(vkms_device); return ret; @@ -205,8 +206,6 @@ static void __exit vkms_exit(void) drm_dev_unregister(&vkms_device->drm); drm_dev_put(&vkms_device->drm); - - kfree(vkms_device); } module_init(vkms_init); diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index fb1941a6522c..85afb77e97f0 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -3,6 +3,7 @@ #include "vkms_drv.h" #include <drm/drm_atomic_helper.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> static void vkms_connector_destroy(struct drm_connector *connector) { @@ -17,10 +18,6 @@ static const struct drm_connector_funcs vkms_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static const struct drm_encoder_funcs vkms_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static int vkms_conn_get_modes(struct drm_connector *connector) { int count; @@ -70,8 +67,7 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index) drm_connector_helper_add(connector, &vkms_conn_helper_funcs); - ret = drm_encoder_init(dev, encoder, &vkms_encoder_funcs, - DRM_MODE_ENCODER_VIRTUAL, NULL); + ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL); if (ret) { DRM_ERROR("Failed to init encoder\n"); goto err_encoder; diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index 5c3515e8cce1..31f85f09f1fc 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -11,4 +11,5 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \ ttm_object.o ttm_lock.o +vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 71e45b568511..c2247a893ed4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1247,6 +1247,18 @@ static void vmw_remove(struct pci_dev *pdev) pci_disable_device(pdev); } +static unsigned long +vmw_get_unmapped_area(struct file *file, unsigned long uaddr, + unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + struct drm_file *file_priv = file->private_data; + struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev); + + return drm_get_unmapped_area(file, uaddr, len, pgoff, flags, + &dev_priv->vma_manager); +} + static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, void *ptr) { @@ -1418,6 +1430,7 @@ static const struct file_operations vmwgfx_driver_fops = { .compat_ioctl = vmw_compat_ioctl, #endif .llseek = noop_llseek, + .get_unmapped_area = vmw_get_unmapped_area, }; static struct drm_driver driver = { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 5ddbcb9f6df4..8cdcd6e5f9e1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -1000,6 +1000,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran); + /** * TTM buffer object driver - vmwgfx_ttm_buffer.c */ @@ -1510,6 
+1511,17 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo, pgoff_t start, pgoff_t end); vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf); vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf, + enum page_entry_size pe_size); +#endif + +/* Transparent hugepage support - vmwgfx_thp.c */ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern const struct ttm_mem_type_manager_func vmw_thp_func; +#else +#define vmw_thp_func ttm_bo_manager_func +#endif /** * VMW_DEBUG_KMS - Debug output for kernel mode-setting diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c index 60cfbfadd3f2..d4d66532f9c9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c @@ -473,11 +473,11 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf) * a lot of unnecessary write faults. */ if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE) - prot = vma->vm_page_prot; + prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED); else prot = vm_get_page_prot(vma->vm_flags); - ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault); + ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault, 1); if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) return ret; @@ -486,3 +486,75 @@ out_unlock: return ret; } + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf, + enum page_entry_size pe_size) +{ + struct vm_area_struct *vma = vmf->vma; + struct ttm_buffer_object *bo = (struct ttm_buffer_object *) + vma->vm_private_data; + struct vmw_buffer_object *vbo = + container_of(bo, struct vmw_buffer_object, base); + pgprot_t prot; + vm_fault_t ret; + pgoff_t fault_page_size; + bool write = vmf->flags & FAULT_FLAG_WRITE; + bool is_cow_mapping = + (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; + + switch (pe_size) { + case PE_SIZE_PMD: + fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT; + break; +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD + case PE_SIZE_PUD: + fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT; + break; +#endif + default: + WARN_ON_ONCE(1); + return VM_FAULT_FALLBACK; + } + + /* Always do write dirty-tracking and COW on PTE level. */ + if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping)) + return VM_FAULT_FALLBACK; + + ret = ttm_bo_vm_reserve(bo, vmf); + if (ret) + return ret; + + if (vbo->dirty) { + pgoff_t allowed_prefault; + unsigned long page_offset; + + page_offset = vmf->pgoff - + drm_vma_node_start(&bo->base.vma_node); + if (page_offset >= bo->num_pages || + vmw_resources_clean(vbo, page_offset, + page_offset + PAGE_SIZE, + &allowed_prefault)) { + ret = VM_FAULT_SIGBUS; + goto out_unlock; + } + + /* + * Write protect, so we get a new fault on write, and can + * split. + */ + prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED); + } else { + prot = vm_get_page_prot(vma->vm_flags); + } + + ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size); + if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) + return ret; + +out_unlock: + dma_resv_unlock(bo->base.resv); + + return ret; +} +#endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c new file mode 100644 index 000000000000..b7c816ba7166 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Huge page-table-entry support for IO memory. + * + * Copyright (C) 2007-2019 Vmware, Inc. 
All rights reserved. + */ +#include "vmwgfx_drv.h" +#include <drm/ttm/ttm_module.h> +#include <drm/ttm/ttm_bo_driver.h> +#include <drm/ttm/ttm_placement.h> + +/** + * struct vmw_thp_manager - Range manager implementing huge page alignment + * + * @mm: The underlying range manager. Protected by @lock. + * @lock: Manager lock. + */ +struct vmw_thp_manager { + struct drm_mm mm; + spinlock_t lock; +}; + +static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node, + unsigned long align_pages, + const struct ttm_place *place, + struct ttm_mem_reg *mem, + unsigned long lpfn, + enum drm_mm_insert_mode mode) +{ + if (align_pages >= mem->page_alignment && + (!mem->page_alignment || align_pages % mem->page_alignment == 0)) { + return drm_mm_insert_node_in_range(mm, node, + mem->num_pages, + align_pages, 0, + place->fpfn, lpfn, mode); + } + + return -ENOSPC; +} + +static int vmw_thp_get_node(struct ttm_mem_type_manager *man, + struct ttm_buffer_object *bo, + const struct ttm_place *place, + struct ttm_mem_reg *mem) +{ + struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv; + struct drm_mm *mm = &rman->mm; + struct drm_mm_node *node; + unsigned long align_pages; + unsigned long lpfn; + enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST; + int ret; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + lpfn = place->lpfn; + if (!lpfn) + lpfn = man->size; + + mode = DRM_MM_INSERT_BEST; + if (place->flags & TTM_PL_FLAG_TOPDOWN) + mode = DRM_MM_INSERT_HIGH; + + spin_lock(&rman->lock); + if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) { + align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT); + if (mem->num_pages >= align_pages) { + ret = vmw_thp_insert_aligned(mm, node, align_pages, + place, mem, lpfn, mode); + if (!ret) + goto found_unlock; + } + } + + align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT); + if (mem->num_pages >= align_pages) { + ret = vmw_thp_insert_aligned(mm, node, align_pages, place, mem, + lpfn, mode); + if (!ret) + goto found_unlock; + } + + ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages, + mem->page_alignment, 0, + place->fpfn, lpfn, mode); +found_unlock: + spin_unlock(&rman->lock); + + if (unlikely(ret)) { + kfree(node); + } else { + mem->mm_node = node; + mem->start = node->start; + } + + return 0; +} + + + +static void vmw_thp_put_node(struct ttm_mem_type_manager *man, + struct ttm_mem_reg *mem) +{ + struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv; + + if (mem->mm_node) { + spin_lock(&rman->lock); + drm_mm_remove_node(mem->mm_node); + spin_unlock(&rman->lock); + + kfree(mem->mm_node); + mem->mm_node = NULL; + } +} + +static int vmw_thp_init(struct ttm_mem_type_manager *man, + unsigned long p_size) +{ + struct vmw_thp_manager *rman; + + rman = kzalloc(sizeof(*rman), GFP_KERNEL); + if (!rman) + return -ENOMEM; + + drm_mm_init(&rman->mm, 0, p_size); + spin_lock_init(&rman->lock); + man->priv = rman; + return 0; +} + +static int vmw_thp_takedown(struct ttm_mem_type_manager *man) +{ + struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv; + struct drm_mm *mm = &rman->mm; + + spin_lock(&rman->lock); + if (drm_mm_clean(mm)) { + drm_mm_takedown(mm); + spin_unlock(&rman->lock); + kfree(rman); + man->priv = NULL; + return 0; + } + spin_unlock(&rman->lock); + return -EBUSY; +} + +static void vmw_thp_debug(struct ttm_mem_type_manager *man, + struct drm_printer *printer) +{ + struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv; + + spin_lock(&rman->lock); + 
drm_mm_print(&rman->mm, printer); + spin_unlock(&rman->lock); +} + +const struct ttm_mem_type_manager_func vmw_thp_func = { + .init = vmw_thp_init, + .takedown = vmw_thp_takedown, + .get_node = vmw_thp_get_node, + .put_node = vmw_thp_put_node, + .debug = vmw_thp_debug +}; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 3f3b2c7a208a..bf0bc4697959 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -749,7 +749,7 @@ static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, break; case TTM_PL_VRAM: /* "On-card" video ram */ - man->func = &ttm_bo_manager_func; + man->func = &vmw_thp_func; man->gpu_offset = 0; man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_FLAG_CACHED; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c index aa7e50f63b94..3c03b1746661 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c @@ -34,7 +34,10 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma) .page_mkwrite = vmw_bo_vm_mkwrite, .fault = vmw_bo_vm_fault, .open = ttm_bo_vm_open, - .close = ttm_bo_vm_close + .close = ttm_bo_vm_close, +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + .huge_fault = vmw_bo_vm_huge_fault, +#endif }; struct drm_file *file_priv = filp->private_data; struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev); diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 4be49c1aef51..1fd458e877ca 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -401,7 +401,7 @@ static int xen_drm_drv_dumb_create(struct drm_file *filp, obj = xen_drm_front_gem_create(dev, args->size); if (IS_ERR_OR_NULL(obj)) { - ret = PTR_ERR(obj); + ret = PTR_ERR_OR_ZERO(obj); goto fail; } @@ -460,9 +460,6 @@ static void xen_drm_drv_release(struct drm_device *dev) drm_atomic_helper_shutdown(dev); drm_mode_config_cleanup(dev); - drm_dev_fini(dev); - kfree(dev); - if (front_info->cfg.be_alloc) xenbus_switch_state(front_info->xb_dev, XenbusStateInitialising); @@ -561,6 +558,7 @@ fail_register: fail_modeset: drm_kms_helper_poll_fini(drm_dev); drm_mode_config_cleanup(drm_dev); + drm_dev_put(drm_dev); fail: kfree(drm_info); return ret; diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c index b98a1420dcd3..76a16d997a23 100644 --- a/drivers/gpu/drm/zte/zx_hdmi.c +++ b/drivers/gpu/drm/zte/zx_hdmi.c @@ -20,6 +20,7 @@ #include <drm/drm_of.h> #include <drm/drm_probe_helper.h> #include <drm/drm_print.h> +#include <drm/drm_simple_kms_helper.h> #include <sound/hdmi-codec.h> @@ -254,10 +255,6 @@ static const struct drm_encoder_helper_funcs zx_hdmi_encoder_helper_funcs = { .mode_set = zx_hdmi_encoder_mode_set, }; -static const struct drm_encoder_funcs zx_hdmi_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static int zx_hdmi_connector_get_modes(struct drm_connector *connector) { struct zx_hdmi *hdmi = to_zx_hdmi(connector); @@ -313,8 +310,7 @@ static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi) encoder->possible_crtcs = VOU_CRTC_MASK; - drm_encoder_init(drm, encoder, &zx_hdmi_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &zx_hdmi_encoder_helper_funcs); hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD; diff --git a/drivers/gpu/drm/zte/zx_tvenc.c 
b/drivers/gpu/drm/zte/zx_tvenc.c index c598b7daf1f1..d8a89ba383bc 100644 --- a/drivers/gpu/drm/zte/zx_tvenc.c +++ b/drivers/gpu/drm/zte/zx_tvenc.c @@ -14,6 +14,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "zx_drm_drv.h" #include "zx_tvenc_regs.h" @@ -218,10 +219,6 @@ static const struct drm_encoder_helper_funcs zx_tvenc_encoder_helper_funcs = { .mode_set = zx_tvenc_encoder_mode_set, }; -static const struct drm_encoder_funcs zx_tvenc_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static int zx_tvenc_connector_get_modes(struct drm_connector *connector) { struct zx_tvenc *tvenc = to_zx_tvenc(connector); @@ -285,8 +282,7 @@ static int zx_tvenc_register(struct drm_device *drm, struct zx_tvenc *tvenc) */ encoder->possible_crtcs = BIT(1); - drm_encoder_init(drm, encoder, &zx_tvenc_encoder_funcs, - DRM_MODE_ENCODER_TVDAC, NULL); + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TVDAC); drm_encoder_helper_add(encoder, &zx_tvenc_encoder_helper_funcs); connector->interlace_allowed = true; diff --git a/drivers/gpu/drm/zte/zx_vga.c b/drivers/gpu/drm/zte/zx_vga.c index c4fa3bbaba78..a7ed7f5ca837 100644 --- a/drivers/gpu/drm/zte/zx_vga.c +++ b/drivers/gpu/drm/zte/zx_vga.c @@ -14,6 +14,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> #include "zx_drm_drv.h" #include "zx_vga_regs.h" @@ -72,10 +73,6 @@ static const struct drm_encoder_helper_funcs zx_vga_encoder_helper_funcs = { .disable = zx_vga_encoder_disable, }; -static const struct drm_encoder_funcs zx_vga_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - static int zx_vga_connector_get_modes(struct drm_connector *connector) { struct zx_vga *vga = to_zx_vga(connector); @@ -154,8 +151,7 @@ static int zx_vga_register(struct drm_device *drm, struct zx_vga *vga) encoder->possible_crtcs = VOU_CRTC_MASK; - ret = drm_encoder_init(drm, encoder, &zx_vga_encoder_funcs, - DRM_MODE_ENCODER_DAC, NULL); + ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_DAC); if (ret) { DRM_DEV_ERROR(dev, "failed to init encoder: %d\n", ret); return ret; diff --git a/drivers/gpu/trace/Kconfig b/drivers/gpu/trace/Kconfig new file mode 100644 index 000000000000..c24e9edd022e --- /dev/null +++ b/drivers/gpu/trace/Kconfig @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config TRACE_GPU_MEM + bool diff --git a/drivers/gpu/trace/Makefile b/drivers/gpu/trace/Makefile new file mode 100644 index 000000000000..b70fbdc5847f --- /dev/null +++ b/drivers/gpu/trace/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_TRACE_GPU_MEM) += trace_gpu_mem.o diff --git a/drivers/gpu/trace/trace_gpu_mem.c b/drivers/gpu/trace/trace_gpu_mem.c new file mode 100644 index 000000000000..01e855897b6d --- /dev/null +++ b/drivers/gpu/trace/trace_gpu_mem.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * GPU memory trace points + * + * Copyright (C) 2020 Google, Inc. + */ + +#include <linux/module.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/gpu_mem.h> + +EXPORT_TRACEPOINT_SYMBOL(gpu_mem_total); |
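
Note on the virtio-gpu hunks: virtio_gpu_create_object() now allocates a driver wrapper that embeds struct drm_gem_shmem_object and hands GEM core a pointer to the embedded base. A minimal sketch of that embed-and-upcast pattern, with hypothetical "foo" names that are not part of this series:

#include <linux/types.h>
#include <linux/kernel.h>		/* container_of() */
#include <drm/drm_gem_shmem_helper.h>

struct foo_shmem_object {
	struct drm_gem_shmem_object base;	/* embedded GEM shmem object */
	u32 hw_res_handle;			/* driver-private state follows */
};

/* Recover the wrapper from the &drm_gem_object that GEM core hands back. */
static inline struct foo_shmem_object *
to_foo_shmem(struct drm_gem_object *obj)
{
	return container_of(obj, struct foo_shmem_object, base.base);
}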
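
The vkms and xen-front hunks replace the drm_dev_fini()/kfree() teardown with reference counting plus the new drm_managed infrastructure. A sketch of the allocation side of that pattern as it looked in the v5.7 era (drm_dev_init() followed by drmm_add_final_kfree()); the struct and function names below are placeholders and the exact signatures should be checked against the target kernel:

#include <linux/slab.h>
#include <linux/err.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>

struct foo_device {
	struct drm_device drm;		/* embedded DRM device */
	void *priv;			/* driver-private state */
};

static struct foo_device *foo_device_alloc(struct drm_driver *driver,
					   struct device *parent)
{
	struct foo_device *fdev;
	int ret;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev)
		return ERR_PTR(-ENOMEM);

	ret = drm_dev_init(&fdev->drm, driver, parent);
	if (ret) {
		kfree(fdev);
		return ERR_PTR(ret);
	}

	/*
	 * Free the containing structure when the last reference goes away;
	 * error paths after this point only need drm_dev_put().
	 */
	drmm_add_final_kfree(&fdev->drm, fdev);

	return fdev;
}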
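
The vkms, zx_hdmi, zx_tvenc and zx_vga hunks all drop a drm_encoder_funcs whose only callback was .destroy = drm_encoder_cleanup and call drm_simple_encoder_init() instead. A minimal sketch of the resulting registration code for a hypothetical "foo" driver:

#include <linux/bits.h>
#include <drm/drm_encoder.h>
#include <drm/drm_simple_kms_helper.h>

static int foo_encoder_register(struct drm_device *drm,
				struct drm_encoder *encoder)
{
	int ret;

	/*
	 * drm_simple_encoder_init() supplies the boilerplate funcs struct
	 * (drm_encoder_cleanup on destroy) that drivers used to open-code.
	 */
	ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
	if (ret)
		return ret;

	encoder->possible_crtcs = BIT(0);	/* illustrative CRTC mask */
	return 0;
}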
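
In the new vmwgfx_thp.c range manager, the core idea is the alignment cascade in vmw_thp_get_node(): try PUD-size alignment first, then PMD-size, then fall back to the placement's normal alignment. A standalone sketch of that decision, assuming x86-64 page sizes (4 KiB base pages, 2 MiB PMD, 1 GiB PUD); foo_pick_align_pages() is illustrative only:

#include <linux/kconfig.h>
#include <linux/mm.h>

/*
 * Return the huge-page alignment (in pages) worth requesting for a buffer
 * of num_pages, mirroring the cascade in vmw_thp_get_node() above.
 */
static unsigned long foo_pick_align_pages(unsigned long num_pages)
{
	const unsigned long pud_pages = 1UL << (30 - PAGE_SHIFT); /* 1 GiB */
	const unsigned long pmd_pages = 1UL << (21 - PAGE_SHIFT); /* 2 MiB */

	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    num_pages >= pud_pages)
		return pud_pages;

	if (num_pages >= pmd_pages)
		return pmd_pages;

	return 1;	/* no huge alignment; use mem->page_alignment as-is */
}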
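
Finally, the new CONFIG_TRACE_GPU_MEM code only defines and exports the gpu_mem_total tracepoint; drivers are expected to emit it themselves. A sketch of a caller, assuming the (gpu_id, pid, size) prototype from include/trace/events/gpu_mem.h; foo_report_total() is a hypothetical helper, not part of this patch:

#include <linux/types.h>
#include <trace/events/gpu_mem.h>

/*
 * Report the per-process total of GPU-backed memory in bytes whenever the
 * driver's accounting changes; pid == 0 conventionally reports the per-GPU
 * global total.
 */
static void foo_report_total(u32 gpu_id, u32 pid, u64 total_bytes)
{
	trace_gpu_mem_total(gpu_id, pid, total_bytes);
}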