author     Daniel Vetter <daniel.vetter@ffwll.ch>    2015-09-02 14:33:42 +0200
committer  Daniel Vetter <daniel.vetter@ffwll.ch>    2015-09-02 14:33:42 +0200
commit     e93c28f39375558409329a02a767d5cadfcc4a31 (patch)
tree       9f1b4b5ce765b887b6002cded59fc934e6c9c012 /drivers/gpu/drm
parent     85a62bf9d8ef8d974bffb91c84d4e2f7862a5a88 (diff)
parent     6fa2d197936ba0b8936e813d0adecefac160062b (diff)
Merge tag 'drm-intel-next-fixes-2015-09-02' into drm-intel-next-queued
Backmerge -fixes since there's more DDI-E related cleanups on top of
the pile of -fixes for skl that just landed for 4.3.
Conflicts:
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_lrc.c
Conflicts are all fairly harmless adjacent line stuff.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'drivers/gpu/drm')
360 files changed, 45394 insertions, 14580 deletions
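Before the diff body, one change worth a gloss: the first Kconfig/Makefile hunks add DRM_FBDEV_EMULATION and build drm_fb_helper.o under it instead of DRM_KMS_FB_HELPER, making legacy fbdev emulation a user-selectable option. As a rough illustration only (hypothetical driver code, not part of this commit; foo_fbdev_setup() is made up), a KMS driver would typically gate its fbdev setup on that symbol:

    #include <drm/drmP.h>
    #include <drm/drm_fb_helper.h>

    static int foo_fbdev_setup(struct drm_device *dev);	/* hypothetical */

    /* Hypothetical init path: only bring up the fbdev console when the
     * emulation layer is built (CONFIG_DRM_FBDEV_EMULATION=y).
     */
    static int foo_fbdev_init(struct drm_device *dev)
    {
    #ifdef CONFIG_DRM_FBDEV_EMULATION
    	return foo_fbdev_setup(dev);
    #else
    	return 0;	/* fbdev emulation compiled out; nothing to do */
    #endif
    }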
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c46ca311d8c3..1a0a8df2eed8 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -37,9 +37,29 @@ config DRM_KMS_FB_HELPER
 	select FB
 	select FRAMEBUFFER_CONSOLE if !EXPERT
 	select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
+	select FB_SYS_FOPS
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
 	help
 	  FBDEV helpers for KMS drivers.
 
+config DRM_FBDEV_EMULATION
+	bool "Enable legacy fbdev support for your modesetting driver"
+	depends on DRM
+	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
+	default y
+	help
+	  Choose this option if you have a need for the legacy fbdev
+	  support. Note that this support also provides the linux console
+	  support on top of your modesetting driver.
+
+	  If in doubt, say "Y".
+
 config DRM_LOAD_EDID_FIRMWARE
 	bool "Allow to specify an EDID data set instead of probing for it"
 	depends on DRM_KMS_HELPER
@@ -79,8 +99,6 @@ config DRM_KMS_CMA_HELPER
 
 source "drivers/gpu/drm/i2c/Kconfig"
 
-source "drivers/gpu/drm/bridge/Kconfig"
-
 config DRM_TDFX
 	tristate "3dfx Banshee/Voodoo3+"
 	depends on DRM && PCI
@@ -110,6 +128,7 @@ config DRM_RADEON
 	select POWER_SUPPLY
 	select HWMON
 	select BACKLIGHT_CLASS_DEVICE
+	select BACKLIGHT_LCD_SUPPORT
 	select INTERVAL_TREE
 	help
 	  Choose this option if you have an ATI Radeon graphics card. There
@@ -133,6 +152,7 @@ config DRM_AMDGPU
 	select POWER_SUPPLY
 	select HWMON
 	select BACKLIGHT_CLASS_DEVICE
+	select BACKLIGHT_LCD_SUPPORT
 	select INTERVAL_TREE
 	help
 	  Choose this option if you have a recent AMD Radeon graphics card.
@@ -231,10 +251,14 @@ source "drivers/gpu/drm/virtio/Kconfig"
 
 source "drivers/gpu/drm/msm/Kconfig"
 
+source "drivers/gpu/drm/fsl-dcu/Kconfig"
+
 source "drivers/gpu/drm/tegra/Kconfig"
 
 source "drivers/gpu/drm/panel/Kconfig"
 
+source "drivers/gpu/drm/bridge/Kconfig"
+
 source "drivers/gpu/drm/sti/Kconfig"
 
 source "drivers/gpu/drm/amd/amdkfd/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 5713d0534504..45e7719846b1 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -23,7 +23,7 @@ drm-$(CONFIG_OF) += drm_of.o
 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
 		drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
-drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
+drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -70,3 +70,4 @@ obj-$(CONFIG_DRM_IMX) += imx/
 obj-y			+= i2c/
 obj-y			+= panel/
 obj-y			+= bridge/
+obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 908360584e4d..04c270757030 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -3,7 +3,9 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/asic_reg \
-	-Idrivers/gpu/drm/amd/include
+	-Idrivers/gpu/drm/amd/include \
+	-Idrivers/gpu/drm/amd/amdgpu \
+	-Idrivers/gpu/drm/amd/scheduler
 
 amdgpu-y := amdgpu_drv.o
 
@@ -21,7 +23,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
-	ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
+	ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
+	amdgpu_amdkfd_gfx_v7.o
 
 amdgpu-y += \
 	vi.o
@@ -43,6 +46,7 @@ amdgpu-y += \
 	amdgpu_dpm.o \
 	cz_smc.o cz_dpm.o \
 	tonga_smc.o tonga_dpm.o \
+	fiji_smc.o fiji_dpm.o \
 	iceland_smc.o iceland_dpm.o
 
 # add DCE block
@@ -74,9 +78,17 @@ amdgpu-y += \
 # add amdkfd interfaces
 amdgpu-y += \
 	amdgpu_amdkfd.o \
-	amdgpu_amdkfd_gfx_v7.o \
 	amdgpu_amdkfd_gfx_v8.o
 
+# add cgs
+amdgpu-y += amdgpu_cgs.o
+
+# GPU scheduler
+amdgpu-y += \
+	../scheduler/gpu_scheduler.o \
+	../scheduler/sched_fence.o \
+	amdgpu_sched.o
+
 amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
 amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
 amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index baefa635169a..2fc58e658986 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -42,17 +42,19 @@
 #include <ttm/ttm_module.h>
 #include <ttm/ttm_execbuf_util.h>
 
+#include <drm/drmP.h>
 #include <drm/drm_gem.h>
 #include <drm/amdgpu_drm.h>
 
 #include "amd_shared.h"
-#include "amdgpu_family.h"
 #include "amdgpu_mode.h"
 #include "amdgpu_ih.h"
 #include "amdgpu_irq.h"
 #include "amdgpu_ucode.h"
 #include "amdgpu_gds.h"
 
+#include "gpu_scheduler.h"
+
 /*
  * Modules parameters.
*/ @@ -77,7 +79,11 @@ extern int amdgpu_bapm; extern int amdgpu_deep_color; extern int amdgpu_vm_size; extern int amdgpu_vm_block_size; +extern int amdgpu_enable_scheduler; +extern int amdgpu_sched_jobs; +extern int amdgpu_sched_hw_submission; +#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ #define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2) /* AMDGPU_IB_POOL_SIZE must be a power of 2 */ @@ -178,6 +184,7 @@ struct amdgpu_ring; struct amdgpu_semaphore; struct amdgpu_cs_parser; struct amdgpu_irq_src; +struct amdgpu_fpriv; enum amdgpu_cp_irq { AMDGPU_CP_IRQ_GFX_EOP = 0, @@ -381,10 +388,10 @@ struct amdgpu_fence_driver { uint64_t sync_seq[AMDGPU_MAX_RINGS]; atomic64_t last_seq; bool initialized; - bool delayed_irq; struct amdgpu_irq_src *irq_src; unsigned irq_type; struct delayed_work lockup_work; + wait_queue_head_t fence_queue; }; /* some special values for the owner field */ @@ -423,20 +430,18 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, struct amdgpu_irq_src *irq_src, unsigned irq_type); +void amdgpu_fence_driver_suspend(struct amdgpu_device *adev); +void amdgpu_fence_driver_resume(struct amdgpu_device *adev); int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, struct amdgpu_fence **fence); -int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner, - uint64_t seq, struct amdgpu_fence **fence); void amdgpu_fence_process(struct amdgpu_ring *ring); int amdgpu_fence_wait_next(struct amdgpu_ring *ring); int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); -bool amdgpu_fence_signaled(struct amdgpu_fence *fence); -int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible); -int amdgpu_fence_wait_any(struct amdgpu_device *adev, +signed long amdgpu_fence_wait_any(struct amdgpu_device *adev, struct amdgpu_fence **fences, - bool intr); + bool intr, long t); struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence); void amdgpu_fence_unref(struct amdgpu_fence **fence); @@ -481,7 +486,7 @@ static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a, return a->seq < b->seq; } -int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user, +int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user, void *owner, struct amdgpu_fence **fence); /* @@ -532,14 +537,16 @@ struct amdgpu_bo_va_mapping { struct amdgpu_bo_va { /* protected by bo being reserved */ struct list_head bo_list; - uint64_t addr; - struct amdgpu_fence *last_pt_update; + struct fence *last_pt_update; unsigned ref_count; - /* protected by vm mutex */ - struct list_head mappings; + /* protected by vm mutex and spinlock */ struct list_head vm_status; + /* mappings for this bo_va */ + struct list_head invalids; + struct list_head valids; + /* constant after initialization */ struct amdgpu_vm *vm; struct amdgpu_bo *bo; @@ -697,8 +704,8 @@ struct amdgpu_sync { }; void amdgpu_sync_create(struct amdgpu_sync *sync); -void amdgpu_sync_fence(struct amdgpu_sync *sync, - struct amdgpu_fence *fence); +int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, + struct fence *f); int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct reservation_object *resv, @@ -821,7 +828,9 @@ struct amdgpu_flip_work { uint64_t base; struct drm_pending_vblank_event *event; struct amdgpu_bo *old_rbo; - struct fence *fence; + struct fence *excl; + unsigned 
shared_count; + struct fence **shared; }; @@ -844,6 +853,8 @@ struct amdgpu_ib { uint32_t gws_base, gws_size; uint32_t oa_base, oa_size; uint32_t flags; + /* resulting sequence number */ + uint64_t sequence; }; enum amdgpu_ring_type { @@ -854,11 +865,23 @@ enum amdgpu_ring_type { AMDGPU_RING_TYPE_VCE }; +extern struct amd_sched_backend_ops amdgpu_sched_ops; + +int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_ib *ibs, + unsigned num_ibs, + int (*free_job)(struct amdgpu_cs_parser *), + void *owner, + struct fence **fence); + struct amdgpu_ring { struct amdgpu_device *adev; const struct amdgpu_ring_funcs *funcs; struct amdgpu_fence_driver fence_drv; + struct amd_gpu_scheduler *scheduler; + spinlock_t fence_lock; struct mutex *ring_lock; struct amdgpu_bo *ring_obj; volatile uint32_t *ring; @@ -892,6 +915,7 @@ struct amdgpu_ring { struct amdgpu_ctx *current_ctx; enum amdgpu_ring_type type; char name[16]; + bool is_pte_ring; }; /* @@ -943,18 +967,22 @@ struct amdgpu_vm { struct rb_root va; - /* protecting invalidated and freed */ + /* protecting invalidated */ spinlock_t status_lock; /* BOs moved, but not yet updated in the PT */ struct list_head invalidated; - /* BOs freed, but not yet updated in the PT */ + /* BOs cleared in the PT because of a move */ + struct list_head cleared; + + /* BO mappings freed, but not yet updated in the PT */ struct list_head freed; /* contains the page directory */ struct amdgpu_bo *page_directory; unsigned max_pde_used; + struct fence *page_directory_fence; /* array of page tables, one for each page directory entry */ struct amdgpu_vm_pt *page_tables; @@ -983,27 +1011,47 @@ struct amdgpu_vm_manager { * context related structures */ -struct amdgpu_ctx_state { - uint64_t flags; - uint32_t hangs; +#define AMDGPU_CTX_MAX_CS_PENDING 16 + +struct amdgpu_ctx_ring { + uint64_t sequence; + struct fence *fences[AMDGPU_CTX_MAX_CS_PENDING]; + struct amd_sched_entity entity; }; struct amdgpu_ctx { - /* call kref_get()before CS start and kref_put() after CS fence signaled */ - struct kref refcount; - struct amdgpu_fpriv *fpriv; - struct amdgpu_ctx_state state; - uint32_t id; - unsigned reset_counter; + struct kref refcount; + struct amdgpu_device *adev; + unsigned reset_counter; + spinlock_t ring_lock; + struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; }; struct amdgpu_ctx_mgr { - struct amdgpu_device *adev; - struct idr ctx_handles; - /* lock for IDR system */ - struct mutex lock; + struct amdgpu_device *adev; + struct mutex lock; + /* protected by lock */ + struct idr ctx_handles; }; +int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel, + struct amdgpu_ctx *ctx); +void amdgpu_ctx_fini(struct amdgpu_ctx *ctx); + +struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); +int amdgpu_ctx_put(struct amdgpu_ctx *ctx); + +uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, + struct fence *fence, uint64_t queued_seq); +struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, + struct amdgpu_ring *ring, uint64_t seq); + +int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); + +void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr); +void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr); + /* * file private structure */ @@ -1012,7 +1060,7 @@ struct amdgpu_fpriv { struct amdgpu_vm vm; struct mutex bo_list_lock; struct idr bo_list_handles; - struct amdgpu_ctx_mgr ctx_mgr; + struct amdgpu_ctx_mgr ctx_mgr; }; /* @@ -1030,6 +1078,8 @@ struct 
amdgpu_bo_list { }; struct amdgpu_bo_list * +amdgpu_bo_list_clone(struct amdgpu_bo_list *list); +struct amdgpu_bo_list * amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id); void amdgpu_bo_list_put(struct amdgpu_bo_list *list); void amdgpu_bo_list_free(struct amdgpu_bo_list *list); @@ -1205,6 +1255,14 @@ struct amdgpu_cs_parser { /* user fence */ struct amdgpu_user_fence uf; + + struct amdgpu_ring *ring; + struct mutex job_lock; + struct work_struct job_work; + int (*prepare_job)(struct amdgpu_cs_parser *sched_job); + int (*run_job)(struct amdgpu_cs_parser *sched_job); + int (*free_job)(struct amdgpu_cs_parser *sched_job); + struct amd_sched_fence *s_fence; }; static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx) @@ -1849,17 +1907,12 @@ struct amdgpu_atcs { struct amdgpu_atcs_functions functions; }; -int amdgpu_ctx_alloc(struct amdgpu_device *adev,struct amdgpu_fpriv *fpriv, - uint32_t *id,uint32_t flags); -int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, - uint32_t id); - -void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv); -struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); -int amdgpu_ctx_put(struct amdgpu_ctx *ctx); +/* + * CGS + */ +void *amdgpu_cgs_create_device(struct amdgpu_device *adev); +void amdgpu_cgs_destroy_device(void *cgs_device); -extern int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, - struct drm_file *filp); /* * Core structure, functions and helpers. @@ -1883,7 +1936,7 @@ struct amdgpu_device { struct rw_semaphore exclusive_lock; /* ASIC */ - enum amdgpu_asic_type asic_type; + enum amd_asic_type asic_type; uint32_t family; uint32_t rev_id; uint32_t external_rev_id; @@ -1976,7 +2029,6 @@ struct amdgpu_device { struct amdgpu_irq_src hpd_irq; /* rings */ - wait_queue_head_t fence_queue; unsigned fence_context; struct mutex ring_lock; unsigned num_rings; @@ -2028,6 +2080,9 @@ struct amdgpu_device { /* amdkfd interface */ struct kfd_dev *kfd; + + /* kernel conext for IB submission */ + struct amdgpu_ctx kernel_ctx; }; bool amdgpu_device_is_px(struct drm_device *dev); @@ -2215,6 +2270,12 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_card_posted(struct amdgpu_device *adev); void amdgpu_update_display_priority(struct amdgpu_device *adev); bool amdgpu_boot_test_post_card(struct amdgpu_device *adev); +struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev, + struct drm_file *filp, + struct amdgpu_ctx *ctx, + struct amdgpu_ib *ibs, + uint32_t num_ibs); + int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, u32 ip_instance, u32 ring, @@ -2278,8 +2339,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct list_head *head); -struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring, - struct amdgpu_vm *vm); +int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, + struct amdgpu_sync *sync); void amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_vm *vm, struct amdgpu_fence *updates); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index bc763e0c8f4c..496ed2192eba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -21,7 +21,7 @@ */ #include "amdgpu_amdkfd.h" -#include "amdgpu_family.h" +#include 
"amd_shared.h" #include <drm/drmP.h> #include "amdgpu.h" #include <linux/module.h> @@ -50,9 +50,11 @@ bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev) #endif switch (rdev->asic_type) { +#ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_KAVERI: kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions(); break; +#endif case CHIP_CARRIZO: kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions(); break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 2daad335b809..dd2037bc0b4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -450,7 +450,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, while (true) { temp = RREG32(mmCP_HQD_ACTIVE); - if (temp & CP_HQD_ACTIVE__ACTIVE__SHIFT) + if (temp & CP_HQD_ACTIVE__ACTIVE_MASK) break; if (timeout == 0) { pr_err("kfd: cp queue preemption time out (%dms)\n", diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 6a588371d54a..77f1d7c6ea3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -897,7 +897,7 @@ bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev, if ((id == ASIC_INTERNAL_ENGINE_SS) || (id == ASIC_INTERNAL_MEMORY_SS)) ss->rate /= 100; - if (adev->flags & AMDGPU_IS_APU) + if (adev->flags & AMD_IS_APU) amdgpu_atombios_get_igp_ss_overrides(adev, ss, id); return true; } @@ -1058,7 +1058,7 @@ void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev, SET_MEMORY_CLOCK_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock); - if (adev->flags & AMDGPU_IS_APU) + if (adev->flags & AMD_IS_APU) return; args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c index 2742b9a35cbc..759482e4300d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c @@ -42,7 +42,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence); if (r) goto exit_do_move; - r = amdgpu_fence_wait(fence, false); + r = fence_wait(&fence->base, false); if (r) goto exit_do_move; amdgpu_fence_unref(&fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index ceb444f6d418..02add0a508cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -48,7 +48,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev) resource_size_t vram_base; resource_size_t size = 256 * 1024; /* ??? 
*/ - if (!(adev->flags & AMDGPU_IS_APU)) + if (!(adev->flags & AMD_IS_APU)) if (!amdgpu_card_posted(adev)) return false; @@ -184,7 +184,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) bool found = false; /* ATRM is for the discrete card only */ - if (adev->flags & AMDGPU_IS_APU) + if (adev->flags & AMD_IS_APU) return false; while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { @@ -246,7 +246,7 @@ static inline bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev) { - if (adev->flags & AMDGPU_IS_APU) + if (adev->flags & AMD_IS_APU) return igp_read_bios_from_vram(adev); else return amdgpu_asic_read_disabled_bios(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index f82a2dd83874..7eed523bf28f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -62,6 +62,39 @@ static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv, return 0; } +struct amdgpu_bo_list * +amdgpu_bo_list_clone(struct amdgpu_bo_list *list) +{ + struct amdgpu_bo_list *result; + unsigned i; + + result = kmalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL); + if (!result) + return NULL; + + result->array = drm_calloc_large(list->num_entries, + sizeof(struct amdgpu_bo_list_entry)); + if (!result->array) { + kfree(result); + return NULL; + } + + mutex_init(&result->lock); + result->gds_obj = list->gds_obj; + result->gws_obj = list->gws_obj; + result->oa_obj = list->oa_obj; + result->has_userptr = list->has_userptr; + result->num_entries = list->num_entries; + + memcpy(result->array, list->array, list->num_entries * + sizeof(struct amdgpu_bo_list_entry)); + + for (i = 0; i < result->num_entries; ++i) + amdgpu_bo_ref(result->array[i].robj); + + return result; +} + static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id) { struct amdgpu_bo_list *list; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c new file mode 100644 index 000000000000..6b1243f9f86d --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -0,0 +1,838 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * + */ +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <drm/drmP.h> +#include <linux/firmware.h> +#include <drm/amdgpu_drm.h> +#include "amdgpu.h" +#include "cgs_linux.h" +#include "atom.h" +#include "amdgpu_ucode.h" + + +struct amdgpu_cgs_device { + struct cgs_device base; + struct amdgpu_device *adev; +}; + +#define CGS_FUNC_ADEV \ + struct amdgpu_device *adev = \ + ((struct amdgpu_cgs_device *)cgs_device)->adev + +static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type, + uint64_t *mc_start, uint64_t *mc_size, + uint64_t *mem_size) +{ + CGS_FUNC_ADEV; + switch(type) { + case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB: + case CGS_GPU_MEM_TYPE__VISIBLE_FB: + *mc_start = 0; + *mc_size = adev->mc.visible_vram_size; + *mem_size = adev->mc.visible_vram_size - adev->vram_pin_size; + break; + case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB: + case CGS_GPU_MEM_TYPE__INVISIBLE_FB: + *mc_start = adev->mc.visible_vram_size; + *mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size; + *mem_size = *mc_size; + break; + case CGS_GPU_MEM_TYPE__GART_CACHEABLE: + case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE: + *mc_start = adev->mc.gtt_start; + *mc_size = adev->mc.gtt_size; + *mem_size = adev->mc.gtt_size - adev->gart_pin_size; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem, + uint64_t size, + uint64_t min_offset, uint64_t max_offset, + cgs_handle_t *kmem_handle, uint64_t *mcaddr) +{ + CGS_FUNC_ADEV; + int ret; + struct amdgpu_bo *bo; + struct page *kmem_page = vmalloc_to_page(kmem); + int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT; + + struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages); + ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false, + AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo); + if (ret) + return ret; + ret = amdgpu_bo_reserve(bo, false); + if (unlikely(ret != 0)) + return ret; + + /* pin buffer into GTT */ + ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT, + min_offset, max_offset, mcaddr); + amdgpu_bo_unreserve(bo); + + *kmem_handle = (cgs_handle_t)bo; + return ret; +} + +static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle) +{ + struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle; + + if (obj) { + int r = amdgpu_bo_reserve(obj, false); + if (likely(r == 0)) { + amdgpu_bo_unpin(obj); + amdgpu_bo_unreserve(obj); + } + amdgpu_bo_unref(&obj); + + } + return 0; +} + +static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device, + enum cgs_gpu_mem_type type, + uint64_t size, uint64_t align, + uint64_t min_offset, uint64_t max_offset, + cgs_handle_t *handle) +{ + CGS_FUNC_ADEV; + uint16_t flags = 0; + int ret = 0; + uint32_t domain = 0; + struct amdgpu_bo *obj; + struct ttm_placement placement; + struct ttm_place place; + + if (min_offset > max_offset) { + BUG_ON(1); + return -EINVAL; + } + + /* fail if the alignment is not a power of 2 */ + if (((align != 1) && (align & (align - 1))) + || size == 0 || align == 0) + return -EINVAL; + + + switch(type) { + case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB: + case CGS_GPU_MEM_TYPE__VISIBLE_FB: + flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + domain = AMDGPU_GEM_DOMAIN_VRAM; + if (max_offset > adev->mc.real_vram_size) + return -EINVAL; + place.fpfn = min_offset >> PAGE_SHIFT; + place.lpfn = max_offset >> PAGE_SHIFT; + place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_VRAM; + break; + case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB: + case CGS_GPU_MEM_TYPE__INVISIBLE_FB: + flags = 
AMDGPU_GEM_CREATE_NO_CPU_ACCESS; + domain = AMDGPU_GEM_DOMAIN_VRAM; + if (adev->mc.visible_vram_size < adev->mc.real_vram_size) { + place.fpfn = + max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT; + place.lpfn = + min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT; + place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_VRAM; + } + + break; + case CGS_GPU_MEM_TYPE__GART_CACHEABLE: + domain = AMDGPU_GEM_DOMAIN_GTT; + place.fpfn = min_offset >> PAGE_SHIFT; + place.lpfn = max_offset >> PAGE_SHIFT; + place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; + break; + case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE: + flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC; + domain = AMDGPU_GEM_DOMAIN_GTT; + place.fpfn = min_offset >> PAGE_SHIFT; + place.lpfn = max_offset >> PAGE_SHIFT; + place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT | + TTM_PL_FLAG_UNCACHED; + break; + default: + return -EINVAL; + } + + + *handle = 0; + + placement.placement = &place; + placement.num_placement = 1; + placement.busy_placement = &place; + placement.num_busy_placement = 1; + + ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE, + true, domain, flags, + NULL, &placement, &obj); + if (ret) { + DRM_ERROR("(%d) bo create failed\n", ret); + return ret; + } + *handle = (cgs_handle_t)obj; + + return ret; +} + +static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd, + cgs_handle_t *handle) +{ + CGS_FUNC_ADEV; + int r; + uint32_t dma_handle; + struct drm_gem_object *obj; + struct amdgpu_bo *bo; + struct drm_device *dev = adev->ddev; + struct drm_file *file_priv = NULL, *priv; + + mutex_lock(&dev->struct_mutex); + list_for_each_entry(priv, &dev->filelist, lhead) { + rcu_read_lock(); + if (priv->pid == get_pid(task_pid(current))) + file_priv = priv; + rcu_read_unlock(); + if (file_priv) + break; + } + mutex_unlock(&dev->struct_mutex); + r = dev->driver->prime_fd_to_handle(dev, + file_priv, dmabuf_fd, + &dma_handle); + spin_lock(&file_priv->table_lock); + + /* Check if we currently have a reference on the object */ + obj = idr_find(&file_priv->object_idr, dma_handle); + if (obj == NULL) { + spin_unlock(&file_priv->table_lock); + return -EINVAL; + } + spin_unlock(&file_priv->table_lock); + bo = gem_to_amdgpu_bo(obj); + *handle = (cgs_handle_t)bo; + return 0; +} + +static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle) +{ + struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; + + if (obj) { + int r = amdgpu_bo_reserve(obj, false); + if (likely(r == 0)) { + amdgpu_bo_kunmap(obj); + amdgpu_bo_unpin(obj); + amdgpu_bo_unreserve(obj); + } + amdgpu_bo_unref(&obj); + + } + return 0; +} + +static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle, + uint64_t *mcaddr) +{ + int r; + u64 min_offset, max_offset; + struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; + + WARN_ON_ONCE(obj->placement.num_placement > 1); + + min_offset = obj->placements[0].fpfn << PAGE_SHIFT; + max_offset = obj->placements[0].lpfn << PAGE_SHIFT; + + r = amdgpu_bo_reserve(obj, false); + if (unlikely(r != 0)) + return r; + r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT, + min_offset, max_offset, mcaddr); + amdgpu_bo_unreserve(obj); + return r; +} + +static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle) +{ + int r; + struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; + r = amdgpu_bo_reserve(obj, false); + if (unlikely(r != 0)) + return r; + r = amdgpu_bo_unpin(obj); + amdgpu_bo_unreserve(obj); + return r; +} + +static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t 
handle, + void **map) +{ + int r; + struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; + r = amdgpu_bo_reserve(obj, false); + if (unlikely(r != 0)) + return r; + r = amdgpu_bo_kmap(obj, map); + amdgpu_bo_unreserve(obj); + return r; +} + +static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle) +{ + int r; + struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; + r = amdgpu_bo_reserve(obj, false); + if (unlikely(r != 0)) + return r; + amdgpu_bo_kunmap(obj); + amdgpu_bo_unreserve(obj); + return r; +} + +static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset) +{ + CGS_FUNC_ADEV; + return RREG32(offset); +} + +static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset, + uint32_t value) +{ + CGS_FUNC_ADEV; + WREG32(offset, value); +} + +static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device, + enum cgs_ind_reg space, + unsigned index) +{ + CGS_FUNC_ADEV; + switch (space) { + case CGS_IND_REG__MMIO: + return RREG32_IDX(index); + case CGS_IND_REG__PCIE: + return RREG32_PCIE(index); + case CGS_IND_REG__SMC: + return RREG32_SMC(index); + case CGS_IND_REG__UVD_CTX: + return RREG32_UVD_CTX(index); + case CGS_IND_REG__DIDT: + return RREG32_DIDT(index); + case CGS_IND_REG__AUDIO_ENDPT: + DRM_ERROR("audio endpt register access not implemented.\n"); + return 0; + } + WARN(1, "Invalid indirect register space"); + return 0; +} + +static void amdgpu_cgs_write_ind_register(void *cgs_device, + enum cgs_ind_reg space, + unsigned index, uint32_t value) +{ + CGS_FUNC_ADEV; + switch (space) { + case CGS_IND_REG__MMIO: + return WREG32_IDX(index, value); + case CGS_IND_REG__PCIE: + return WREG32_PCIE(index, value); + case CGS_IND_REG__SMC: + return WREG32_SMC(index, value); + case CGS_IND_REG__UVD_CTX: + return WREG32_UVD_CTX(index, value); + case CGS_IND_REG__DIDT: + return WREG32_DIDT(index, value); + case CGS_IND_REG__AUDIO_ENDPT: + DRM_ERROR("audio endpt register access not implemented.\n"); + return; + } + WARN(1, "Invalid indirect register space"); +} + +static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr) +{ + CGS_FUNC_ADEV; + uint8_t val; + int ret = pci_read_config_byte(adev->pdev, addr, &val); + if (WARN(ret, "pci_read_config_byte error")) + return 0; + return val; +} + +static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr) +{ + CGS_FUNC_ADEV; + uint16_t val; + int ret = pci_read_config_word(adev->pdev, addr, &val); + if (WARN(ret, "pci_read_config_word error")) + return 0; + return val; +} + +static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device, + unsigned addr) +{ + CGS_FUNC_ADEV; + uint32_t val; + int ret = pci_read_config_dword(adev->pdev, addr, &val); + if (WARN(ret, "pci_read_config_dword error")) + return 0; + return val; +} + +static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr, + uint8_t value) +{ + CGS_FUNC_ADEV; + int ret = pci_write_config_byte(adev->pdev, addr, value); + WARN(ret, "pci_write_config_byte error"); +} + +static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr, + uint16_t value) +{ + CGS_FUNC_ADEV; + int ret = pci_write_config_word(adev->pdev, addr, value); + WARN(ret, "pci_write_config_word error"); +} + +static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr, + uint32_t value) +{ + CGS_FUNC_ADEV; + int ret = pci_write_config_dword(adev->pdev, addr, value); + WARN(ret, "pci_write_config_dword error"); +} + +static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device, + unsigned 
table, uint16_t *size, + uint8_t *frev, uint8_t *crev) +{ + CGS_FUNC_ADEV; + uint16_t data_start; + + if (amdgpu_atom_parse_data_header( + adev->mode_info.atom_context, table, size, + frev, crev, &data_start)) + return (uint8_t*)adev->mode_info.atom_context->bios + + data_start; + + return NULL; +} + +static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table, + uint8_t *frev, uint8_t *crev) +{ + CGS_FUNC_ADEV; + + if (amdgpu_atom_parse_cmd_header( + adev->mode_info.atom_context, table, + frev, crev)) + return 0; + + return -EINVAL; +} + +static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table, + void *args) +{ + CGS_FUNC_ADEV; + + return amdgpu_atom_execute_table( + adev->mode_info.atom_context, table, args); +} + +static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request) +{ + /* TODO */ + return 0; +} + +static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request) +{ + /* TODO */ + return 0; +} + +static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request, + int active) +{ + /* TODO */ + return 0; +} + +static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request, + enum cgs_clock clock, unsigned freq) +{ + /* TODO */ + return 0; +} + +static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request, + enum cgs_engine engine, int powered) +{ + /* TODO */ + return 0; +} + + + +static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device, + enum cgs_clock clock, + struct cgs_clock_limits *limits) +{ + /* TODO */ + return 0; +} + +static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask, + const uint32_t *voltages) +{ + DRM_ERROR("not implemented"); + return -EPERM; +} + +struct cgs_irq_params { + unsigned src_id; + cgs_irq_source_set_func_t set; + cgs_irq_handler_func_t handler; + void *private_data; +}; + +static int cgs_set_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + struct cgs_irq_params *irq_params = + (struct cgs_irq_params *)src->data; + if (!irq_params) + return -EINVAL; + if (!irq_params->set) + return -EINVAL; + return irq_params->set(irq_params->private_data, + irq_params->src_id, + type, + (int)state); +} + +static int cgs_process_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + struct cgs_irq_params *irq_params = + (struct cgs_irq_params *)source->data; + if (!irq_params) + return -EINVAL; + if (!irq_params->handler) + return -EINVAL; + return irq_params->handler(irq_params->private_data, + irq_params->src_id, + entry->iv_entry); +} + +static const struct amdgpu_irq_src_funcs cgs_irq_funcs = { + .set = cgs_set_irq_state, + .process = cgs_process_irq, +}; + +static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id, + unsigned num_types, + cgs_irq_source_set_func_t set, + cgs_irq_handler_func_t handler, + void *private_data) +{ + CGS_FUNC_ADEV; + int ret = 0; + struct cgs_irq_params *irq_params; + struct amdgpu_irq_src *source = + kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL); + if (!source) + return -ENOMEM; + irq_params = + kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL); + if (!irq_params) { + kfree(source); + return -ENOMEM; + } + source->num_types = num_types; + source->funcs = &cgs_irq_funcs; + irq_params->src_id = src_id; + irq_params->set = set; + irq_params->handler = handler; + irq_params->private_data = private_data; + source->data = (void *)irq_params; + ret = 
amdgpu_irq_add_id(adev, src_id, source); + if (ret) { + kfree(irq_params); + kfree(source); + } + + return ret; +} + +static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type) +{ + CGS_FUNC_ADEV; + return amdgpu_irq_get(adev, adev->irq.sources[src_id], type); +} + +static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type) +{ + CGS_FUNC_ADEV; + return amdgpu_irq_put(adev, adev->irq.sources[src_id], type); +} + +int amdgpu_cgs_set_clockgating_state(void *cgs_device, + enum amd_ip_block_type block_type, + enum amd_clockgating_state state) +{ + CGS_FUNC_ADEV; + int i, r = -1; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + + if (adev->ip_blocks[i].type == block_type) { + r = adev->ip_blocks[i].funcs->set_clockgating_state( + (void *)adev, + state); + break; + } + } + return r; +} + +int amdgpu_cgs_set_powergating_state(void *cgs_device, + enum amd_ip_block_type block_type, + enum amd_powergating_state state) +{ + CGS_FUNC_ADEV; + int i, r = -1; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + + if (adev->ip_blocks[i].type == block_type) { + r = adev->ip_blocks[i].funcs->set_powergating_state( + (void *)adev, + state); + break; + } + } + return r; +} + + +static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type) +{ + CGS_FUNC_ADEV; + enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM; + + switch (fw_type) { + case CGS_UCODE_ID_SDMA0: + result = AMDGPU_UCODE_ID_SDMA0; + break; + case CGS_UCODE_ID_SDMA1: + result = AMDGPU_UCODE_ID_SDMA1; + break; + case CGS_UCODE_ID_CP_CE: + result = AMDGPU_UCODE_ID_CP_CE; + break; + case CGS_UCODE_ID_CP_PFP: + result = AMDGPU_UCODE_ID_CP_PFP; + break; + case CGS_UCODE_ID_CP_ME: + result = AMDGPU_UCODE_ID_CP_ME; + break; + case CGS_UCODE_ID_CP_MEC: + case CGS_UCODE_ID_CP_MEC_JT1: + result = AMDGPU_UCODE_ID_CP_MEC1; + break; + case CGS_UCODE_ID_CP_MEC_JT2: + if (adev->asic_type == CHIP_TONGA) + result = AMDGPU_UCODE_ID_CP_MEC2; + else if (adev->asic_type == CHIP_CARRIZO) + result = AMDGPU_UCODE_ID_CP_MEC1; + break; + case CGS_UCODE_ID_RLC_G: + result = AMDGPU_UCODE_ID_RLC_G; + break; + default: + DRM_ERROR("Firmware type not supported\n"); + } + return result; +} + +static int amdgpu_cgs_get_firmware_info(void *cgs_device, + enum cgs_ucode_id type, + struct cgs_firmware_info *info) +{ + CGS_FUNC_ADEV; + + if (CGS_UCODE_ID_SMU != type) { + uint64_t gpu_addr; + uint32_t data_size; + const struct gfx_firmware_header_v1_0 *header; + enum AMDGPU_UCODE_ID id; + struct amdgpu_firmware_info *ucode; + + id = fw_type_convert(cgs_device, type); + ucode = &adev->firmware.ucode[id]; + if (ucode->fw == NULL) + return -EINVAL; + + gpu_addr = ucode->mc_addr; + header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; + data_size = le32_to_cpu(header->header.ucode_size_bytes); + + if ((type == CGS_UCODE_ID_CP_MEC_JT1) || + (type == CGS_UCODE_ID_CP_MEC_JT2)) { + gpu_addr += le32_to_cpu(header->jt_offset) << 2; + data_size = le32_to_cpu(header->jt_size) << 2; + } + info->mc_addr = gpu_addr; + info->image_size = data_size; + info->version = (uint16_t)le32_to_cpu(header->header.ucode_version); + info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version); + } else { + char fw_name[30] = {0}; + int err = 0; + uint32_t ucode_size; + uint32_t ucode_start_address; + const uint8_t *src; + const struct smc_firmware_header_v1_0 *hdr; + + switch (adev->asic_type) { + case CHIP_TONGA: + strcpy(fw_name, 
"amdgpu/tonga_smc.bin"); + break; + default: + DRM_ERROR("SMC firmware not supported\n"); + return -EINVAL; + } + + err = request_firmware(&adev->pm.fw, fw_name, adev->dev); + if (err) { + DRM_ERROR("Failed to request firmware\n"); + return err; + } + + err = amdgpu_ucode_validate(adev->pm.fw); + if (err) { + DRM_ERROR("Failed to load firmware \"%s\"", fw_name); + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + return err; + } + + hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; + adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); + ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); + ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); + src = (const uint8_t *)(adev->pm.fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + + info->version = adev->pm.fw_version; + info->image_size = ucode_size; + info->kptr = (void *)src; + } + return 0; +} + +static const struct cgs_ops amdgpu_cgs_ops = { + amdgpu_cgs_gpu_mem_info, + amdgpu_cgs_gmap_kmem, + amdgpu_cgs_gunmap_kmem, + amdgpu_cgs_alloc_gpu_mem, + amdgpu_cgs_free_gpu_mem, + amdgpu_cgs_gmap_gpu_mem, + amdgpu_cgs_gunmap_gpu_mem, + amdgpu_cgs_kmap_gpu_mem, + amdgpu_cgs_kunmap_gpu_mem, + amdgpu_cgs_read_register, + amdgpu_cgs_write_register, + amdgpu_cgs_read_ind_register, + amdgpu_cgs_write_ind_register, + amdgpu_cgs_read_pci_config_byte, + amdgpu_cgs_read_pci_config_word, + amdgpu_cgs_read_pci_config_dword, + amdgpu_cgs_write_pci_config_byte, + amdgpu_cgs_write_pci_config_word, + amdgpu_cgs_write_pci_config_dword, + amdgpu_cgs_atom_get_data_table, + amdgpu_cgs_atom_get_cmd_table_revs, + amdgpu_cgs_atom_exec_cmd_table, + amdgpu_cgs_create_pm_request, + amdgpu_cgs_destroy_pm_request, + amdgpu_cgs_set_pm_request, + amdgpu_cgs_pm_request_clock, + amdgpu_cgs_pm_request_engine, + amdgpu_cgs_pm_query_clock_limits, + amdgpu_cgs_set_camera_voltages, + amdgpu_cgs_get_firmware_info, + amdgpu_cgs_set_powergating_state, + amdgpu_cgs_set_clockgating_state +}; + +static const struct cgs_os_ops amdgpu_cgs_os_ops = { + amdgpu_cgs_import_gpu_mem, + amdgpu_cgs_add_irq_source, + amdgpu_cgs_irq_get, + amdgpu_cgs_irq_put +}; + +void *amdgpu_cgs_create_device(struct amdgpu_device *adev) +{ + struct amdgpu_cgs_device *cgs_device = + kmalloc(sizeof(*cgs_device), GFP_KERNEL); + + if (!cgs_device) { + DRM_ERROR("Couldn't allocate CGS device structure\n"); + return NULL; + } + + cgs_device->base.ops = &amdgpu_cgs_ops; + cgs_device->base.os_ops = &amdgpu_cgs_os_ops; + cgs_device->adev = adev; + + return cgs_device; +} + +void amdgpu_cgs_destroy_device(void *cgs_device) +{ + kfree(cgs_device); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 1f040d85ac47..e4424b4db5d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -126,12 +126,54 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, return 0; } +static void amdgpu_job_work_func(struct work_struct *work) +{ + struct amdgpu_cs_parser *sched_job = + container_of(work, struct amdgpu_cs_parser, + job_work); + mutex_lock(&sched_job->job_lock); + if (sched_job->free_job) + sched_job->free_job(sched_job); + mutex_unlock(&sched_job->job_lock); + /* after processing job, free memory */ + fence_put(&sched_job->s_fence->base); + kfree(sched_job); +} +struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev, + struct drm_file *filp, + struct amdgpu_ctx *ctx, + struct amdgpu_ib *ibs, + uint32_t num_ibs) +{ + struct amdgpu_cs_parser *parser; + 
int i; + + parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL); + if (!parser) + return NULL; + + parser->adev = adev; + parser->filp = filp; + parser->ctx = ctx; + parser->ibs = ibs; + parser->num_ibs = num_ibs; + if (amdgpu_enable_scheduler) { + mutex_init(&parser->job_lock); + INIT_WORK(&parser->job_work, amdgpu_job_work_func); + } + for (i = 0; i < num_ibs; i++) + ibs[i].ctx = ctx; + + return parser; +} + int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) { union drm_amdgpu_cs *cs = data; uint64_t *chunk_array_user; uint64_t *chunk_array = NULL; struct amdgpu_fpriv *fpriv = p->filp->driver_priv; + struct amdgpu_bo_list *bo_list = NULL; unsigned size, i; int r = 0; @@ -143,17 +185,30 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) r = -EINVAL; goto out; } - p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle); + bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle); + if (!amdgpu_enable_scheduler) + p->bo_list = bo_list; + else { + if (bo_list && !bo_list->has_userptr) { + p->bo_list = amdgpu_bo_list_clone(bo_list); + amdgpu_bo_list_put(bo_list); + if (!p->bo_list) + return -ENOMEM; + } else if (bo_list && bo_list->has_userptr) + p->bo_list = bo_list; + else + p->bo_list = NULL; + } /* get chunks */ INIT_LIST_HEAD(&p->validated); - chunk_array = kcalloc(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL); + chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL); if (chunk_array == NULL) { r = -ENOMEM; goto out; } - chunk_array_user = (uint64_t *)(unsigned long)(cs->in.chunks); + chunk_array_user = (uint64_t __user *)(cs->in.chunks); if (copy_from_user(chunk_array, chunk_array_user, sizeof(uint64_t)*cs->in.num_chunks)) { r = -EFAULT; @@ -161,7 +216,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) } p->nchunks = cs->in.num_chunks; - p->chunks = kcalloc(p->nchunks, sizeof(struct amdgpu_cs_chunk), + p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), GFP_KERNEL); if (p->chunks == NULL) { r = -ENOMEM; @@ -173,7 +228,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) struct drm_amdgpu_cs_chunk user_chunk; uint32_t __user *cdata; - chunk_ptr = (void __user *)(unsigned long)chunk_array[i]; + chunk_ptr = (void __user *)chunk_array[i]; if (copy_from_user(&user_chunk, chunk_ptr, sizeof(struct drm_amdgpu_cs_chunk))) { r = -EFAULT; @@ -183,7 +238,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) p->chunks[i].length_dw = user_chunk.length_dw; size = p->chunks[i].length_dw; - cdata = (void __user *)(unsigned long)user_chunk.chunk_data; + cdata = (void __user *)user_chunk.chunk_data; p->chunks[i].user_ptr = cdata; p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t)); @@ -235,11 +290,10 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) } } - p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL); - if (!p->ibs) { + + p->ibs = kmalloc_array(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL); + if (!p->ibs) r = -ENOMEM; - goto out; - } out: kfree(chunk_array); @@ -415,18 +469,8 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a, return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; } -/** - * cs_parser_fini() - clean parser states - * @parser: parser structure holding parsing context. - * @error: error number - * - * If error is set than unvalidate buffer, otherwise just free memory - * used by parsing context. 
- **/ -static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) +static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff) { - unsigned i; - if (!error) { /* Sort the buffer list from the smallest to largest buffer, * which affects the order of buffers in the LRU list. @@ -447,11 +491,19 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo ttm_eu_backoff_reservation(&parser->ticket, &parser->validated); } +} +static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser) +{ + unsigned i; if (parser->ctx) amdgpu_ctx_put(parser->ctx); - if (parser->bo_list) - amdgpu_bo_list_put(parser->bo_list); + if (parser->bo_list) { + if (amdgpu_enable_scheduler && !parser->bo_list->has_userptr) + amdgpu_bo_list_free(parser->bo_list); + else + amdgpu_bo_list_put(parser->bo_list); + } drm_free_large(parser->vm_bos); for (i = 0; i < parser->nchunks; i++) drm_free_large(parser->chunks[i].kdata); @@ -462,6 +514,29 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo kfree(parser->ibs); if (parser->uf.bo) drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); + + if (!amdgpu_enable_scheduler) + kfree(parser); +} + +/** + * cs_parser_fini() - clean parser states + * @parser: parser structure holding parsing context. + * @error: error number + * + * If error is set than unvalidate buffer, otherwise just free memory + * used by parsing context. + **/ +static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) +{ + amdgpu_cs_parser_fini_early(parser, error, backoff); + amdgpu_cs_parser_fini_late(parser); +} + +static int amdgpu_cs_parser_free_job(struct amdgpu_cs_parser *sched_job) +{ + amdgpu_cs_parser_fini_late(sched_job); + return 0; } static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, @@ -476,12 +551,18 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, if (r) return r; + r = amdgpu_sync_fence(adev, &p->ibs[0].sync, vm->page_directory_fence); + if (r) + return r; + r = amdgpu_vm_clear_freed(adev, vm); if (r) return r; if (p->bo_list) { for (i = 0; i < p->bo_list->num_entries; i++) { + struct fence *f; + /* ignore duplicates */ bo = p->bo_list->array[i].robj; if (!bo) @@ -495,7 +576,10 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, if (r) return r; - amdgpu_sync_fence(&p->ibs[0].sync, bo_va->last_pt_update); + f = bo_va->last_pt_update; + r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f); + if (r) + return r; } } @@ -529,9 +613,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, goto out; } amdgpu_cs_sync_rings(parser); - - r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs, - parser->filp); + if (!amdgpu_enable_scheduler) + r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs, + parser->filp); out: mutex_unlock(&vm->mutex); @@ -650,7 +734,6 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, ib->oa_size = amdgpu_bo_size(oa); } } - /* wrap the last IB with user fence */ if (parser->uf.bo) { struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1]; @@ -693,9 +776,9 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, sizeof(struct drm_amdgpu_cs_chunk_dep); for (j = 0; j < num_deps; ++j) { - struct amdgpu_fence *fence; struct amdgpu_ring *ring; struct amdgpu_ctx *ctx; + struct fence *fence; r = amdgpu_cs_get_ring(adev, deps[j].ip_type, deps[j].ip_instance, @@ -707,50 +790,34 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, 
if (ctx == NULL) return -EINVAL; - r = amdgpu_fence_recreate(ring, p->filp, - deps[j].handle, - &fence); - if (r) { + fence = amdgpu_ctx_get_fence(ctx, ring, + deps[j].handle); + if (IS_ERR(fence)) { + r = PTR_ERR(fence); amdgpu_ctx_put(ctx); return r; - } - amdgpu_sync_fence(&ib->sync, fence); - amdgpu_fence_unref(&fence); - amdgpu_ctx_put(ctx); + } else if (fence) { + r = amdgpu_sync_fence(adev, &ib->sync, fence); + fence_put(fence); + amdgpu_ctx_put(ctx); + if (r) + return r; + } } } return 0; } -int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) +static int amdgpu_cs_parser_prepare_job(struct amdgpu_cs_parser *sched_job) { - struct amdgpu_device *adev = dev->dev_private; - union drm_amdgpu_cs *cs = data; - struct amdgpu_cs_parser parser; int r, i; + struct amdgpu_cs_parser *parser = sched_job; + struct amdgpu_device *adev = sched_job->adev; bool reserved_buffers = false; - down_read(&adev->exclusive_lock); - if (!adev->accel_working) { - up_read(&adev->exclusive_lock); - return -EBUSY; - } - /* initialize parser */ - memset(&parser, 0, sizeof(struct amdgpu_cs_parser)); - parser.filp = filp; - parser.adev = adev; - r = amdgpu_cs_parser_init(&parser, data); - if (r) { - DRM_ERROR("Failed to initialize parser !\n"); - amdgpu_cs_parser_fini(&parser, r, false); - up_read(&adev->exclusive_lock); - r = amdgpu_cs_handle_lockup(adev, r); - return r; - } - - r = amdgpu_cs_parser_relocs(&parser); + r = amdgpu_cs_parser_relocs(parser); if (r) { if (r != -ERESTARTSYS) { if (r == -ENOMEM) @@ -762,30 +829,114 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (!r) { reserved_buffers = true; - r = amdgpu_cs_ib_fill(adev, &parser); + r = amdgpu_cs_ib_fill(adev, parser); + } + if (!r) { + r = amdgpu_cs_dependencies(adev, parser); + if (r) + DRM_ERROR("Failed in the dependencies handling %d!\n", r); } + if (r) { + amdgpu_cs_parser_fini(parser, r, reserved_buffers); + return r; + } + + for (i = 0; i < parser->num_ibs; i++) + trace_amdgpu_cs(parser, i); + + r = amdgpu_cs_ib_vm_chunk(adev, parser); + return r; +} + +static struct amdgpu_ring *amdgpu_cs_parser_get_ring( + struct amdgpu_device *adev, + struct amdgpu_cs_parser *parser) +{ + int i, r; + + struct amdgpu_cs_chunk *chunk; + struct drm_amdgpu_cs_chunk_ib *chunk_ib; + struct amdgpu_ring *ring; + for (i = 0; i < parser->nchunks; i++) { + chunk = &parser->chunks[i]; + chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata; + + if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB) + continue; + + r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type, + chunk_ib->ip_instance, chunk_ib->ring, + &ring); + if (r) + return NULL; + break; + } + return ring; +} + +int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) +{ + struct amdgpu_device *adev = dev->dev_private; + union drm_amdgpu_cs *cs = data; + struct amdgpu_cs_parser *parser; + int r; - if (!r) - r = amdgpu_cs_dependencies(adev, &parser); + down_read(&adev->exclusive_lock); + if (!adev->accel_working) { + up_read(&adev->exclusive_lock); + return -EBUSY; + } + parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0); + if (!parser) + return -ENOMEM; + r = amdgpu_cs_parser_init(parser, data); if (r) { - amdgpu_cs_parser_fini(&parser, r, reserved_buffers); + DRM_ERROR("Failed to initialize parser !\n"); + amdgpu_cs_parser_fini(parser, r, false); up_read(&adev->exclusive_lock); r = amdgpu_cs_handle_lockup(adev, r); return r; } - for (i = 0; i < parser.num_ibs; i++) - trace_amdgpu_cs(&parser, i); + if (amdgpu_enable_scheduler && 
parser->num_ibs) { + struct amdgpu_ring * ring = + amdgpu_cs_parser_get_ring(adev, parser); + r = amdgpu_cs_parser_prepare_job(parser); + if (r) + goto out; + parser->ring = ring; + parser->free_job = amdgpu_cs_parser_free_job; + mutex_lock(&parser->job_lock); + r = amd_sched_push_job(ring->scheduler, + &parser->ctx->rings[ring->idx].entity, + parser, + &parser->s_fence); + if (r) { + mutex_unlock(&parser->job_lock); + goto out; + } + parser->ibs[parser->num_ibs - 1].sequence = + amdgpu_ctx_add_fence(parser->ctx, ring, + &parser->s_fence->base, + parser->s_fence->v_seq); + cs->out.handle = parser->s_fence->v_seq; + list_sort(NULL, &parser->validated, cmp_size_smaller_first); + ttm_eu_fence_buffer_objects(&parser->ticket, + &parser->validated, + &parser->s_fence->base); - r = amdgpu_cs_ib_vm_chunk(adev, &parser); - if (r) { - goto out; + mutex_unlock(&parser->job_lock); + up_read(&adev->exclusive_lock); + return 0; } + r = amdgpu_cs_parser_prepare_job(parser); + if (r) + goto out; - cs->out.handle = parser.ibs[parser.num_ibs - 1].fence->seq; + cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence; out: - amdgpu_cs_parser_fini(&parser, r, true); + amdgpu_cs_parser_fini(parser, r, true); up_read(&adev->exclusive_lock); r = amdgpu_cs_handle_lockup(adev, r); return r; @@ -806,30 +957,29 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, union drm_amdgpu_wait_cs *wait = data; struct amdgpu_device *adev = dev->dev_private; unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); - struct amdgpu_fence *fence = NULL; struct amdgpu_ring *ring = NULL; struct amdgpu_ctx *ctx; + struct fence *fence; long r; - ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id); - if (ctx == NULL) - return -EINVAL; - r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance, wait->in.ring, &ring); - if (r) { - amdgpu_ctx_put(ctx); + if (r) return r; - } - r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence); - if (r) { - amdgpu_ctx_put(ctx); - return r; - } + ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id); + if (ctx == NULL) + return -EINVAL; + + fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle); + if (IS_ERR(fence)) + r = PTR_ERR(fence); + else if (fence) { + r = fence_wait_timeout(fence, true, timeout); + fence_put(fence); + } else + r = 1; - r = fence_wait_timeout(&fence->base, true, timeout); - amdgpu_fence_unref(&fence); amdgpu_ctx_put(ctx); if (r < 0) return r; @@ -864,7 +1014,16 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, if (!reloc->bo_va) continue; - list_for_each_entry(mapping, &reloc->bo_va->mappings, list) { + list_for_each_entry(mapping, &reloc->bo_va->valids, list) { + if (mapping->it.start > addr || + addr > mapping->it.last) + continue; + + *bo = reloc->bo_va->bo; + return mapping; + } + + list_for_each_entry(mapping, &reloc->bo_va->invalids, list) { if (mapping->it.start > addr || addr > mapping->it.last) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 6c66ac8a1891..08bc7722ddb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -25,54 +25,107 @@ #include <drm/drmP.h> #include "amdgpu.h" -static void amdgpu_ctx_do_release(struct kref *ref) +int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel, + struct amdgpu_ctx *ctx) { - struct amdgpu_ctx *ctx; - struct amdgpu_ctx_mgr *mgr; + unsigned i, j; + int r; - ctx = container_of(ref, struct amdgpu_ctx, refcount); - mgr = &ctx->fpriv->ctx_mgr; + memset(ctx, 0, 
sizeof(*ctx)); + ctx->adev = adev; + kref_init(&ctx->refcount); + spin_lock_init(&ctx->ring_lock); + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) + ctx->rings[i].sequence = 1; - idr_remove(&mgr->ctx_handles, ctx->id); - kfree(ctx); + if (amdgpu_enable_scheduler) { + /* create context entity for each ring */ + for (i = 0; i < adev->num_rings; i++) { + struct amd_sched_rq *rq; + if (kernel) + rq = &adev->rings[i]->scheduler->kernel_rq; + else + rq = &adev->rings[i]->scheduler->sched_rq; + r = amd_sched_entity_init(adev->rings[i]->scheduler, + &ctx->rings[i].entity, + rq, amdgpu_sched_jobs); + if (r) + break; + } + + if (i < adev->num_rings) { + for (j = 0; j < i; j++) + amd_sched_entity_fini(adev->rings[j]->scheduler, + &ctx->rings[j].entity); + kfree(ctx); + return r; + } + } + return 0; } -int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t *id, uint32_t flags) +void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) +{ + struct amdgpu_device *adev = ctx->adev; + unsigned i, j; + + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) + for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j) + fence_put(ctx->rings[i].fences[j]); + + if (amdgpu_enable_scheduler) { + for (i = 0; i < adev->num_rings; i++) + amd_sched_entity_fini(adev->rings[i]->scheduler, + &ctx->rings[i].entity); + } +} + +static int amdgpu_ctx_alloc(struct amdgpu_device *adev, + struct amdgpu_fpriv *fpriv, + uint32_t *id) { - int r; - struct amdgpu_ctx *ctx; struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; + struct amdgpu_ctx *ctx; + int r; ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; mutex_lock(&mgr->lock); - r = idr_alloc(&mgr->ctx_handles, ctx, 0, 0, GFP_KERNEL); + r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL); if (r < 0) { mutex_unlock(&mgr->lock); kfree(ctx); return r; } *id = (uint32_t)r; - - memset(ctx, 0, sizeof(*ctx)); - ctx->id = *id; - ctx->fpriv = fpriv; - kref_init(&ctx->refcount); + r = amdgpu_ctx_init(adev, false, ctx); mutex_unlock(&mgr->lock); - return 0; + return r; } -int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id) +static void amdgpu_ctx_do_release(struct kref *ref) { struct amdgpu_ctx *ctx; + + ctx = container_of(ref, struct amdgpu_ctx, refcount); + + amdgpu_ctx_fini(ctx); + + kfree(ctx); +} + +static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id) +{ struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; + struct amdgpu_ctx *ctx; mutex_lock(&mgr->lock); ctx = idr_find(&mgr->ctx_handles, id); if (ctx) { + idr_remove(&mgr->ctx_handles, id); kref_put(&ctx->refcount, amdgpu_ctx_do_release); mutex_unlock(&mgr->lock); return 0; @@ -86,9 +139,13 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev, union drm_amdgpu_ctx_out *out) { struct amdgpu_ctx *ctx; - struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; + struct amdgpu_ctx_mgr *mgr; unsigned reset_counter; + if (!fpriv) + return -EINVAL; + + mgr = &fpriv->ctx_mgr; mutex_lock(&mgr->lock); ctx = idr_find(&mgr->ctx_handles, id); if (!ctx) { @@ -97,8 +154,8 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev, } /* TODO: these two are always zero */ - out->state.flags = ctx->state.flags; - out->state.hangs = ctx->state.hangs; + out->state.flags = 0x0; + out->state.hangs = 0x0; /* determine if a GPU reset has occured since the last call */ reset_counter = atomic_read(&adev->gpu_reset_counter); @@ -113,28 +170,11 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev, return 0; } -void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv) -{ - struct idr *idp; - struct amdgpu_ctx *ctx; - uint32_t 
id; - struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; - idp = &mgr->ctx_handles; - - idr_for_each_entry(idp,ctx,id) { - if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1) - DRM_ERROR("ctx (id=%ul) is still alive\n",ctx->id); - } - - mutex_destroy(&mgr->lock); -} - int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { int r; uint32_t id; - uint32_t flags; union drm_amdgpu_ctx *args = data; struct amdgpu_device *adev = dev->dev_private; @@ -142,15 +182,14 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, r = 0; id = args->in.ctx_id; - flags = args->in.flags; switch (args->in.op) { case AMDGPU_CTX_OP_ALLOC_CTX: - r = amdgpu_ctx_alloc(adev, fpriv, &id, flags); + r = amdgpu_ctx_alloc(adev, fpriv, &id); args->out.alloc.ctx_id = id; break; case AMDGPU_CTX_OP_FREE_CTX: - r = amdgpu_ctx_free(adev, fpriv, id); + r = amdgpu_ctx_free(fpriv, id); break; case AMDGPU_CTX_OP_QUERY_STATE: r = amdgpu_ctx_query(adev, fpriv, id, &args->out); @@ -165,7 +204,12 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id) { struct amdgpu_ctx *ctx; - struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; + struct amdgpu_ctx_mgr *mgr; + + if (!fpriv) + return NULL; + + mgr = &fpriv->ctx_mgr; mutex_lock(&mgr->lock); ctx = idr_find(&mgr->ctx_handles, id); @@ -177,17 +221,96 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id) int amdgpu_ctx_put(struct amdgpu_ctx *ctx) { - struct amdgpu_fpriv *fpriv; - struct amdgpu_ctx_mgr *mgr; - if (ctx == NULL) return -EINVAL; - fpriv = ctx->fpriv; - mgr = &fpriv->ctx_mgr; - mutex_lock(&mgr->lock); kref_put(&ctx->refcount, amdgpu_ctx_do_release); - mutex_unlock(&mgr->lock); - return 0; } + +uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, + struct fence *fence, uint64_t queued_seq) +{ + struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx]; + uint64_t seq = 0; + unsigned idx = 0; + struct fence *other = NULL; + + if (amdgpu_enable_scheduler) + seq = queued_seq; + else + seq = cring->sequence; + idx = seq % AMDGPU_CTX_MAX_CS_PENDING; + other = cring->fences[idx]; + if (other) { + signed long r; + r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT); + if (r < 0) + DRM_ERROR("Error (%ld) waiting for fence!\n", r); + } + + fence_get(fence); + + spin_lock(&ctx->ring_lock); + cring->fences[idx] = fence; + if (!amdgpu_enable_scheduler) + cring->sequence++; + spin_unlock(&ctx->ring_lock); + + fence_put(other); + + return seq; +} + +struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, + struct amdgpu_ring *ring, uint64_t seq) +{ + struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx]; + struct fence *fence; + uint64_t queued_seq; + + spin_lock(&ctx->ring_lock); + if (amdgpu_enable_scheduler) + queued_seq = amd_sched_next_queued_seq(&cring->entity); + else + queued_seq = cring->sequence; + + if (seq >= queued_seq) { + spin_unlock(&ctx->ring_lock); + return ERR_PTR(-EINVAL); + } + + + if (seq + AMDGPU_CTX_MAX_CS_PENDING < queued_seq) { + spin_unlock(&ctx->ring_lock); + return NULL; + } + + fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]); + spin_unlock(&ctx->ring_lock); + + return fence; +} + +void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr) +{ + mutex_init(&mgr->lock); + idr_init(&mgr->ctx_handles); +} + +void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr) +{ + struct amdgpu_ctx *ctx; + struct idr *idp; + uint32_t id; + + idp = &mgr->ctx_handles; + + idr_for_each_entry(idp, ctx, id) { + if 
(kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1) + DRM_ERROR("ctx %p is still alive\n", ctx); + } + + idr_destroy(&mgr->ctx_handles); + mutex_destroy(&mgr->lock); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 99f158e1baff..42d1a22c1199 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -55,6 +55,7 @@ static const char *amdgpu_asic_name[] = { "MULLINS", "TOPAZ", "TONGA", + "FIJI", "CARRIZO", "LAST", }; @@ -63,7 +64,7 @@ bool amdgpu_device_is_px(struct drm_device *dev) { struct amdgpu_device *adev = dev->dev_private; - if (adev->flags & AMDGPU_IS_PX) + if (adev->flags & AMD_IS_PX) return true; return false; } @@ -1160,6 +1161,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_TOPAZ: case CHIP_TONGA: + case CHIP_FIJI: case CHIP_CARRIZO: if (adev->asic_type == CHIP_CARRIZO) adev->family = AMDGPU_FAMILY_CZ; @@ -1377,7 +1379,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->ddev = ddev; adev->pdev = pdev; adev->flags = flags; - adev->asic_type = flags & AMDGPU_ASIC_MASK; + adev->asic_type = flags & AMD_ASIC_MASK; adev->is_atom_bios = false; adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; adev->mc.gtt_size = 512 * 1024 * 1024; @@ -1523,6 +1525,11 @@ int amdgpu_device_init(struct amdgpu_device *adev, return r; } + r = amdgpu_ctx_init(adev, true, &adev->kernel_ctx); + if (r) { + dev_err(adev->dev, "failed to create kernel context (%d).\n", r); + return r; + } r = amdgpu_ib_ring_tests(adev); if (r) DRM_ERROR("ib ring test failed (%d).\n", r); @@ -1584,6 +1591,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev) adev->shutdown = true; /* evict vram memory */ amdgpu_bo_evict_vram(adev); + amdgpu_ctx_fini(&adev->kernel_ctx); amdgpu_ib_pool_fini(adev); amdgpu_fence_driver_fini(adev); amdgpu_fbdev_fini(adev); @@ -1627,8 +1635,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) struct amdgpu_device *adev; struct drm_crtc *crtc; struct drm_connector *connector; - int i, r; - bool force_completion = false; + int r; if (dev == NULL || dev->dev_private == NULL) { return -ENODEV; @@ -1667,21 +1674,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) /* evict vram memory */ amdgpu_bo_evict_vram(adev); - /* wait for gpu to finish processing current batch */ - for (i = 0; i < AMDGPU_MAX_RINGS; i++) { - struct amdgpu_ring *ring = adev->rings[i]; - if (!ring) - continue; - - r = amdgpu_fence_wait_empty(ring); - if (r) { - /* delay GPU reset to resume */ - force_completion = true; - } - } - if (force_completion) { - amdgpu_fence_driver_force_completion(adev); - } + amdgpu_fence_driver_suspend(adev); r = amdgpu_suspend(adev); @@ -1739,6 +1732,8 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) r = amdgpu_resume(adev); + amdgpu_fence_driver_resume(adev); + r = amdgpu_ib_ring_tests(adev); if (r) DRM_ERROR("ib ring test failed (%d).\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index b16b9256883e..e3d70772b531 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -35,6 +35,36 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_edid.h> +static void amdgpu_flip_wait_fence(struct amdgpu_device *adev, + struct fence **f) +{ + struct amdgpu_fence *fence; + long r; + + if (*f == NULL) + return; + + fence = to_amdgpu_fence(*f); + if (fence) { + r 
= fence_wait(&fence->base, false); + if (r == -EDEADLK) { + up_read(&adev->exclusive_lock); + r = amdgpu_gpu_reset(adev); + down_read(&adev->exclusive_lock); + } + } else + r = fence_wait(*f, false); + + if (r) + DRM_ERROR("failed to wait on page flip fence (%ld)!\n", r); + + /* We continue with the page flip even if we failed to wait on + * the fence, otherwise the DRM core and userspace will be + * confused about which BO the CRTC is scanning out + */ + fence_put(*f); + *f = NULL; +} static void amdgpu_flip_work_func(struct work_struct *__work) { @@ -44,34 +74,13 @@ static void amdgpu_flip_work_func(struct work_struct *__work) struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id]; struct drm_crtc *crtc = &amdgpuCrtc->base; - struct amdgpu_fence *fence; unsigned long flags; - int r; + unsigned i; down_read(&adev->exclusive_lock); - if (work->fence) { - fence = to_amdgpu_fence(work->fence); - if (fence) { - r = amdgpu_fence_wait(fence, false); - if (r == -EDEADLK) { - up_read(&adev->exclusive_lock); - r = amdgpu_gpu_reset(adev); - down_read(&adev->exclusive_lock); - } - } else - r = fence_wait(work->fence, false); - - if (r) - DRM_ERROR("failed to wait on page flip fence (%d)!\n", r); - - /* We continue with the page flip even if we failed to wait on - * the fence, otherwise the DRM core and userspace will be - * confused about which BO the CRTC is scanning out - */ - - fence_put(work->fence); - work->fence = NULL; - } + amdgpu_flip_wait_fence(adev, &work->excl); + for (i = 0; i < work->shared_count; ++i) + amdgpu_flip_wait_fence(adev, &work->shared[i]); /* We borrow the event spin lock for protecting flip_status */ spin_lock_irqsave(&crtc->dev->event_lock, flags); @@ -108,6 +117,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work) DRM_ERROR("failed to reserve buffer after flip\n"); drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); + kfree(work->shared); kfree(work); } @@ -127,7 +137,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, unsigned long flags; u64 tiling_flags; u64 base; - int r; + int i, r; work = kzalloc(sizeof *work, GFP_KERNEL); if (work == NULL) @@ -167,7 +177,19 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, goto cleanup; } - work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv)); + r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl, + &work->shared_count, + &work->shared); + if (unlikely(r != 0)) { + amdgpu_bo_unreserve(new_rbo); + DRM_ERROR("failed to get fences for buffer\n"); + goto cleanup; + } + + fence_get(work->excl); + for (i = 0; i < work->shared_count; ++i) + fence_get(work->shared[i]); + amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags); amdgpu_bo_unreserve(new_rbo); @@ -212,7 +234,10 @@ pflip_cleanup: cleanup: drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); - fence_put(work->fence); + fence_put(work->excl); + for (i = 0; i < work->shared_count; ++i) + fence_put(work->shared[i]); + kfree(work->shared); kfree(work); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 115906f5fda0..e6fa27805207 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -63,7 +63,7 @@ int amdgpu_disp_priority = 0; int amdgpu_hw_i2c = 0; int amdgpu_pcie_gen2 = -1; int amdgpu_msi = -1; -int amdgpu_lockup_timeout = 10000; +int amdgpu_lockup_timeout = 0; int amdgpu_dpm = -1; int amdgpu_smc_load_fw = 1; int amdgpu_aspm = -1; @@ -75,6 +75,9 @@ int amdgpu_deep_color = 0; int 
amdgpu_vm_size = 8; int amdgpu_vm_block_size = -1; int amdgpu_exp_hw_support = 0; +int amdgpu_enable_scheduler = 0; +int amdgpu_sched_jobs = 16; +int amdgpu_sched_hw_submission = 2; MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); @@ -103,7 +106,7 @@ module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444); MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)"); module_param_named(msi, amdgpu_msi, int, 0444); -MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (defaul 10000 = 10 seconds, 0 = disable)"); +MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 0 = disable)"); module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444); MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)"); @@ -139,36 +142,45 @@ module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444); MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))"); module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444); +MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable, 0 = disable (default))"); +module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444); + +MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 16)"); +module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444); + +MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)"); +module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); + static struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_CIK /* Kaveri */ - {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 
0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, + {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, /* Bonaire */ - {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY}, - {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY}, - {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY}, - {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY}, + {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY}, + {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY}, + {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY}, + {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY}, {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE}, {0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE}, {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE}, @@ -190,39 +202,39 @@ static struct pci_device_id pciidlist[] = { {0x1002, 0x67BA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII}, {0x1002, 0x67BE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII}, /* Kabini */ - {0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, - {0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, - {0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, - {0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_KABINI|AMDGPU_IS_APU}, - {0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, - {0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, - {0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, - {0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, - {0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, + {0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, + {0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, + {0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, + {0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, + {0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, + {0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, + {0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, + {0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, + {0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, /* mullins */ - {0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, + {0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, #endif /* topaz */ {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, @@ -240,12 +252,14 @@ static struct pci_device_id pciidlist[] = { {0x1002, 0x6930, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, {0x1002, 0x6938, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, {0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, + /* fiji */ + {0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI}, /* carrizo */ - {0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU}, - {0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU}, - {0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU}, - {0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU}, - {0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU}, + {0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, + {0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, + {0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, + {0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, + {0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, {0, 0, 0} }; @@ -281,7 +295,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, unsigned long flags = ent->driver_data; int ret; - if ((flags & AMDGPU_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) { + if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) { DRM_INFO("This hardware requires experimental hardware support.\n" "See modparam exp_hw_support\n"); return -ENODEV; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h index cceeb33c447a..e3a4f7048042 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h @@ -31,7 +31,7 @@ #include <linux/firmware.h> #include <linux/platform_device.h> -#include "amdgpu_family.h" +#include "amd_shared.h" /* General customization: */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 
c1645d21f8e2..81b821247dde 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -53,9 +53,9 @@ static struct fb_ops amdgpufb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, + .fb_fillrect = drm_fb_helper_cfb_fillrect, + .fb_copyarea = drm_fb_helper_cfb_copyarea, + .fb_imageblit = drm_fb_helper_cfb_imageblit, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, @@ -179,7 +179,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper, struct drm_mode_fb_cmd2 mode_cmd; struct drm_gem_object *gobj = NULL; struct amdgpu_bo *rbo = NULL; - struct device *device = &adev->pdev->dev; int ret; unsigned long tmp; @@ -201,9 +200,9 @@ static int amdgpufb_create(struct drm_fb_helper *helper, rbo = gem_to_amdgpu_bo(gobj); /* okay we have an object now allocate the framebuffer */ - info = framebuffer_alloc(0, device); - if (info == NULL) { - ret = -ENOMEM; + info = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(info)) { + ret = PTR_ERR(info); goto out_unref; } @@ -212,14 +211,13 @@ static int amdgpufb_create(struct drm_fb_helper *helper, ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj); if (ret) { DRM_ERROR("failed to initialize framebuffer %d\n", ret); - goto out_unref; + goto out_destroy_fbi; } fb = &rfbdev->rfb.base; /* setup helper */ rfbdev->helper.fb = fb; - rfbdev->helper.fbdev = info; memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo)); @@ -239,11 +237,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper, drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); /* setup aperture base/size for vesafb takeover */ - info->apertures = alloc_apertures(1); - if (!info->apertures) { - ret = -ENOMEM; - goto out_unref; - } info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base; info->apertures->ranges[0].size = adev->mc.aper_size; @@ -251,13 +244,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper, if (info->screen_base == NULL) { ret = -ENOSPC; - goto out_unref; - } - - ret = fb_alloc_cmap(&info->cmap, 256, 0); - if (ret) { - ret = -ENOMEM; - goto out_unref; + goto out_destroy_fbi; } DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); @@ -269,6 +256,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper, vga_switcheroo_client_fb_set(adev->ddev->pdev, info); return 0; +out_destroy_fbi: + drm_fb_helper_release_fbi(helper); out_unref: if (rbo) { @@ -290,17 +279,10 @@ void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev) static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev) { - struct fb_info *info; struct amdgpu_framebuffer *rfb = &rfbdev->rfb; - if (rfbdev->helper.fbdev) { - info = rfbdev->helper.fbdev; - - unregister_framebuffer(info); - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - framebuffer_release(info); - } + drm_fb_helper_unregister_fbi(&rfbdev->helper); + drm_fb_helper_release_fbi(&rfbdev->helper); if (rfb->obj) { amdgpufb_destroy_pinned_object(rfb->obj); @@ -395,7 +377,8 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev) void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state) { if (adev->mode_info.rfbdev) - fb_set_suspend(adev->mode_info.rfbdev->helper.fbdev, state); + drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper, + state); } int amdgpu_fbdev_total_size(struct amdgpu_device 
*adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index a7189a1fa6a1..98500f1756f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -126,7 +126,8 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, (*fence)->ring = ring; (*fence)->owner = owner; fence_init(&(*fence)->base, &amdgpu_fence_ops, - &adev->fence_queue.lock, adev->fence_context + ring->idx, + &ring->fence_drv.fence_queue.lock, + adev->fence_context + ring->idx, (*fence)->seq); amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, (*fence)->seq, @@ -136,38 +137,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, } /** - * amdgpu_fence_recreate - recreate a fence from an user fence - * - * @ring: ring the fence is associated with - * @owner: creator of the fence - * @seq: user fence sequence number - * @fence: resulting amdgpu fence object - * - * Recreates a fence command from the user fence sequence number (all asics). - * Returns 0 on success, -ENOMEM on failure. - */ -int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner, - uint64_t seq, struct amdgpu_fence **fence) -{ - struct amdgpu_device *adev = ring->adev; - - if (seq > ring->fence_drv.sync_seq[ring->idx]) - return -EINVAL; - - *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL); - if ((*fence) == NULL) - return -ENOMEM; - - (*fence)->seq = seq; - (*fence)->ring = ring; - (*fence)->owner = owner; - fence_init(&(*fence)->base, &amdgpu_fence_ops, - &adev->fence_queue.lock, adev->fence_context + ring->idx, - (*fence)->seq); - return 0; -} - -/** * amdgpu_fence_check_signaled - callback from fence_queue * * this function is called with fence_queue lock held, which is also used @@ -196,9 +165,7 @@ static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int fl else FENCE_TRACE(&fence->base, "was already signaled\n"); - amdgpu_irq_put(adev, fence->ring->fence_drv.irq_src, - fence->ring->fence_drv.irq_type); - __remove_wait_queue(&adev->fence_queue, &fence->fence_wake); + __remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake); fence_put(&fence->base); } else FENCE_TRACE(&fence->base, "pending\n"); @@ -299,14 +266,9 @@ static void amdgpu_fence_check_lockup(struct work_struct *work) return; } - if (fence_drv->delayed_irq && ring->adev->ddev->irq_enabled) { - fence_drv->delayed_irq = false; - amdgpu_irq_update(ring->adev, fence_drv->irq_src, - fence_drv->irq_type); + if (amdgpu_fence_activity(ring)) { + wake_up_all(&ring->fence_drv.fence_queue); } - - if (amdgpu_fence_activity(ring)) - wake_up_all(&ring->adev->fence_queue); else if (amdgpu_ring_is_lockup(ring)) { /* good news we believe it's a lockup */ dev_warn(ring->adev->dev, "GPU lockup (current fence id " @@ -316,7 +278,7 @@ static void amdgpu_fence_check_lockup(struct work_struct *work) /* remember that we need an reset */ ring->adev->needs_reset = true; - wake_up_all(&ring->adev->fence_queue); + wake_up_all(&ring->fence_drv.fence_queue); } up_read(&ring->adev->exclusive_lock); } @@ -332,62 +294,8 @@ static void amdgpu_fence_check_lockup(struct work_struct *work) */ void amdgpu_fence_process(struct amdgpu_ring *ring) { - uint64_t seq, last_seq, last_emitted; - unsigned count_loop = 0; - bool wake = false; - - /* Note there is a scenario here for an infinite loop but it's - * very unlikely to happen. 
For it to happen, the current polling - * process need to be interrupted by another process and another - * process needs to update the last_seq btw the atomic read and - * xchg of the current process. - * - * More over for this to go in infinite loop there need to be - * continuously new fence signaled ie amdgpu_fence_read needs - * to return a different value each time for both the currently - * polling process and the other process that xchg the last_seq - * btw atomic read and xchg of the current process. And the - * value the other process set as last seq must be higher than - * the seq value we just read. Which means that current process - * need to be interrupted after amdgpu_fence_read and before - * atomic xchg. - * - * To be even more safe we count the number of time we loop and - * we bail after 10 loop just accepting the fact that we might - * have temporarly set the last_seq not to the true real last - * seq but to an older one. - */ - last_seq = atomic64_read(&ring->fence_drv.last_seq); - do { - last_emitted = ring->fence_drv.sync_seq[ring->idx]; - seq = amdgpu_fence_read(ring); - seq |= last_seq & 0xffffffff00000000LL; - if (seq < last_seq) { - seq &= 0xffffffff; - seq |= last_emitted & 0xffffffff00000000LL; - } - - if (seq <= last_seq || seq > last_emitted) { - break; - } - /* If we loop over we don't want to return without - * checking if a fence is signaled as it means that the - * seq we just read is different from the previous on. - */ - wake = true; - last_seq = seq; - if ((count_loop++) > 10) { - /* We looped over too many time leave with the - * fact that we might have set an older fence - * seq then the current real last seq as signaled - * by the hw. - */ - break; - } - } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq); - - if (wake) - wake_up_all(&ring->adev->fence_queue); + if (amdgpu_fence_activity(ring)) + wake_up_all(&ring->fence_drv.fence_queue); } /** @@ -447,284 +355,49 @@ static bool amdgpu_fence_enable_signaling(struct fence *f) { struct amdgpu_fence *fence = to_amdgpu_fence(f); struct amdgpu_ring *ring = fence->ring; - struct amdgpu_device *adev = ring->adev; if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) return false; - if (down_read_trylock(&adev->exclusive_lock)) { - amdgpu_irq_get(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); - if (amdgpu_fence_activity(ring)) - wake_up_all_locked(&adev->fence_queue); - - /* did fence get signaled after we enabled the sw irq? */ - if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) { - amdgpu_irq_put(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); - up_read(&adev->exclusive_lock); - return false; - } - - up_read(&adev->exclusive_lock); - } else { - /* we're probably in a lockup, lets not fiddle too much */ - if (amdgpu_irq_get_delayed(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type)) - ring->fence_drv.delayed_irq = true; - amdgpu_fence_schedule_check(ring); - } - fence->fence_wake.flags = 0; fence->fence_wake.private = NULL; fence->fence_wake.func = amdgpu_fence_check_signaled; - __add_wait_queue(&adev->fence_queue, &fence->fence_wake); + __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake); fence_get(f); FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); return true; } -/** - * amdgpu_fence_signaled - check if a fence has signaled - * - * @fence: amdgpu fence object - * - * Check if the requested fence has signaled (all asics). - * Returns true if the fence has signaled or false if it has not. 
- */ -bool amdgpu_fence_signaled(struct amdgpu_fence *fence) -{ - if (!fence) - return true; - - if (amdgpu_fence_seq_signaled(fence->ring, fence->seq)) { - if (!fence_signal(&fence->base)) - FENCE_TRACE(&fence->base, "signaled from amdgpu_fence_signaled\n"); - return true; - } - - return false; -} - -/** - * amdgpu_fence_any_seq_signaled - check if any sequence number is signaled - * - * @adev: amdgpu device pointer - * @seq: sequence numbers - * - * Check if the last signaled fence sequnce number is >= the requested - * sequence number (all asics). - * Returns true if any has signaled (current value is >= requested value) - * or false if it has not. Helper function for amdgpu_fence_wait_seq. - */ -static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq) -{ - unsigned i; - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - if (!adev->rings[i] || !seq[i]) - continue; - - if (amdgpu_fence_seq_signaled(adev->rings[i], seq[i])) - return true; - } - - return false; -} - -/** - * amdgpu_fence_wait_seq_timeout - wait for a specific sequence numbers - * - * @adev: amdgpu device pointer - * @target_seq: sequence number(s) we want to wait for - * @intr: use interruptable sleep - * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait - * - * Wait for the requested sequence number(s) to be written by any ring - * (all asics). Sequnce number array is indexed by ring id. - * @intr selects whether to use interruptable (true) or non-interruptable - * (false) sleep when waiting for the sequence number. Helper function - * for amdgpu_fence_wait_*(). - * Returns remaining time if the sequence number has passed, 0 when - * the wait timeout, or an error for all other cases. - * -EDEADLK is returned when a GPU lockup has been detected. 
- */ -static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev, - u64 *target_seq, bool intr, - long timeout) -{ - uint64_t last_seq[AMDGPU_MAX_RINGS]; - bool signaled; - int i; - long r; - - if (timeout == 0) { - return amdgpu_fence_any_seq_signaled(adev, target_seq); - } - - while (!amdgpu_fence_any_seq_signaled(adev, target_seq)) { - - /* Save current sequence values, used to check for GPU lockups */ - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - struct amdgpu_ring *ring = adev->rings[i]; - - if (!ring || !target_seq[i]) - continue; - - last_seq[i] = atomic64_read(&ring->fence_drv.last_seq); - trace_amdgpu_fence_wait_begin(adev->ddev, i, target_seq[i]); - amdgpu_irq_get(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); - } - - if (intr) { - r = wait_event_interruptible_timeout(adev->fence_queue, ( - (signaled = amdgpu_fence_any_seq_signaled(adev, target_seq)) - || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT); - } else { - r = wait_event_timeout(adev->fence_queue, ( - (signaled = amdgpu_fence_any_seq_signaled(adev, target_seq)) - || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT); - } - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - struct amdgpu_ring *ring = adev->rings[i]; - - if (!ring || !target_seq[i]) - continue; - - amdgpu_irq_put(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); - trace_amdgpu_fence_wait_end(adev->ddev, i, target_seq[i]); - } - - if (unlikely(r < 0)) - return r; - - if (unlikely(!signaled)) { - - if (adev->needs_reset) - return -EDEADLK; - - /* we were interrupted for some reason and fence - * isn't signaled yet, resume waiting */ - if (r) - continue; - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - struct amdgpu_ring *ring = adev->rings[i]; - - if (!ring || !target_seq[i]) - continue; - - if (last_seq[i] != atomic64_read(&ring->fence_drv.last_seq)) - break; - } - - if (i != AMDGPU_MAX_RINGS) - continue; - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - if (!adev->rings[i] || !target_seq[i]) - continue; - - if (amdgpu_ring_is_lockup(adev->rings[i])) - break; - } - - if (i < AMDGPU_MAX_RINGS) { - /* good news we believe it's a lockup */ - dev_warn(adev->dev, "GPU lockup (waiting for " - "0x%016llx last fence id 0x%016llx on" - " ring %d)\n", - target_seq[i], last_seq[i], i); - - /* remember that we need an reset */ - adev->needs_reset = true; - wake_up_all(&adev->fence_queue); - return -EDEADLK; - } - - if (timeout < MAX_SCHEDULE_TIMEOUT) { - timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT; - if (timeout <= 0) { - return 0; - } - } - } - } - return timeout; -} - -/** - * amdgpu_fence_wait - wait for a fence to signal - * - * @fence: amdgpu fence object - * @intr: use interruptable sleep - * - * Wait for the requested fence to signal (all asics). - * @intr selects whether to use interruptable (true) or non-interruptable - * (false) sleep when waiting for the fence. - * Returns 0 if the fence has passed, error for all other cases. 
- */ -int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr) -{ - uint64_t seq[AMDGPU_MAX_RINGS] = {}; - long r; - - seq[fence->ring->idx] = fence->seq; - r = amdgpu_fence_wait_seq_timeout(fence->ring->adev, seq, intr, MAX_SCHEDULE_TIMEOUT); - if (r < 0) { - return r; - } - - r = fence_signal(&fence->base); - if (!r) - FENCE_TRACE(&fence->base, "signaled from fence_wait\n"); - return 0; -} - -/** - * amdgpu_fence_wait_any - wait for a fence to signal on any ring - * - * @adev: amdgpu device pointer - * @fences: amdgpu fence object(s) - * @intr: use interruptable sleep +/* + * amdgpu_fence_ring_wait_seq - wait for seq of the specific ring to signal + * @ring: ring to wait on for the seq number + * @seq: sequence number to wait for * - * Wait for any requested fence to signal (all asics). Fence - * array is indexed by ring id. @intr selects whether to use - * interruptable (true) or non-interruptable (false) sleep when - * waiting for the fences. Used by the suballocator. - * Returns 0 if any fence has passed, error for all other cases. + * return value: + * 0: seq signaled, and GPU not hung + * -EDEADLK: GPU hang detected + * -EINVAL: some parameter is not valid */ -int amdgpu_fence_wait_any(struct amdgpu_device *adev, - struct amdgpu_fence **fences, - bool intr) +static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq) { - uint64_t seq[AMDGPU_MAX_RINGS]; - unsigned i, num_rings = 0; - long r; - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - seq[i] = 0; + struct amdgpu_device *adev = ring->adev; + bool signaled = false; - if (!fences[i]) { - continue; - } + BUG_ON(!ring); + if (seq > ring->fence_drv.sync_seq[ring->idx]) + return -EINVAL; - seq[i] = fences[i]->seq; - ++num_rings; - } + if (atomic64_read(&ring->fence_drv.last_seq) >= seq) + return 0; - /* nothing to wait for ? 
*/ - if (num_rings == 0) - return -ENOENT; + wait_event(ring->fence_drv.fence_queue, ( + (signaled = amdgpu_fence_seq_signaled(ring, seq)) + || adev->needs_reset)); - r = amdgpu_fence_wait_seq_timeout(adev, seq, intr, MAX_SCHEDULE_TIMEOUT); - if (r < 0) { - return r; - } - return 0; + if (signaled) + return 0; + else + return -EDEADLK; } /** @@ -739,19 +412,12 @@ int amdgpu_fence_wait_any(struct amdgpu_device *adev, */ int amdgpu_fence_wait_next(struct amdgpu_ring *ring) { - uint64_t seq[AMDGPU_MAX_RINGS] = {}; - long r; + uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL; - seq[ring->idx] = atomic64_read(&ring->fence_drv.last_seq) + 1ULL; - if (seq[ring->idx] >= ring->fence_drv.sync_seq[ring->idx]) { - /* nothing to wait for, last_seq is - already the last emited fence */ + if (seq >= ring->fence_drv.sync_seq[ring->idx]) return -ENOENT; - } - r = amdgpu_fence_wait_seq_timeout(ring->adev, seq, false, MAX_SCHEDULE_TIMEOUT); - if (r < 0) - return r; - return 0; + + return amdgpu_fence_ring_wait_seq(ring, seq); } /** @@ -766,23 +432,12 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring) */ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) { - struct amdgpu_device *adev = ring->adev; - uint64_t seq[AMDGPU_MAX_RINGS] = {}; - long r; + uint64_t seq = ring->fence_drv.sync_seq[ring->idx]; - seq[ring->idx] = ring->fence_drv.sync_seq[ring->idx]; - if (!seq[ring->idx]) + if (!seq) return 0; - r = amdgpu_fence_wait_seq_timeout(adev, seq, false, MAX_SCHEDULE_TIMEOUT); - if (r < 0) { - if (r == -EDEADLK) - return -EDEADLK; - - dev_err(adev->dev, "error waiting for ring[%d] to become idle (%ld)\n", - ring->idx, r); - } - return 0; + return amdgpu_fence_ring_wait_seq(ring, seq); } /** @@ -933,9 +588,12 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index; } amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq)); - ring->fence_drv.initialized = true; + amdgpu_irq_get(adev, irq_src, irq_type); + ring->fence_drv.irq_src = irq_src; ring->fence_drv.irq_type = irq_type; + ring->fence_drv.initialized = true; + dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, " "cpu addr 0x%p\n", ring->idx, ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr); @@ -966,6 +624,16 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) INIT_DELAYED_WORK(&ring->fence_drv.lockup_work, amdgpu_fence_check_lockup); ring->fence_drv.ring = ring; + + if (amdgpu_enable_scheduler) { + ring->scheduler = amd_sched_create((void *)ring->adev, + &amdgpu_sched_ops, + ring->idx, 5, 0, + amdgpu_sched_hw_submission); + if (!ring->scheduler) + DRM_ERROR("Failed to create scheduler on ring %d.\n", + ring->idx); + } } /** @@ -982,7 +650,6 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) */ int amdgpu_fence_driver_init(struct amdgpu_device *adev) { - init_waitqueue_head(&adev->fence_queue); if (amdgpu_debugfs_fence_init(adev)) dev_err(adev->dev, "fence debugfs file creation failed\n"); @@ -1011,13 +678,78 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) /* no need to trigger GPU reset as we are unloading */ amdgpu_fence_driver_force_completion(adev); } - wake_up_all(&adev->fence_queue); + wake_up_all(&ring->fence_drv.fence_queue); + amdgpu_irq_put(adev, ring->fence_drv.irq_src, + ring->fence_drv.irq_type); + if (ring->scheduler) + amd_sched_destroy(ring->scheduler); ring->fence_drv.initialized = false; } mutex_unlock(&adev->ring_lock); } /** + * amdgpu_fence_driver_suspend - suspend the fence driver + * 
for all possible rings. + * + * @adev: amdgpu device pointer + * + * Suspend the fence driver for all possible rings (all asics). + */ +void amdgpu_fence_driver_suspend(struct amdgpu_device *adev) +{ + int i, r; + + mutex_lock(&adev->ring_lock); + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + if (!ring || !ring->fence_drv.initialized) + continue; + + /* wait for gpu to finish processing current batch */ + r = amdgpu_fence_wait_empty(ring); + if (r) { + /* delay GPU reset to resume */ + amdgpu_fence_driver_force_completion(adev); + } + + /* disable the interrupt */ + amdgpu_irq_put(adev, ring->fence_drv.irq_src, + ring->fence_drv.irq_type); + } + mutex_unlock(&adev->ring_lock); +} + +/** + * amdgpu_fence_driver_resume - resume the fence driver + * for all possible rings. + * + * @adev: amdgpu device pointer + * + * Resume the fence driver for all possible rings (all asics). + * Not all asics have all rings, so each asic will only + * start the fence driver on the rings it has using + * amdgpu_fence_driver_start_ring(). + */ +void amdgpu_fence_driver_resume(struct amdgpu_device *adev) +{ + int i; + + mutex_lock(&adev->ring_lock); + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + if (!ring || !ring->fence_drv.initialized) + continue; + + /* enable the interrupt */ + amdgpu_irq_get(adev, ring->fence_drv.irq_src, + ring->fence_drv.irq_type); + } + mutex_unlock(&adev->ring_lock); +} + +/** * amdgpu_fence_driver_force_completion - force all fence waiter to complete * * @adev: amdgpu device pointer @@ -1104,6 +836,22 @@ static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence) return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); } +static inline bool amdgpu_test_signaled_any(struct amdgpu_fence **fences) +{ + int idx; + struct amdgpu_fence *fence; + + idx = 0; + for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) { + fence = fences[idx]; + if (fence) { + if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) + return true; + } + } + return false; +} + struct amdgpu_wait_cb { struct fence_cb base; struct task_struct *task; }; @@ -1119,14 +867,35 @@ static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb) static signed long amdgpu_fence_default_wait(struct fence *f, bool intr, signed long t) { + struct amdgpu_fence *array[AMDGPU_MAX_RINGS]; struct amdgpu_fence *fence = to_amdgpu_fence(f); struct amdgpu_device *adev = fence->ring->adev; - struct amdgpu_wait_cb cb; - cb.task = current; + memset(&array[0], 0, sizeof(array)); + array[0] = fence; + + return amdgpu_fence_wait_any(adev, array, intr, t); +} + +/* wait until any fence in array signaled */ +signed long amdgpu_fence_wait_any(struct amdgpu_device *adev, + struct amdgpu_fence **array, bool intr, signed long t) +{ + long idx = 0; + struct amdgpu_wait_cb cb[AMDGPU_MAX_RINGS]; + struct amdgpu_fence *fence; + + BUG_ON(!array); - if (fence_add_callback(f, &cb.base, amdgpu_fence_wait_cb)) - return t; + for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) { + fence = array[idx]; + if (fence) { + cb[idx].task = current; + if (fence_add_callback(&fence->base, + &cb[idx].base, amdgpu_fence_wait_cb)) + return t; /* return if fence is already signaled */ + } + } while (t > 0) { if (intr) @@ -1135,10 +904,10 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr, set_current_state(TASK_UNINTERRUPTIBLE); /* - * amdgpu_test_signaled must be called after + * amdgpu_test_signaled_any must be called after * 
set_current_state to prevent a race with wake_up_process */ - if (amdgpu_test_signaled(fence)) + if (amdgpu_test_signaled_any(array)) break; if (adev->needs_reset) { @@ -1153,7 +922,13 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr, } __set_current_state(TASK_RUNNING); - fence_remove_callback(f, &cb.base); + + idx = 0; + for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) { + fence = array[idx]; + if (fence) + fence_remove_callback(&fence->base, &cb[idx].base); + } return t; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index bc0fac618a3f..5104e64e9ad8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -88,6 +88,7 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, ib->fence = NULL; ib->user = NULL; ib->vm = vm; + ib->ctx = NULL; ib->gds_base = 0; ib->gds_size = 0; ib->gws_base = 0; @@ -142,6 +143,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, struct amdgpu_ring *ring; struct amdgpu_ctx *ctx, *old_ctx; struct amdgpu_vm *vm; + uint64_t sequence; unsigned i; int r = 0; @@ -165,9 +167,11 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, if (vm) { /* grab a vm id if necessary */ - struct amdgpu_fence *vm_id_fence = NULL; - vm_id_fence = amdgpu_vm_grab_id(ibs->ring, ibs->vm); - amdgpu_sync_fence(&ibs->sync, vm_id_fence); + r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync); + if (r) { + amdgpu_ring_unlock_undo(ring); + return r; + } } r = amdgpu_sync_rings(&ibs->sync, ring); @@ -212,11 +216,18 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, return r; } + sequence = amdgpu_enable_scheduler ? ib->sequence : 0; + + if (!amdgpu_enable_scheduler && ib->ctx) + ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring, + &ib->fence->base, + sequence); + /* wrap the last IB with fence */ if (ib->user) { uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo); addr += ib->user->offset; - amdgpu_ring_emit_fence(ring, addr, ib->fence->seq, + amdgpu_ring_emit_fence(ring, addr, ib->sequence, AMDGPU_FENCE_FLAG_64BIT); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index fb44dd2231b1..90044b254404 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -206,6 +206,8 @@ restart_ih: amdgpu_amdkfd_interrupt(adev, (const void *) &adev->irq.ih.ring[ring_index]); + entry.iv_entry = (const uint32_t *) + &adev->irq.ih.ring[ring_index]; amdgpu_ih_decode_iv(adev, &entry); adev->irq.ih.rptr &= adev->irq.ih.ptr_mask; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h index c62b09e555d6..ba38ae6a1463 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h @@ -52,6 +52,7 @@ struct amdgpu_iv_entry { unsigned ring_id; unsigned vm_id; unsigned pas_id; + const uint32_t *iv_entry; }; int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index b4d36f0f2153..0aba8e9bc8a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -272,6 +272,11 @@ void amdgpu_irq_fini(struct amdgpu_device *adev) kfree(src->enabled_types); src->enabled_types = NULL; + if (src->data) { + kfree(src->data); + kfree(src); + adev->irq.sources[i] = NULL; + } } }
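The fence rework in the hunks above replaces the old single-fence wait with a wait-any loop: a wake-up callback is installed on every fence in the array, the waiter then alternates between set_current_state() and a check of amdgpu_test_signaled_any(), and all callbacks are removed once any fence signals or the timeout expires. Below is a minimal userspace sketch of that same pattern, offered only as an analogy: a pthread condition variable stands in for the kernel's fence callbacks and fence_queue, and every name in it (MAX_RINGS, any_signaled, ring_thread, ...) is illustrative rather than part of the driver.

/*
 * Wait-any sketch (userspace analogy, not kernel code).
 * Build with: cc -pthread wait_any_demo.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_RINGS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER; /* stands in for fence_queue */
static bool signaled[MAX_RINGS];                       /* one "fence" per ring */

/* analogous to amdgpu_test_signaled_any(): true once any fence has fired */
static bool any_signaled(void)
{
	for (int i = 0; i < MAX_RINGS; ++i)
		if (signaled[i])
			return true;
	return false;
}

/* each "ring" signals its fence and wakes the waiter, like amdgpu_fence_wait_cb() */
static void *ring_thread(void *arg)
{
	int idx = *(int *)arg;

	sleep(idx + 1);                /* pretend the GPU is working on the job */
	pthread_mutex_lock(&lock);
	signaled[idx] = true;          /* the "fence" signals */
	pthread_cond_broadcast(&wake); /* wake_up_all(&fence_queue) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t threads[MAX_RINGS];
	int ids[MAX_RINGS];

	for (int i = 0; i < MAX_RINGS; ++i) {
		ids[i] = i;
		pthread_create(&threads[i], NULL, ring_thread, &ids[i]);
	}

	/* wait-any: sleep until at least one fence has signaled */
	pthread_mutex_lock(&lock);
	while (!any_signaled())
		pthread_cond_wait(&wake, &lock);
	pthread_mutex_unlock(&lock);
	printf("at least one fence signaled, waiter resumes\n");

	for (int i = 0; i < MAX_RINGS; ++i)
		pthread_join(threads[i], NULL);
	return 0;
}

The ordering discipline is the essential part and is the same as in amdgpu_fence_default_wait(): the waiter publishes its intent to sleep before re-checking the predicate (here the mutex provides that guarantee, in the kernel it is set_current_state() before amdgpu_test_signaled_any()), so a signal arriving in between is never lost.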
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h index 8299795f2b2d..17b01aef4278 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h @@ -40,6 +40,7 @@ struct amdgpu_irq_src { unsigned num_types; atomic_t *enabled_types; const struct amdgpu_irq_src_funcs *funcs; + void *data; }; /* provided by interrupt generating IP blocks */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 93000af92283..87da6b1848fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -96,8 +96,8 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) if ((amdgpu_runtime_pm != 0) && amdgpu_has_atpx() && - ((flags & AMDGPU_IS_APU) == 0)) - flags |= AMDGPU_IS_PX; + ((flags & AMD_IS_APU) == 0)) + flags |= AMD_IS_PX; /* amdgpu_device_init should report only fatal error * like memory allocation failure or iomapping failure, @@ -451,11 +451,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts; dev_info._pad = 0; dev_info.ids_flags = 0; - if (adev->flags & AMDGPU_IS_APU) + if (adev->flags & AMD_IS_APU) dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION; dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; - dev_info.virtual_address_alignment = max(PAGE_SIZE, 0x10000UL); + dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) * AMDGPU_GPU_PAGE_SIZE; dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE; @@ -527,10 +527,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) mutex_init(&fpriv->bo_list_lock); idr_init(&fpriv->bo_list_handles); - /* init context manager */ - mutex_init(&fpriv->ctx_mgr.lock); - idr_init(&fpriv->ctx_mgr.ctx_handles); - fpriv->ctx_mgr.adev = adev; + amdgpu_ctx_mgr_init(&fpriv->ctx_mgr); file_priv->driver_priv = fpriv; @@ -571,8 +568,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, idr_destroy(&fpriv->bo_list_handles); mutex_destroy(&fpriv->bo_list_lock); - /* release context */ - amdgpu_ctx_fini(fpriv); + amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr); kfree(fpriv); file_priv->driver_priv = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 8da64245b31b..57adcad2f7ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -223,18 +223,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, size_t acc_size; int r; - /* VI has a hw bug where VM PTEs have to be allocated in groups of 8. 
- * do this as a temporary workaround - */ - if (!(domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) { - if (adev->asic_type >= CHIP_TOPAZ) { - if (byte_align & 0x7fff) - byte_align = ALIGN(byte_align, 0x8000); - if (size & 0x7fff) - size = ALIGN(size, 0x8000); - } - } - page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; size = ALIGN(size, PAGE_SIZE); @@ -462,7 +450,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo) int amdgpu_bo_evict_vram(struct amdgpu_device *adev) { /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */ - if (0 && (adev->flags & AMDGPU_IS_APU)) { + if (0 && (adev->flags & AMD_IS_APU)) { /* Useless to evict on IGP chips */ return 0; } @@ -478,7 +466,6 @@ void amdgpu_bo_force_delete(struct amdgpu_device *adev) } dev_err(adev->dev, "Userspace still has active objects !\n"); list_for_each_entry_safe(bo, n, &adev->gem.objects, list) { - mutex_lock(&adev->ddev->struct_mutex); dev_err(adev->dev, "%p %p %lu %lu force free\n", &bo->gem_base, bo, (unsigned long)bo->gem_base.size, *((unsigned long *)&bo->gem_base.refcount)); @@ -486,8 +473,7 @@ void amdgpu_bo_force_delete(struct amdgpu_device *adev) list_del_init(&bo->list); mutex_unlock(&bo->adev->gem.mutex); /* this should unref the ttm bo */ - drm_gem_object_unreference(&bo->gem_base); - mutex_unlock(&adev->ddev->struct_mutex); + drm_gem_object_unreference_unlocked(&bo->gem_base); } } @@ -658,13 +644,13 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) * @shared: true if fence should be added shared * */ -void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence, +void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence, bool shared) { struct reservation_object *resv = bo->tbo.resv; if (shared) - reservation_object_add_shared_fence(resv, &fence->base); + reservation_object_add_shared_fence(resv, fence); else - reservation_object_add_excl_fence(resv, &fence->base); + reservation_object_add_excl_fence(resv, fence); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 675bdc30e41d..238465a9ac55 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -161,7 +161,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem); int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); -void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence, +void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence, bool shared); /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index ed13baa7c976..efed11509f4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -82,7 +82,7 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev, mutex_unlock(&adev->pm.mutex); /* Can't set dpm state when the card is off */ - if (!(adev->flags & AMDGPU_IS_PX) || + if (!(adev->flags & AMD_IS_PX) || (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) amdgpu_pm_compute_clocks(adev); fail: @@ -538,7 +538,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) /* vce just modifies an existing state so force a change */ if (ps->vce_active != adev->pm.dpm.vce_active) goto force; - if (adev->flags & AMDGPU_IS_APU) { + if (adev->flags & AMD_IS_APU) { /* for APUs if the num crtcs changed but state is the same, * all we need to do is update the 
display configuration. */ @@ -580,7 +580,6 @@ force: amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); } - mutex_lock(&adev->ddev->struct_mutex); mutex_lock(&adev->ring_lock); /* update whether vce is active */ @@ -628,7 +627,6 @@ force: done: mutex_unlock(&adev->ring_lock); - mutex_unlock(&adev->ddev->struct_mutex); } void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 855e2196657a..7d442c51063e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -342,6 +342,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, amdgpu_fence_driver_init_ring(ring); } + init_waitqueue_head(&ring->fence_drv.fence_queue); + r = amdgpu_wb_get(adev, &ring->rptr_offs); if (r) { dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); @@ -367,7 +369,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, } ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4); ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs]; - + spin_lock_init(&ring->fence_lock); r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type); if (r) { dev_err(adev->dev, "failed initializing fences (%d).\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index eb20987ce18d..d6398cf45f24 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -160,7 +160,8 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager) sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist); list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) { - if (sa_bo->fence == NULL || !amdgpu_fence_signaled(sa_bo->fence)) { + if (sa_bo->fence == NULL || + !fence_is_signaled(&sa_bo->fence->base)) { return; } amdgpu_sa_bo_remove_locked(sa_bo); @@ -274,7 +275,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager, sa_bo = list_first_entry(&sa_manager->flist[i], struct amdgpu_sa_bo, flist); - if (!amdgpu_fence_signaled(sa_bo->fence)) { + if (!fence_is_signaled(&sa_bo->fence->base)) { fences[i] = sa_bo->fence; continue; } @@ -317,6 +318,7 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev, struct amdgpu_fence *fences[AMDGPU_MAX_RINGS]; unsigned tries[AMDGPU_MAX_RINGS]; int i, r; + signed long t; BUG_ON(align > sa_manager->align); BUG_ON(size > sa_manager->size); @@ -350,7 +352,8 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev, } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries)); spin_unlock(&sa_manager->wq.lock); - r = amdgpu_fence_wait_any(adev, fences, false); + t = amdgpu_fence_wait_any(adev, fences, false, MAX_SCHEDULE_TIMEOUT); + r = (t > 0) ? 
0 : t; spin_lock(&sa_manager->wq.lock); /* if we have nothing to wait for block */ if (r == -ENOENT) { @@ -379,7 +382,7 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, sa_manager = (*sa_bo)->manager; spin_lock(&sa_manager->wq.lock); - if (fence && !amdgpu_fence_signaled(fence)) { + if (fence && !fence_is_signaled(&fence->base)) { (*sa_bo)->fence = amdgpu_fence_ref(fence); list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[fence->ring->idx]); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c new file mode 100644 index 000000000000..a86e38158afa --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -0,0 +1,145 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * + */ +#include <linux/kthread.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <drm/drmP.h> +#include "amdgpu.h" + +static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched, + struct amd_sched_entity *entity, + struct amd_sched_job *job) +{ + int r = 0; + struct amdgpu_cs_parser *sched_job; + if (!job || !job->data) { + DRM_ERROR("job is null\n"); + return -EINVAL; + } + + sched_job = (struct amdgpu_cs_parser *)job->data; + if (sched_job->prepare_job) { + r = sched_job->prepare_job(sched_job); + if (r) { + DRM_ERROR("Prepare job error\n"); + schedule_work(&sched_job->job_work); + } + } + return r; +} + +static struct fence *amdgpu_sched_run_job(struct amd_gpu_scheduler *sched, + struct amd_sched_entity *entity, + struct amd_sched_job *job) +{ + int r = 0; + struct amdgpu_cs_parser *sched_job; + struct amdgpu_fence *fence; + + if (!job || !job->data) { + DRM_ERROR("job is null\n"); + return NULL; + } + sched_job = (struct amdgpu_cs_parser *)job->data; + mutex_lock(&sched_job->job_lock); + r = amdgpu_ib_schedule(sched_job->adev, + sched_job->num_ibs, + sched_job->ibs, + sched_job->filp); + if (r) + goto err; + fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence); + + if (sched_job->run_job) { + r = sched_job->run_job(sched_job); + if (r) + goto err; + } + + mutex_unlock(&sched_job->job_lock); + return &fence->base; + +err: + DRM_ERROR("Run job error\n"); + mutex_unlock(&sched_job->job_lock); + schedule_work(&sched_job->job_work); + return NULL; +} + +static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched, + struct amd_sched_job *job) +{ + struct amdgpu_cs_parser *sched_job; + + if (!job || !job->data) { + DRM_ERROR("job is null\n"); + return; + } + sched_job = (struct amdgpu_cs_parser *)job->data; + schedule_work(&sched_job->job_work); +} + +struct amd_sched_backend_ops amdgpu_sched_ops = { + .prepare_job = amdgpu_sched_prepare_job, + .run_job = amdgpu_sched_run_job, + .process_job = amdgpu_sched_process_job +}; + +int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_ib *ibs, + unsigned num_ibs, + int (*free_job)(struct amdgpu_cs_parser *), + void *owner, + struct fence **f) +{ + int r = 0; + if (amdgpu_enable_scheduler) { + struct amdgpu_cs_parser *sched_job = + amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx, + ibs, num_ibs); + if(!sched_job) { + return -ENOMEM; + } + sched_job->free_job = free_job; + mutex_lock(&sched_job->job_lock); + r = amd_sched_push_job(ring->scheduler, + &adev->kernel_ctx.rings[ring->idx].entity, + sched_job, &sched_job->s_fence); + if (r) { + mutex_unlock(&sched_job->job_lock); + kfree(sched_job); + return r; + } + ibs[num_ibs - 1].sequence = sched_job->s_fence->v_seq; + *f = fence_get(&sched_job->s_fence->base); + mutex_unlock(&sched_job->job_lock); + } else { + r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner); + if (r) + return r; + *f = fence_get(&ibs[num_ibs - 1].fence->base); + } + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 21accbdd0a1a..7cb711fc1ee2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -53,20 +53,24 @@ void amdgpu_sync_create(struct amdgpu_sync *sync) } /** - * amdgpu_sync_fence - use the semaphore to sync to a fence + * amdgpu_sync_fence - remember to sync to this fence * * @sync: sync object to add fence to * @fence: fence to sync to * - * Sync to the fence using the semaphore objects */ 
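The new amdgpu_sched.c above hooks the driver into the GPU scheduler through three callbacks (prepare_job, run_job, process_job) and adds amdgpu_sched_ib_submit_kernel_helper() so kernel-internal submissions take the same path as user ones. A condensed sketch of that helper's contract follows; the demo_* names are invented stand-ins rather than symbols from this patch, only struct fence and its helpers are the real 4.2-era API, and the point is that callers get back one refcounted fence regardless of which path ran:

#include <linux/fence.h>

struct demo_ib;		/* driver IB type, elided */
struct demo_job;	/* scheduler job type, elided */

/* assumed driver hooks -- shapes only, not symbols from this patch */
extern struct demo_job *demo_job_create(struct demo_ib *ibs, unsigned num);
extern int demo_sched_push(struct demo_job *job, struct fence **sched_fence);
extern int demo_ib_schedule(struct demo_ib *ibs, unsigned num,
			    struct fence **hw_fence);

static int demo_submit(struct demo_ib *ibs, unsigned num, bool use_sched,
		       struct fence **f)
{
	if (use_sched) {
		/* scheduler path: hand back the scheduler fence of the job */
		struct demo_job *job = demo_job_create(ibs, num);

		if (!job)
			return -ENOMEM;
		return demo_sched_push(job, f);
	}
	/* direct path: hand back the hardware fence of the last IB */
	return demo_ib_schedule(ibs, num, f);
}

Either way the caller only ever does fence_wait(*f, false) followed by fence_put(*f), which is exactly the cleanup dance the UVD, VCE and VM hunks below repeat.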
-void amdgpu_sync_fence(struct amdgpu_sync *sync, - struct amdgpu_fence *fence) +int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, + struct fence *f) { + struct amdgpu_fence *fence; struct amdgpu_fence *other; - if (!fence) - return; + if (!f) + return 0; + + fence = to_amdgpu_fence(f); + if (!fence || fence->ring->adev != adev) + return fence_wait(f, true); other = sync->sync_to[fence->ring->idx]; sync->sync_to[fence->ring->idx] = amdgpu_fence_ref( @@ -79,6 +83,8 @@ void amdgpu_sync_fence(struct amdgpu_sync *sync, amdgpu_fence_later(fence, other)); amdgpu_fence_unref(&other); } + + return 0; } /** @@ -106,11 +112,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, /* always sync to the exclusive fence */ f = reservation_object_get_excl(resv); - fence = f ? to_amdgpu_fence(f) : NULL; - if (fence && fence->ring->adev == adev) - amdgpu_sync_fence(sync, fence); - else if (f) - r = fence_wait(f, true); + r = amdgpu_sync_fence(adev, sync, f); flist = reservation_object_get_list(resv); if (!flist || r) @@ -121,14 +123,26 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, reservation_object_held(resv)); fence = f ? to_amdgpu_fence(f) : NULL; if (fence && fence->ring->adev == adev) { - if (fence->owner != owner || - fence->owner == AMDGPU_FENCE_OWNER_UNDEFINED) - amdgpu_sync_fence(sync, fence); - } else if (f) { - r = fence_wait(f, true); - if (r) - break; + /* VM updates are only interesting + * for other VM updates and moves. + */ + if ((owner != AMDGPU_FENCE_OWNER_MOVE) && + (fence->owner != AMDGPU_FENCE_OWNER_MOVE) && + ((owner == AMDGPU_FENCE_OWNER_VM) != + (fence->owner == AMDGPU_FENCE_OWNER_VM))) + continue; + + /* Ignore fence from the same owner as + * long as it isn't undefined. + */ + if (owner != AMDGPU_FENCE_OWNER_UNDEFINED && + fence->owner == owner) + continue; } + + r = amdgpu_sync_fence(adev, sync, f); + if (r) + break; } return r; } @@ -164,9 +178,9 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync, return -EINVAL; } - if (count >= AMDGPU_NUM_SYNCS) { + if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) { /* not enough room, wait manually */ - r = amdgpu_fence_wait(fence, false); + r = fence_wait(&fence->base, false); if (r) return r; continue; @@ -186,7 +200,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync, if (!amdgpu_semaphore_emit_signal(other, semaphore)) { /* signaling wasn't successful wait manually */ amdgpu_ring_undo(other); - r = amdgpu_fence_wait(fence, false); + r = fence_wait(&fence->base, false); if (r) return r; continue; @@ -196,7 +210,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync, if (!amdgpu_semaphore_emit_wait(ring, semaphore)) { /* waiting wasn't successful wait manually */ amdgpu_ring_undo(other); - r = amdgpu_fence_wait(fence, false); + r = fence_wait(&fence->base, false); if (r) return r; continue; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index df202999fbfe..962dd5552137 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -116,7 +116,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) goto out_lclean_unpin; } - r = amdgpu_fence_wait(fence, false); + r = fence_wait(&fence->base, false); if (r) { DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i); goto out_lclean_unpin; @@ -161,7 +161,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) goto out_lclean_unpin; } - r = amdgpu_fence_wait(fence, false); + r = fence_wait(&fence->base, false); if (r) { DRM_ERROR("Failed to wait for 
VRAM->GTT fence %d\n", i); goto out_lclean_unpin; @@ -238,7 +238,7 @@ void amdgpu_test_moves(struct amdgpu_device *adev) static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev, struct amdgpu_ring *ring, - struct amdgpu_fence **fence) + struct fence **fence) { uint32_t handle = ring->idx ^ 0xdeafbeef; int r; @@ -269,15 +269,16 @@ static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev, DRM_ERROR("Failed to get dummy destroy msg\n"); return r; } - } else { + struct amdgpu_fence *a_fence = NULL; r = amdgpu_ring_lock(ring, 64); if (r) { DRM_ERROR("Failed to lock ring A %d\n", ring->idx); return r; } - amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, fence); + amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, &a_fence); amdgpu_ring_unlock_commit(ring); + *fence = &a_fence->base; } return 0; } @@ -286,7 +287,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev, struct amdgpu_ring *ringA, struct amdgpu_ring *ringB) { - struct amdgpu_fence *fence1 = NULL, *fence2 = NULL; + struct fence *fence1 = NULL, *fence2 = NULL; struct amdgpu_semaphore *semaphore = NULL; int r; @@ -322,7 +323,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev, mdelay(1000); - if (amdgpu_fence_signaled(fence1)) { + if (fence_is_signaled(fence1)) { DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n"); goto out_cleanup; } @@ -335,7 +336,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev, amdgpu_semaphore_emit_signal(ringB, semaphore); amdgpu_ring_unlock_commit(ringB); - r = amdgpu_fence_wait(fence1, false); + r = fence_wait(fence1, false); if (r) { DRM_ERROR("Failed to wait for sync fence 1\n"); goto out_cleanup; @@ -343,7 +344,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev, mdelay(1000); - if (amdgpu_fence_signaled(fence2)) { + if (fence_is_signaled(fence2)) { DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n"); goto out_cleanup; } @@ -356,7 +357,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev, amdgpu_semaphore_emit_signal(ringB, semaphore); amdgpu_ring_unlock_commit(ringB); - r = amdgpu_fence_wait(fence2, false); + r = fence_wait(fence2, false); if (r) { DRM_ERROR("Failed to wait for sync fence 1\n"); goto out_cleanup; @@ -366,10 +367,10 @@ out_cleanup: amdgpu_semaphore_free(adev, &semaphore, NULL); if (fence1) - amdgpu_fence_unref(&fence1); + fence_put(fence1); if (fence2) - amdgpu_fence_unref(&fence2); + fence_put(fence2); if (r) printk(KERN_WARNING "Error while testing ring sync (%d).\n", r); @@ -380,7 +381,7 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev, struct amdgpu_ring *ringB, struct amdgpu_ring *ringC) { - struct amdgpu_fence *fenceA = NULL, *fenceB = NULL; + struct fence *fenceA = NULL, *fenceB = NULL; struct amdgpu_semaphore *semaphore = NULL; bool sigA, sigB; int i, r; @@ -416,11 +417,11 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev, mdelay(1000); - if (amdgpu_fence_signaled(fenceA)) { + if (fence_is_signaled(fenceA)) { DRM_ERROR("Fence A signaled without waiting for semaphore.\n"); goto out_cleanup; } - if (amdgpu_fence_signaled(fenceB)) { + if (fence_is_signaled(fenceB)) { DRM_ERROR("Fence B signaled without waiting for semaphore.\n"); goto out_cleanup; } @@ -435,8 +436,8 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev, for (i = 0; i < 30; ++i) { mdelay(100); - sigA = amdgpu_fence_signaled(fenceA); - sigB = amdgpu_fence_signaled(fenceB); + sigA = fence_is_signaled(fenceA); + sigB = fence_is_signaled(fenceB); if (sigA || sigB) break; } @@ -461,12 +462,12 
@@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev, mdelay(1000); - r = amdgpu_fence_wait(fenceA, false); + r = fence_wait(fenceA, false); if (r) { DRM_ERROR("Failed to wait for sync fence A\n"); goto out_cleanup; } - r = amdgpu_fence_wait(fenceB, false); + r = fence_wait(fenceB, false); if (r) { DRM_ERROR("Failed to wait for sync fence B\n"); goto out_cleanup; @@ -476,10 +477,10 @@ out_cleanup: amdgpu_semaphore_free(adev, &semaphore, NULL); if (fenceA) - amdgpu_fence_unref(&fenceA); + fence_put(fenceA); if (fenceB) - amdgpu_fence_unref(&fenceB); + fence_put(fenceB); if (r) printk(KERN_WARNING "Error while testing ring sync (%d).\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 2f7a5efa21c2..68369cf1e318 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -52,6 +52,7 @@ #endif #define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin" #define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin" +#define FIRMWARE_FIJI "amdgpu/fiji_uvd.bin" /** * amdgpu_uvd_cs_ctx - Command submission parser context @@ -81,6 +82,7 @@ MODULE_FIRMWARE(FIRMWARE_MULLINS); #endif MODULE_FIRMWARE(FIRMWARE_TONGA); MODULE_FIRMWARE(FIRMWARE_CARRIZO); +MODULE_FIRMWARE(FIRMWARE_FIJI); static void amdgpu_uvd_note_usage(struct amdgpu_device *adev); static void amdgpu_uvd_idle_work_handler(struct work_struct *work); @@ -116,6 +118,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) case CHIP_TONGA: fw_name = FIRMWARE_TONGA; break; + case CHIP_FIJI: + fw_name = FIRMWARE_FIJI; + break; case CHIP_CARRIZO: fw_name = FIRMWARE_CARRIZO; break; @@ -283,7 +288,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { uint32_t handle = atomic_read(&adev->uvd.handles[i]); if (handle != 0 && adev->uvd.filp[i] == filp) { - struct amdgpu_fence *fence; + struct fence *fence; amdgpu_uvd_note_usage(adev); @@ -293,8 +298,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) continue; } - amdgpu_fence_wait(fence, false); - amdgpu_fence_unref(&fence); + fence_wait(fence, false); + fence_put(fence); adev->uvd.filp[i] = NULL; atomic_set(&adev->uvd.handles[i], 0); @@ -375,6 +380,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) unsigned fs_in_mb = width_in_mb * height_in_mb; unsigned image_size, tmp, min_dpb_size, num_dpb_buffer; + unsigned min_ctx_size = 0; image_size = width * height; image_size += image_size / 2; @@ -466,6 +472,8 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2; min_dpb_size = image_size * num_dpb_buffer; + min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16) + * 16 * num_dpb_buffer + 52 * 1024; break; default: @@ -486,6 +494,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) buf_sizes[0x1] = dpb_size; buf_sizes[0x2] = image_size; + buf_sizes[0x4] = min_ctx_size; return 0; } @@ -504,28 +513,25 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, { struct amdgpu_device *adev = ctx->parser->adev; int32_t *msg, msg_type, handle; - struct fence *f; void *ptr; - - int i, r; + long r; + int i; if (offset & 0x3F) { DRM_ERROR("UVD messages must be 64 byte aligned!\n"); return -EINVAL; } - f = reservation_object_get_excl(bo->tbo.resv); - if (f) { - r = amdgpu_fence_wait((struct amdgpu_fence *)f, false); - if (r) { - DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); - return 
r; - } + r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false, + MAX_SCHEDULE_TIMEOUT); + if (r < 0) { + DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r); + return r; } r = amdgpu_bo_kmap(bo, &ptr); if (r) { - DRM_ERROR("Failed mapping the UVD message (%d)!\n", r); + DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r); return r; } @@ -628,6 +634,13 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) return -EINVAL; } + } else if (cmd == 0x206) { + if ((end - start) < ctx->buf_sizes[4]) { + DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd, + (unsigned)(end - start), + ctx->buf_sizes[4]); + return -EINVAL; + } } else if ((cmd != 0x100) && (cmd != 0x204)) { DRM_ERROR("invalid UVD command %X!\n", cmd); return -EINVAL; @@ -755,9 +768,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) struct amdgpu_uvd_cs_ctx ctx = {}; unsigned buf_sizes[] = { [0x00000000] = 2048, - [0x00000001] = 32 * 1024 * 1024, - [0x00000002] = 2048 * 1152 * 3, + [0x00000001] = 0xFFFFFFFF, + [0x00000002] = 0xFFFFFFFF, [0x00000003] = 2048, + [0x00000004] = 0xFFFFFFFF, }; struct amdgpu_ib *ib = &parser->ibs[ib_idx]; int r; @@ -792,14 +806,24 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) return 0; } +static int amdgpu_uvd_free_job( + struct amdgpu_cs_parser *sched_job) +{ + amdgpu_ib_free(sched_job->adev, sched_job->ibs); + kfree(sched_job->ibs); + return 0; +} + static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, - struct amdgpu_fence **fence) + struct fence **fence) { struct ttm_validate_buffer tv; struct ww_acquire_ctx ticket; struct list_head head; - struct amdgpu_ib ib; + struct amdgpu_ib *ib = NULL; + struct fence *f = NULL; + struct amdgpu_device *adev = ring->adev; uint64_t addr; int i, r; @@ -821,34 +845,49 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); if (r) goto err; - - r = amdgpu_ib_get(ring, NULL, 64, &ib); - if (r) + ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); + if (!ib) { + r = -ENOMEM; goto err; + } + r = amdgpu_ib_get(ring, NULL, 64, ib); + if (r) + goto err1; addr = amdgpu_bo_gpu_offset(bo); - ib.ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0); - ib.ptr[1] = addr; - ib.ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0); - ib.ptr[3] = addr >> 32; - ib.ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0); - ib.ptr[5] = 0; + ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0); + ib->ptr[1] = addr; + ib->ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0); + ib->ptr[3] = addr >> 32; + ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0); + ib->ptr[5] = 0; for (i = 6; i < 16; ++i) - ib.ptr[i] = PACKET2(0); - ib.length_dw = 16; + ib->ptr[i] = PACKET2(0); + ib->length_dw = 16; - r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, + &amdgpu_uvd_free_job, + AMDGPU_FENCE_OWNER_UNDEFINED, + &f); if (r) - goto err; - ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base); + goto err2; - if (fence) - *fence = amdgpu_fence_ref(ib.fence); + ttm_eu_fence_buffer_objects(&ticket, &head, f); - amdgpu_ib_free(ring->adev, &ib); + if (fence) + *fence = fence_get(f); amdgpu_bo_unref(&bo); - return 0; + fence_put(f); + if (amdgpu_enable_scheduler) + return 0; + amdgpu_ib_free(ring->adev, ib); + kfree(ib); + return 0; +err2: + amdgpu_ib_free(ring->adev, ib); +err1: + kfree(ib); err: ttm_eu_backoff_reservation(&ticket, &head); return r; @@ -858,7 +897,7 @@ err: crash the 
vcpu so just try to emmit a dummy create/destroy msg to avoid this */ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_fence **fence) + struct fence **fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_bo *bo; @@ -905,7 +944,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, } int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_fence **fence) + struct fence **fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_bo *bo; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index 2255aa710e33..1724c2c86151 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -29,9 +29,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev); int amdgpu_uvd_suspend(struct amdgpu_device *adev); int amdgpu_uvd_resume(struct amdgpu_device *adev); int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_fence **fence); + struct fence **fence); int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_fence **fence); + struct fence **fence); void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index d3ca73090e39..33ee6ae28f37 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -48,6 +48,7 @@ #endif #define FIRMWARE_TONGA "amdgpu/tonga_vce.bin" #define FIRMWARE_CARRIZO "amdgpu/carrizo_vce.bin" +#define FIRMWARE_FIJI "amdgpu/fiji_vce.bin" #ifdef CONFIG_DRM_AMDGPU_CIK MODULE_FIRMWARE(FIRMWARE_BONAIRE); @@ -58,6 +59,7 @@ MODULE_FIRMWARE(FIRMWARE_MULLINS); #endif MODULE_FIRMWARE(FIRMWARE_TONGA); MODULE_FIRMWARE(FIRMWARE_CARRIZO); +MODULE_FIRMWARE(FIRMWARE_FIJI); static void amdgpu_vce_idle_work_handler(struct work_struct *work); @@ -101,6 +103,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) case CHIP_CARRIZO: fw_name = FIRMWARE_CARRIZO; break; + case CHIP_FIJI: + fw_name = FIRMWARE_FIJI; + break; default: return -EINVAL; @@ -334,6 +339,14 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) } } +static int amdgpu_vce_free_job( + struct amdgpu_cs_parser *sched_job) +{ + amdgpu_ib_free(sched_job->adev, sched_job->ibs); + kfree(sched_job->ibs); + return 0; +} + /** * amdgpu_vce_get_create_msg - generate a VCE create msg * @@ -345,59 +358,69 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) * Open up a stream for HW test */ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_fence **fence) + struct fence **fence) { const unsigned ib_size_dw = 1024; - struct amdgpu_ib ib; + struct amdgpu_ib *ib = NULL; + struct fence *f = NULL; + struct amdgpu_device *adev = ring->adev; uint64_t dummy; int i, r; - r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib); + ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); + if (!ib) + return -ENOMEM; + r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + kfree(ib); return r; } - dummy = ib.gpu_addr + 1024; + dummy = ib->gpu_addr + 1024; /* stitch together an VCE create msg */ - ib.length_dw = 0; - ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ - ib.ptr[ib.length_dw++] = 0x00000001; /* 
session cmd */ - ib.ptr[ib.length_dw++] = handle; - - ib.ptr[ib.length_dw++] = 0x00000030; /* len */ - ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */ - ib.ptr[ib.length_dw++] = 0x00000000; - ib.ptr[ib.length_dw++] = 0x00000042; - ib.ptr[ib.length_dw++] = 0x0000000a; - ib.ptr[ib.length_dw++] = 0x00000001; - ib.ptr[ib.length_dw++] = 0x00000080; - ib.ptr[ib.length_dw++] = 0x00000060; - ib.ptr[ib.length_dw++] = 0x00000100; - ib.ptr[ib.length_dw++] = 0x00000100; - ib.ptr[ib.length_dw++] = 0x0000000c; - ib.ptr[ib.length_dw++] = 0x00000000; - - ib.ptr[ib.length_dw++] = 0x00000014; /* len */ - ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ - ib.ptr[ib.length_dw++] = upper_32_bits(dummy); - ib.ptr[ib.length_dw++] = dummy; - ib.ptr[ib.length_dw++] = 0x00000001; - - for (i = ib.length_dw; i < ib_size_dw; ++i) - ib.ptr[i] = 0x0; - - r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); - if (r) { - DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); - } - + ib->length_dw = 0; + ib->ptr[ib->length_dw++] = 0x0000000c; /* len */ + ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */ + ib->ptr[ib->length_dw++] = handle; + + ib->ptr[ib->length_dw++] = 0x00000030; /* len */ + ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */ + ib->ptr[ib->length_dw++] = 0x00000000; + ib->ptr[ib->length_dw++] = 0x00000042; + ib->ptr[ib->length_dw++] = 0x0000000a; + ib->ptr[ib->length_dw++] = 0x00000001; + ib->ptr[ib->length_dw++] = 0x00000080; + ib->ptr[ib->length_dw++] = 0x00000060; + ib->ptr[ib->length_dw++] = 0x00000100; + ib->ptr[ib->length_dw++] = 0x00000100; + ib->ptr[ib->length_dw++] = 0x0000000c; + ib->ptr[ib->length_dw++] = 0x00000000; + + ib->ptr[ib->length_dw++] = 0x00000014; /* len */ + ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */ + ib->ptr[ib->length_dw++] = upper_32_bits(dummy); + ib->ptr[ib->length_dw++] = dummy; + ib->ptr[ib->length_dw++] = 0x00000001; + + for (i = ib->length_dw; i < ib_size_dw; ++i) + ib->ptr[i] = 0x0; + + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, + &amdgpu_vce_free_job, + AMDGPU_FENCE_OWNER_UNDEFINED, + &f); + if (r) + goto err; if (fence) - *fence = amdgpu_fence_ref(ib.fence); - - amdgpu_ib_free(ring->adev, &ib); - + *fence = fence_get(f); + fence_put(f); + if (amdgpu_enable_scheduler) + return 0; +err: + amdgpu_ib_free(adev, ib); + kfree(ib); return r; } @@ -412,49 +435,59 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, * Close up a stream for HW test or if userspace failed to do so */ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_fence **fence) + struct fence **fence) { const unsigned ib_size_dw = 1024; - struct amdgpu_ib ib; + struct amdgpu_ib *ib = NULL; + struct fence *f = NULL; + struct amdgpu_device *adev = ring->adev; uint64_t dummy; int i, r; - r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib); + ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); + if (!ib) + return -ENOMEM; + + r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib); if (r) { + kfree(ib); DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); return r; } - dummy = ib.gpu_addr + 1024; + dummy = ib->gpu_addr + 1024; /* stitch together an VCE destroy msg */ - ib.length_dw = 0; - ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ - ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ - ib.ptr[ib.length_dw++] = handle; - - ib.ptr[ib.length_dw++] = 0x00000014; /* len */ - ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ - ib.ptr[ib.length_dw++] = 
upper_32_bits(dummy); - ib.ptr[ib.length_dw++] = dummy; - ib.ptr[ib.length_dw++] = 0x00000001; - - ib.ptr[ib.length_dw++] = 0x00000008; /* len */ - ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */ - - for (i = ib.length_dw; i < ib_size_dw; ++i) - ib.ptr[i] = 0x0; - - r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); - if (r) { - DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); - } - + ib->length_dw = 0; + ib->ptr[ib->length_dw++] = 0x0000000c; /* len */ + ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */ + ib->ptr[ib->length_dw++] = handle; + + ib->ptr[ib->length_dw++] = 0x00000014; /* len */ + ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */ + ib->ptr[ib->length_dw++] = upper_32_bits(dummy); + ib->ptr[ib->length_dw++] = dummy; + ib->ptr[ib->length_dw++] = 0x00000001; + + ib->ptr[ib->length_dw++] = 0x00000008; /* len */ + ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */ + + for (i = ib->length_dw; i < ib_size_dw; ++i) + ib->ptr[i] = 0x0; + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, + &amdgpu_vce_free_job, + AMDGPU_FENCE_OWNER_UNDEFINED, + &f); + if (r) + goto err; if (fence) - *fence = amdgpu_fence_ref(ib.fence); - - amdgpu_ib_free(ring->adev, &ib); - + *fence = fence_get(f); + fence_put(f); + if (amdgpu_enable_scheduler) + return 0; +err: + amdgpu_ib_free(adev, ib); + kfree(ib); return r; } @@ -800,7 +833,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) */ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring) { - struct amdgpu_fence *fence = NULL; + struct fence *fence = NULL; int r; r = amdgpu_vce_get_create_msg(ring, 1, NULL); @@ -815,13 +848,13 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring) goto error; } - r = amdgpu_fence_wait(fence, false); + r = fence_wait(fence, false); if (r) { DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); } else { DRM_INFO("ib test on ring %d succeeded\n", ring->idx); } error: - amdgpu_fence_unref(&fence); + fence_put(fence); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index 7ccdb5927da5..ba2da8ee5906 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -29,9 +29,9 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev); int amdgpu_vce_suspend(struct amdgpu_device *adev); int amdgpu_vce_resume(struct amdgpu_device *adev); int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_fence **fence); + struct fence **fence); int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_fence **fence); + struct fence **fence); void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 9a4e3b63f1cb..a78a206e176e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -127,16 +127,16 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, /** * amdgpu_vm_grab_id - allocate the next free VMID * - * @ring: ring we want to submit job to * @vm: vm to allocate id for + * @ring: ring we want to submit job to + * @sync: sync object where we add dependencies * - * Allocate an id for the vm (cayman+). - * Returns the fence we need to sync to (if any). 
+ * Allocate an id for the vm, adding fences to the sync obj as necessary. * - * Global and local mutex must be locked! + * Global mutex must be locked! */ -struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring, - struct amdgpu_vm *vm) +int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, + struct amdgpu_sync *sync) { struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {}; struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; @@ -148,7 +148,7 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring, /* check if the id is still valid */ if (vm_id->id && vm_id->last_id_use && vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) - return NULL; + return 0; /* we definately need to flush */ vm_id->pd_gpu_addr = ~0ll; @@ -161,7 +161,7 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring, /* found a free one */ vm_id->id = i; trace_amdgpu_vm_grab_id(i, ring->idx); - return NULL; + return 0; } if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) { @@ -172,15 +172,19 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring, for (i = 0; i < 2; ++i) { if (choices[i]) { + struct amdgpu_fence *fence; + + fence = adev->vm_manager.active[choices[i]]; vm_id->id = choices[i]; + trace_amdgpu_vm_grab_id(choices[i], ring->idx); - return adev->vm_manager.active[choices[i]]; + return amdgpu_sync_fence(ring->adev, sync, &fence->base); } } /* should never happen */ BUG(); - return NULL; + return -EINVAL; } /** @@ -200,13 +204,15 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring, { uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; + struct amdgpu_fence *flushed_updates = vm_id->flushed_updates; - if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates || - amdgpu_fence_is_earlier(vm_id->flushed_updates, updates)) { + if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates || + (updates && amdgpu_fence_is_earlier(flushed_updates, updates))) { trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id); - amdgpu_fence_unref(&vm_id->flushed_updates); - vm_id->flushed_updates = amdgpu_fence_ref(updates); + vm_id->flushed_updates = amdgpu_fence_ref( + amdgpu_fence_later(flushed_updates, updates)); + amdgpu_fence_unref(&flushed_updates); vm_id->pd_gpu_addr = pd_addr; amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr); } @@ -300,6 +306,16 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev, } } +static int amdgpu_vm_free_job( + struct amdgpu_cs_parser *sched_job) +{ + int i; + for (i = 0; i < sched_job->num_ibs; i++) + amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]); + kfree(sched_job->ibs); + return 0; +} + /** * amdgpu_vm_clear_bo - initially clear the page dir/table * @@ -310,7 +326,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, struct amdgpu_bo *bo) { struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; - struct amdgpu_ib ib; + struct fence *fence = NULL; + struct amdgpu_ib *ib; unsigned entries; uint64_t addr; int r; @@ -330,24 +347,33 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, addr = amdgpu_bo_gpu_offset(bo); entries = amdgpu_bo_size(bo) / 8; - r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, &ib); - if (r) + ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); + if (!ib) goto error_unreserve; - ib.length_dw = 0; - - amdgpu_vm_update_pages(adev, &ib, addr, 0, entries, 0, 0, 0); - amdgpu_vm_pad_ib(adev, &ib); - WARN_ON(ib.length_dw > 64); - - r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM); + r = amdgpu_ib_get(ring, 
NULL, entries * 2 + 64, ib); if (r) goto error_free; - amdgpu_bo_fence(bo, ib.fence, true); - + ib->length_dw = 0; + + amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0); + amdgpu_vm_pad_ib(adev, ib); + WARN_ON(ib->length_dw > 64); + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, + &amdgpu_vm_free_job, + AMDGPU_FENCE_OWNER_VM, + &fence); + if (!r) + amdgpu_bo_fence(bo, fence, true); + fence_put(fence); + if (amdgpu_enable_scheduler) { + amdgpu_bo_unreserve(bo); + return 0; + } error_free: - amdgpu_ib_free(adev, &ib); + amdgpu_ib_free(adev, ib); + kfree(ib); error_unreserve: amdgpu_bo_unreserve(bo); @@ -400,7 +426,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, uint32_t incr = AMDGPU_VM_PTE_COUNT * 8; uint64_t last_pde = ~0, last_pt = ~0; unsigned count = 0, pt_idx, ndw; - struct amdgpu_ib ib; + struct amdgpu_ib *ib; + struct fence *fence = NULL; + int r; /* padding, etc. */ @@ -413,10 +441,14 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, if (ndw > 0xfffff) return -ENOMEM; - r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib); + ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); + if (!ib) + return -ENOMEM; + + r = amdgpu_ib_get(ring, NULL, ndw * 4, ib); if (r) return r; - ib.length_dw = 0; + ib->length_dw = 0; /* walk over the address space and update the page directory */ for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { @@ -436,7 +468,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, ((last_pt + incr * count) != pt)) { if (count) { - amdgpu_vm_update_pages(adev, &ib, last_pde, + amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count, incr, AMDGPU_PTE_VALID, 0); } @@ -450,23 +482,37 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, } if (count) - amdgpu_vm_update_pages(adev, &ib, last_pde, last_pt, count, + amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count, incr, AMDGPU_PTE_VALID, 0); - if (ib.length_dw != 0) { - amdgpu_vm_pad_ib(adev, &ib); - amdgpu_sync_resv(adev, &ib.sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM); - WARN_ON(ib.length_dw > ndw); - r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM); - if (r) { - amdgpu_ib_free(adev, &ib); - return r; - } - amdgpu_bo_fence(pd, ib.fence, true); + if (ib->length_dw != 0) { + amdgpu_vm_pad_ib(adev, ib); + amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM); + WARN_ON(ib->length_dw > ndw); + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, + &amdgpu_vm_free_job, + AMDGPU_FENCE_OWNER_VM, + &fence); + if (r) + goto error_free; + + amdgpu_bo_fence(pd, fence, true); + fence_put(vm->page_directory_fence); + vm->page_directory_fence = fence_get(fence); + fence_put(fence); + } + + if (!amdgpu_enable_scheduler || ib->length_dw == 0) { + amdgpu_ib_free(adev, ib); + kfree(ib); } - amdgpu_ib_free(adev, &ib); return 0; + +error_free: + amdgpu_ib_free(adev, ib); + kfree(ib); + return r; } /** @@ -640,7 +686,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev, */ static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm, uint64_t start, uint64_t end, - struct amdgpu_fence *fence) + struct fence *fence) { unsigned i; @@ -670,12 +716,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo_va_mapping *mapping, uint64_t addr, uint32_t gtt_flags, - struct amdgpu_fence **fence) + struct fence **fence) { struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; unsigned nptes, ncmds, ndw; uint32_t flags = gtt_flags; - struct amdgpu_ib ib; + struct 
amdgpu_ib *ib; + struct fence *f = NULL; int r; /* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here @@ -722,46 +769,65 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (ndw > 0xfffff) return -ENOMEM; - r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib); - if (r) + ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); + if (!ib) + return -ENOMEM; + + r = amdgpu_ib_get(ring, NULL, ndw * 4, ib); + if (r) { + kfree(ib); return r; - ib.length_dw = 0; + } + + ib->length_dw = 0; if (!(flags & AMDGPU_PTE_VALID)) { unsigned i; for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_fence *f = vm->ids[i].last_id_use; - amdgpu_sync_fence(&ib.sync, f); + r = amdgpu_sync_fence(adev, &ib->sync, &f->base); + if (r) + return r; } } - r = amdgpu_vm_update_ptes(adev, vm, &ib, mapping->it.start, + r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start, mapping->it.last + 1, addr + mapping->offset, flags, gtt_flags); if (r) { - amdgpu_ib_free(adev, &ib); + amdgpu_ib_free(adev, ib); + kfree(ib); return r; } - amdgpu_vm_pad_ib(adev, &ib); - WARN_ON(ib.length_dw > ndw); + amdgpu_vm_pad_ib(adev, ib); + WARN_ON(ib->length_dw > ndw); + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, + &amdgpu_vm_free_job, + AMDGPU_FENCE_OWNER_VM, + &f); + if (r) + goto error_free; - r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM); - if (r) { - amdgpu_ib_free(adev, &ib); - return r; - } amdgpu_vm_fence_pts(vm, mapping->it.start, - mapping->it.last + 1, ib.fence); + mapping->it.last + 1, f); if (fence) { - amdgpu_fence_unref(fence); - *fence = amdgpu_fence_ref(ib.fence); + fence_put(*fence); + *fence = fence_get(f); + } + fence_put(f); + if (!amdgpu_enable_scheduler) { + amdgpu_ib_free(adev, ib); + kfree(ib); } - amdgpu_ib_free(adev, &ib); - return 0; + +error_free: + amdgpu_ib_free(adev, ib); + kfree(ib); + return r; } /** @@ -794,21 +860,25 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, addr = 0; } - if (addr == bo_va->addr) - return 0; - flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); - list_for_each_entry(mapping, &bo_va->mappings, list) { + spin_lock(&vm->status_lock); + if (!list_empty(&bo_va->vm_status)) + list_splice_init(&bo_va->valids, &bo_va->invalids); + spin_unlock(&vm->status_lock); + + list_for_each_entry(mapping, &bo_va->invalids, list) { r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr, flags, &bo_va->last_pt_update); if (r) return r; } - bo_va->addr = addr; spin_lock(&vm->status_lock); + list_splice_init(&bo_va->invalids, &bo_va->valids); list_del_init(&bo_va->vm_status); + if (!mem) + list_add(&bo_va->vm_status, &vm->cleared); spin_unlock(&vm->status_lock); return 0; @@ -861,7 +931,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_sync *sync) { struct amdgpu_bo_va *bo_va = NULL; - int r; + int r = 0; spin_lock(&vm->status_lock); while (!list_empty(&vm->invalidated)) { @@ -878,8 +948,9 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, spin_unlock(&vm->status_lock); if (bo_va) - amdgpu_sync_fence(sync, bo_va->last_pt_update); - return 0; + r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update); + + return r; } /** @@ -907,10 +978,10 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, } bo_va->vm = vm; bo_va->bo = bo; - bo_va->addr = 0; bo_va->ref_count = 1; INIT_LIST_HEAD(&bo_va->bo_list); - INIT_LIST_HEAD(&bo_va->mappings); + INIT_LIST_HEAD(&bo_va->valids); + INIT_LIST_HEAD(&bo_va->invalids); INIT_LIST_HEAD(&bo_va->vm_status); 
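The mapping hunks above stop tracking a single bo_va->addr and instead keep two lists per bo_va: valids (mappings the page tables currently reflect) and invalids (mappings still awaiting an update), splicing between them under the VM status lock; the invalidate side appears just below in amdgpu_vm_bo_invalidate(). A minimal sketch of that two-list state machine, using only the stock list helpers (all demo_* names are illustrative, not from the patch):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_bo_va {
	struct list_head valids;	/* reflected in the page tables */
	struct list_head invalids;	/* still need a page-table update */
	struct list_head vm_status;	/* link into the vm's invalidated list */
};

/* the BO moved: every currently valid mapping becomes stale */
static void demo_bo_va_invalidate(spinlock_t *status_lock,
				  struct demo_bo_va *bo_va,
				  struct list_head *vm_invalidated)
{
	spin_lock(status_lock);
	if (list_empty(&bo_va->vm_status))
		list_add(&bo_va->vm_status, vm_invalidated);
	spin_unlock(status_lock);
}

/* page tables rewritten: everything pending is valid again */
static void demo_bo_va_validate(spinlock_t *status_lock,
				struct demo_bo_va *bo_va)
{
	spin_lock(status_lock);
	list_splice_init(&bo_va->invalids, &bo_va->valids);
	list_del_init(&bo_va->vm_status);
	spin_unlock(status_lock);
}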
mutex_lock(&vm->mutex); @@ -999,12 +1070,10 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, mapping->offset = offset; mapping->flags = flags; - list_add(&mapping->list, &bo_va->mappings); + list_add(&mapping->list, &bo_va->invalids); interval_tree_insert(&mapping->it, &vm->va); trace_amdgpu_vm_bo_map(bo_va, mapping); - bo_va->addr = 0; - /* Make sure the page tables are allocated */ saddr >>= amdgpu_vm_block_size; eaddr >>= amdgpu_vm_block_size; @@ -1085,17 +1154,27 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, { struct amdgpu_bo_va_mapping *mapping; struct amdgpu_vm *vm = bo_va->vm; + bool valid = true; saddr /= AMDGPU_GPU_PAGE_SIZE; - list_for_each_entry(mapping, &bo_va->mappings, list) { + list_for_each_entry(mapping, &bo_va->valids, list) { if (mapping->it.start == saddr) break; } - if (&mapping->list == &bo_va->mappings) { - amdgpu_bo_unreserve(bo_va->bo); - return -ENOENT; + if (&mapping->list == &bo_va->valids) { + valid = false; + + list_for_each_entry(mapping, &bo_va->invalids, list) { + if (mapping->it.start == saddr) + break; + } + + if (&mapping->list == &bo_va->invalids) { + amdgpu_bo_unreserve(bo_va->bo); + return -ENOENT; + } } mutex_lock(&vm->mutex); @@ -1103,12 +1182,10 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, interval_tree_remove(&mapping->it, &vm->va); trace_amdgpu_vm_bo_unmap(bo_va, mapping); - if (bo_va->addr) { - /* clear the old address */ + if (valid) list_add(&mapping->list, &vm->freed); - } else { + else kfree(mapping); - } mutex_unlock(&vm->mutex); amdgpu_bo_unreserve(bo_va->bo); @@ -1139,16 +1216,19 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, list_del(&bo_va->vm_status); spin_unlock(&vm->status_lock); - list_for_each_entry_safe(mapping, next, &bo_va->mappings, list) { + list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { list_del(&mapping->list); interval_tree_remove(&mapping->it, &vm->va); trace_amdgpu_vm_bo_unmap(bo_va, mapping); - if (bo_va->addr) - list_add(&mapping->list, &vm->freed); - else - kfree(mapping); + list_add(&mapping->list, &vm->freed); } - amdgpu_fence_unref(&bo_va->last_pt_update); + list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { + list_del(&mapping->list); + interval_tree_remove(&mapping->it, &vm->va); + kfree(mapping); + } + + fence_put(bo_va->last_pt_update); kfree(bo_va); mutex_unlock(&vm->mutex); @@ -1169,12 +1249,10 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va; list_for_each_entry(bo_va, &bo->va, bo_list) { - if (bo_va->addr) { - spin_lock(&bo_va->vm->status_lock); - list_del(&bo_va->vm_status); + spin_lock(&bo_va->vm->status_lock); + if (list_empty(&bo_va->vm_status)) list_add(&bo_va->vm_status, &bo_va->vm->invalidated); - spin_unlock(&bo_va->vm->status_lock); - } + spin_unlock(&bo_va->vm->status_lock); } } @@ -1202,6 +1280,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) vm->va = RB_ROOT; spin_lock_init(&vm->status_lock); INIT_LIST_HEAD(&vm->invalidated); + INIT_LIST_HEAD(&vm->cleared); INIT_LIST_HEAD(&vm->freed); pd_size = amdgpu_vm_directory_size(adev); @@ -1215,6 +1294,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) return -ENOMEM; } + vm->page_directory_fence = NULL; + r = amdgpu_bo_create(adev, pd_size, align, true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &vm->page_directory); @@ -1263,6 +1344,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) kfree(vm->page_tables); amdgpu_bo_unref(&vm->page_directory); + fence_put(vm->page_directory_fence); for (i = 0; i < 
AMDGPU_MAX_RINGS; ++i) { amdgpu_fence_unref(&vm->ids[i].flushed_updates); diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index ae8caca61e04..cd6edc40c9cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -812,7 +812,7 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a else args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; - if ((adev->flags & AMDGPU_IS_APU) && + if ((adev->flags & AMD_IS_APU) && (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) { if (is_dp || !amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) { diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index b3b66a0d5ff7..4b6ce74753cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -838,7 +838,7 @@ static u32 cik_get_xclk(struct amdgpu_device *adev) { u32 reference_clock = adev->clock.spll.reference_freq; - if (adev->flags & AMDGPU_IS_APU) { + if (adev->flags & AMD_IS_APU) { if (RREG32_SMC(ixGENERAL_PWRMGT) & GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK) return reference_clock / 2; } else { @@ -1235,7 +1235,7 @@ static void cik_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask) if (reset_mask & AMDGPU_RESET_VMC) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_VMC_MASK; - if (!(adev->flags & AMDGPU_IS_APU)) { + if (!(adev->flags & AMD_IS_APU)) { if (reset_mask & AMDGPU_RESET_MC) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_MC_MASK; } @@ -1411,7 +1411,7 @@ static void cik_gpu_pci_config_reset(struct amdgpu_device *adev) dev_warn(adev->dev, "Wait for MC idle timed out !\n"); } - if (adev->flags & AMDGPU_IS_APU) + if (adev->flags & AMD_IS_APU) kv_save_regs_for_reset(adev, &kv_save); /* disable BM */ @@ -1429,7 +1429,7 @@ static void cik_gpu_pci_config_reset(struct amdgpu_device *adev) } /* does asic init need to be run first??? 
*/ - if (adev->flags & AMDGPU_IS_APU) + if (adev->flags & AMD_IS_APU) kv_restore_regs_for_reset(adev, &kv_save); } @@ -1570,7 +1570,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) if (amdgpu_pcie_gen2 == 0) return; - if (adev->flags & AMDGPU_IS_APU) + if (adev->flags & AMD_IS_APU) return; ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); @@ -1730,7 +1730,7 @@ static void cik_program_aspm(struct amdgpu_device *adev) return; /* XXX double check APUs */ - if (adev->flags & AMDGPU_IS_APU) + if (adev->flags & AMD_IS_APU) return; orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL); diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 15df46c93f0a..2b4242b39b0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -614,6 +614,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; + struct fence *f = NULL; unsigned i; unsigned index; int r; @@ -629,12 +630,10 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) gpu_addr = adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); - r = amdgpu_ib_get(ring, NULL, 256, &ib); if (r) { - amdgpu_wb_free(adev, index); DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); - return r; + goto err0; } ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); @@ -643,20 +642,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[3] = 1; ib.ptr[4] = 0xDEADBEEF; ib.length_dw = 5; + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, + AMDGPU_FENCE_OWNER_UNDEFINED, + &f); + if (r) + goto err1; - r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); + r = fence_wait(f, false); if (r) { - amdgpu_ib_free(adev, &ib); - amdgpu_wb_free(adev, index); - DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); - return r; - } - r = amdgpu_fence_wait(ib.fence, false); - if (r) { - amdgpu_ib_free(adev, &ib); - amdgpu_wb_free(adev, index); DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - return r; + goto err1; } for (i = 0; i < adev->usec_timeout; i++) { tmp = le32_to_cpu(adev->wb.wb[index]); @@ -666,12 +661,17 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ib.fence->ring->idx, i); + ring->idx, i); + goto err1; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; } + +err1: + fence_put(f); amdgpu_ib_free(adev, &ib); +err0: amdgpu_wb_free(adev, index); return r; } @@ -1404,5 +1404,6 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev) if (adev->vm_manager.vm_pte_funcs == NULL) { adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs; adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; + adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; } } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index e70a26f587a0..4b255ac3043c 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -126,9 +126,31 @@ static const u32 tonga_mgcg_cgcg_init[] = mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, }; +static const u32 golden_settings_fiji_a10[] = +{ + mmDCI_CLK_CNTL, 0x00000080, 0x00000000, + mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, + mmFBC_MISC, 0x1f311fff, 0x12300000, + mmHDMI_CONTROL, 0x31000111, 0x00000011, +}; + +static const u32 fiji_mgcg_cgcg_init[] = +{ + 
mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100, + mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, +}; + static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { + case CHIP_FIJI: + amdgpu_program_register_sequence(adev, + fiji_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + golden_settings_fiji_a10, + (const u32)ARRAY_SIZE(golden_settings_fiji_a10)); + break; case CHIP_TONGA: amdgpu_program_register_sequence(adev, tonga_mgcg_cgcg_init, @@ -803,11 +825,11 @@ static u32 dce_v10_0_line_buffer_adjust(struct amdgpu_device *adev, buffer_alloc = 2; } else if (mode->crtc_hdisplay < 4096) { mem_cfg = 0; - buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; + buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4; } else { DRM_DEBUG_KMS("Mode too big for LB!\n"); mem_cfg = 0; - buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; + buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4; } } else { mem_cfg = 1; @@ -2888,6 +2910,7 @@ static int dce_v10_0_early_init(void *handle) dce_v10_0_set_irq_funcs(adev); switch (adev->asic_type) { + case CHIP_FIJI: case CHIP_TONGA: adev->mode_info.num_crtc = 6; /* XXX 7??? */ adev->mode_info.num_hpd = 6; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index dcb402ee048a..70eee807421f 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -801,11 +801,11 @@ static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev, buffer_alloc = 2; } else if (mode->crtc_hdisplay < 4096) { mem_cfg = 0; - buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; + buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4; } else { DRM_DEBUG_KMS("Mode too big for LB!\n"); mem_cfg = 0; - buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; + buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4; } } else { mem_cfg = 1; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index cc050a329c49..c86911c2ea2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -770,11 +770,11 @@ static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev, buffer_alloc = 2; } else if (mode->crtc_hdisplay < 4096) { tmp = 0; - buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; + buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4; } else { DRM_DEBUG_KMS("Mode too big for LB!\n"); tmp = 0; - buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; + buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4; } } else { tmp = 1; diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c new file mode 100644 index 000000000000..8f9845d9a986 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c @@ -0,0 +1,181 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <linux/firmware.h> +#include "drmP.h" +#include "amdgpu.h" +#include "fiji_smumgr.h" + +MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); + +static void fiji_dpm_set_funcs(struct amdgpu_device *adev); + +static int fiji_dpm_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + fiji_dpm_set_funcs(adev); + + return 0; +} + +static int fiji_dpm_init_microcode(struct amdgpu_device *adev) +{ + char fw_name[30] = "amdgpu/fiji_smc.bin"; + int err; + + err = request_firmware(&adev->pm.fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->pm.fw); + +out: + if (err) { + DRM_ERROR("Failed to load firmware \"%s\"", fw_name); + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + } + return err; +} + +static int fiji_dpm_sw_init(void *handle) +{ + int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + ret = fiji_dpm_init_microcode(adev); + if (ret) + return ret; + + return 0; +} + +static int fiji_dpm_sw_fini(void *handle) +{ + return 0; +} + +static int fiji_dpm_hw_init(void *handle) +{ + int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + mutex_lock(&adev->pm.mutex); + + ret = fiji_smu_init(adev); + if (ret) { + DRM_ERROR("SMU initialization failed\n"); + goto fail; + } + + ret = fiji_smu_start(adev); + if (ret) { + DRM_ERROR("SMU start failed\n"); + goto fail; + } + + mutex_unlock(&adev->pm.mutex); + return 0; + +fail: + adev->firmware.smu_load = false; + mutex_unlock(&adev->pm.mutex); + return -EINVAL; +} + +static int fiji_dpm_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + mutex_lock(&adev->pm.mutex); + fiji_smu_fini(adev); + mutex_unlock(&adev->pm.mutex); + return 0; +} + +static int fiji_dpm_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + fiji_dpm_hw_fini(adev); + + return 0; +} + +static int fiji_dpm_resume(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + fiji_dpm_hw_init(adev); + + return 0; +} + +static int fiji_dpm_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + return 0; +} + +static int fiji_dpm_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +const struct amd_ip_funcs fiji_dpm_ip_funcs = { + .early_init = fiji_dpm_early_init, + .late_init = NULL, + .sw_init = fiji_dpm_sw_init, + .sw_fini = fiji_dpm_sw_fini, + .hw_init = fiji_dpm_hw_init, + .hw_fini = fiji_dpm_hw_fini, + .suspend = fiji_dpm_suspend, + .resume = fiji_dpm_resume, + .is_idle = NULL, + .wait_for_idle = NULL, + .soft_reset = NULL, + .print_status = NULL, + .set_clockgating_state = fiji_dpm_set_clockgating_state, + .set_powergating_state = fiji_dpm_set_powergating_state, +}; + +static const struct amdgpu_dpm_funcs fiji_dpm_funcs = { + .get_temperature = NULL, + .pre_set_power_state = NULL, + .set_power_state = NULL, + .post_set_power_state = NULL, + .display_configuration_changed = NULL, + .get_sclk = NULL, + .get_mclk = 
NULL, + .print_power_state = NULL, + .debugfs_print_current_performance_level = NULL, + .force_performance_level = NULL, + .vblank_too_short = NULL, + .powergate_uvd = NULL, +}; + +static void fiji_dpm_set_funcs(struct amdgpu_device *adev) +{ + if (NULL == adev->pm.funcs) + adev->pm.funcs = &fiji_dpm_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h b/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h new file mode 100644 index 000000000000..3c4824082990 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h @@ -0,0 +1,182 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef FIJI_PP_SMC_H +#define FIJI_PP_SMC_H + +#pragma pack(push, 1) + +#define PPSMC_SWSTATE_FLAG_DC 0x01 +#define PPSMC_SWSTATE_FLAG_UVD 0x02 +#define PPSMC_SWSTATE_FLAG_VCE 0x04 + +#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00 +#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01 +#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff + +#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01 +#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02 +#define PPSMC_SYSTEMFLAG_GDDR5 0x04 + +#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08 + +#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10 +#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20 + +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07 +#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08 + +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00 +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01 + +#define PPSMC_DPM2FLAGS_TDPCLMP 0x01 +#define PPSMC_DPM2FLAGS_PWRSHFT 0x02 +#define PPSMC_DPM2FLAGS_OCP 0x04 + +#define PPSMC_DISPLAY_WATERMARK_LOW 0 +#define PPSMC_DISPLAY_WATERMARK_HIGH 1 + +#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01 +#define PPSMC_STATEFLAG_POWERBOOST 0x02 +#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04 +#define PPSMC_STATEFLAG_POWERSHIFT 0x08 +#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10 +#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20 +#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40 + +#define FDO_MODE_HARDWARE 0 +#define FDO_MODE_PIECE_WISE_LINEAR 1 + +enum FAN_CONTROL { + FAN_CONTROL_FUZZY, + FAN_CONTROL_TABLE +}; + +//Gemini Modes +#define PPSMC_GeminiModeNone 0 //Single GPU board +#define PPSMC_GeminiModeMaster 1 //Master GPU on a Gemini board +#define PPSMC_GeminiModeSlave 2 //Slave GPU on a Gemini board + +#define PPSMC_Result_OK ((uint16_t)0x01) +#define PPSMC_Result_NoMore ((uint16_t)0x02) +#define PPSMC_Result_NotNow ((uint16_t)0x03) +#define PPSMC_Result_Failed 
((uint16_t)0xFF) +#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE) +#define PPSMC_Result_UnknownVT ((uint16_t)0xFD) + +typedef uint16_t PPSMC_Result; + +#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x)) + +#define PPSMC_MSG_Halt ((uint16_t)0x10) +#define PPSMC_MSG_Resume ((uint16_t)0x11) +#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12) +#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13) +#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14) +#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15) +#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16) +#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17) +#define PPSMC_MSG_LevelUp ((uint16_t)0x18) +#define PPSMC_MSG_LevelDown ((uint16_t)0x19) +#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a) +#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20) +#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f) +#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40) +#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41) +#define PPSMC_MSG_ForceHigh ((uint16_t)0x42) +#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43) +#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51) +#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52) +#define PPSMC_MSG_EnableCac ((uint16_t)0x53) +#define PPSMC_MSG_DisableCac ((uint16_t)0x54) +#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55) +#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56) +#define PPSMC_CACHistoryStart ((uint16_t)0x57) +#define PPSMC_CACHistoryStop ((uint16_t)0x58) +#define PPSMC_TDPClampingActive ((uint16_t)0x59) +#define PPSMC_TDPClampingInactive ((uint16_t)0x5A) +#define PPSMC_StartFanControl ((uint16_t)0x5B) +#define PPSMC_StopFanControl ((uint16_t)0x5C) +#define PPSMC_NoDisplay ((uint16_t)0x5D) +#define PPSMC_HasDisplay ((uint16_t)0x5E) +#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60) +#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61) +#define PPSMC_MSG_EnableULV ((uint16_t)0x62) +#define PPSMC_MSG_DisableULV ((uint16_t)0x63) +#define PPSMC_MSG_EnterULV ((uint16_t)0x64) +#define PPSMC_MSG_ExitULV ((uint16_t)0x65) +#define PPSMC_PowerShiftActive ((uint16_t)0x6A) +#define PPSMC_PowerShiftInactive ((uint16_t)0x6B) +#define PPSMC_OCPActive ((uint16_t)0x6C) +#define PPSMC_OCPInactive ((uint16_t)0x6D) +#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E) +#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F) +#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70) +#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71) +#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72) +#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73) +#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74) +#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75) +#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76) +#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77) +#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78) +#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79) +#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A) +#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B) +#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C) +#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D) +#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E) +#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F) +#define PPSMC_FlushDataCache ((uint16_t)0x80) +#define PPSMC_FlushInstrCache ((uint16_t)0x81) +#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82) +#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83) +#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84) +#define PPSMC_MSG_SetForcedLevelsAndJump 
((uint16_t)0x85) +#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86) +#define PPSMC_MSG_EnableDTE ((uint16_t)0x87) +#define PPSMC_MSG_DisableDTE ((uint16_t)0x88) +#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89) +#define PPSMC_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A) +#define PPSMC_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B) +#define PPSMC_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C) + +#define PPSMC_MSG_BREAK ((uint16_t)0xF8) + +#define PPSMC_MSG_Test ((uint16_t)0x100) +#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t)0x250) +#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t)0x251) +#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t)0x252) +#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t)0x253) +#define PPSMC_MSG_LoadUcodes ((uint16_t)0x254) + +typedef uint16_t PPSMC_Msg; + +#define PPSMC_EVENT_STATUS_THERMAL 0x00000001 +#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002 +#define PPSMC_EVENT_STATUS_DC 0x00000004 +#define PPSMC_EVENT_STATUS_GPIO17 0x00000008 + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c new file mode 100644 index 000000000000..493c8c9c7faa --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c @@ -0,0 +1,853 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <linux/firmware.h> +#include "drmP.h" +#include "amdgpu.h" +#include "fiji_ppsmc.h" +#include "fiji_smumgr.h" +#include "smu_ucode_xfer_vi.h" +#include "amdgpu_ucode.h" + +#include "smu/smu_7_1_3_d.h" +#include "smu/smu_7_1_3_sh_mask.h" + +#define FIJI_SMC_SIZE 0x20000 + +static int fiji_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit) +{ + uint32_t val; + + if (smc_address & 3) + return -EINVAL; + + if ((smc_address + 3) > limit) + return -EINVAL; + + WREG32(mmSMC_IND_INDEX_0, smc_address); + + val = RREG32(mmSMC_IND_ACCESS_CNTL); + val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + WREG32(mmSMC_IND_ACCESS_CNTL, val); + + return 0; +} + +static int fiji_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit) +{ + uint32_t addr; + uint32_t data, orig_data; + int result = 0; + uint32_t extra_shift; + unsigned long flags; + + if (smc_start_address & 3) + return -EINVAL; + + if ((smc_start_address + byte_count) > limit) + return -EINVAL; + + addr = smc_start_address; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + while (byte_count >= 4) { + /* Bytes are written into the SMC address space with the MSB first */ + data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; + + result = fiji_set_smc_sram_address(adev, addr, limit); + + if (result) + goto out; + + WREG32(mmSMC_IND_DATA_0, data); + + src += 4; + byte_count -= 4; + addr += 4; + } + + if (0 != byte_count) { + /* Now write odd bytes left, do a read modify write cycle */ + data = 0; + + result = fiji_set_smc_sram_address(adev, addr, limit); + if (result) + goto out; + + orig_data = RREG32(mmSMC_IND_DATA_0); + extra_shift = 8 * (4 - byte_count); + + while (byte_count > 0) { + data = (data << 8) + *src++; + byte_count--; + } + + data <<= extra_shift; + data |= (orig_data & ~((~0UL) << extra_shift)); + + result = fiji_set_smc_sram_address(adev, addr, limit); + if (result) + goto out; + + WREG32(mmSMC_IND_DATA_0, data); + } + +out: + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + return result; +} + +static int fiji_program_jump_on_start(struct amdgpu_device *adev) +{ + static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40}; + fiji_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1); + + return 0; +} + +static bool fiji_is_smc_ram_running(struct amdgpu_device *adev) +{ + uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable); + + return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C))); +} + +static int wait_smu_response(struct amdgpu_device *adev) +{ + int i; + uint32_t val; + + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32(mmSMC_RESP_0); + if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP)) + break; + udelay(1); + } + + if (i == adev->usec_timeout) + return -EINVAL; + + return 0; +} + +static int fiji_send_msg_to_smc_offset(struct amdgpu_device *adev) +{ + if (wait_smu_response(adev)) { + DRM_ERROR("Failed to send previous message\n"); + return -EINVAL; + } + + WREG32(mmSMC_MSG_ARG_0, 0x20000); + WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test); + + if (wait_smu_response(adev)) { + DRM_ERROR("Failed to send message\n"); + return -EINVAL; + } + + return 0; +} + +static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg) +{ + if (!fiji_is_smc_ram_running(adev)) + return -EINVAL; + + if (wait_smu_response(adev)) { + DRM_ERROR("Failed to send previous message\n"); + return -EINVAL; + 
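+	/* wait_smu_response() above polls SMC_RESP_0, the SMC's ack for the previously posted message; the new message ID itself is written to mmSMC_MESSAGE_0 below. */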
} + + WREG32(mmSMC_MESSAGE_0, msg); + + if (wait_smu_response(adev)) { + DRM_ERROR("Failed to send message\n"); + return -EINVAL; + } + + return 0; +} + +static int fiji_send_msg_to_smc_without_waiting(struct amdgpu_device *adev, + PPSMC_Msg msg) +{ + if (wait_smu_response(adev)) { + DRM_ERROR("Failed to send previous message\n"); + return -EINVAL; + } + + WREG32(mmSMC_MESSAGE_0, msg); + + return 0; +} + +static int fiji_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, + PPSMC_Msg msg, + uint32_t parameter) +{ + if (!fiji_is_smc_ram_running(adev)) + return -EINVAL; + + if (wait_smu_response(adev)) { + DRM_ERROR("Failed to send previous message\n"); + return -EINVAL; + } + + WREG32(mmSMC_MSG_ARG_0, parameter); + + return fiji_send_msg_to_smc(adev, msg); +} + +static int fiji_send_msg_to_smc_with_parameter_without_waiting( + struct amdgpu_device *adev, + PPSMC_Msg msg, uint32_t parameter) +{ + if (wait_smu_response(adev)) { + DRM_ERROR("Failed to send previous message\n"); + return -EINVAL; + } + + WREG32(mmSMC_MSG_ARG_0, parameter); + + return fiji_send_msg_to_smc_without_waiting(adev, msg); +} + +#if 0 /* not used yet */ +static int fiji_wait_for_smc_inactive(struct amdgpu_device *adev) +{ + int i; + uint32_t val; + + if (!fiji_is_smc_ram_running(adev)) + return -EINVAL; + + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0) + break; + udelay(1); + } + + if (i == adev->usec_timeout) + return -EINVAL; + + return 0; +} +#endif + +static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev) +{ + const struct smc_firmware_header_v1_0 *hdr; + uint32_t ucode_size; + uint32_t ucode_start_address; + const uint8_t *src; + uint32_t val; + uint32_t byte_count; + uint32_t *data; + unsigned long flags; + + if (!adev->pm.fw) + return -EINVAL; + + hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; + amdgpu_ucode_print_smc_hdr(&hdr->header); + + adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); + ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); + ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); + src = (const uint8_t *) + (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + + if (ucode_size & 3) { + DRM_ERROR("SMC ucode is not 4 bytes aligned\n"); + return -EINVAL; + } + + if (ucode_size > FIJI_SMC_SIZE) { + DRM_ERROR("SMC address is beyond the SMC RAM area\n"); + return -EINVAL; + } + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + WREG32(mmSMC_IND_INDEX_0, ucode_start_address); + + val = RREG32(mmSMC_IND_ACCESS_CNTL); + val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); + WREG32(mmSMC_IND_ACCESS_CNTL, val); + + byte_count = ucode_size; + data = (uint32_t *)src; + for (; byte_count >= 4; data++, byte_count -= 4) + WREG32(mmSMC_IND_DATA_0, data[0]); + + val = RREG32(mmSMC_IND_ACCESS_CNTL); + val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + WREG32(mmSMC_IND_ACCESS_CNTL, val); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + + return 0; +} + +#if 0 /* not used yet */ +static int fiji_read_smc_sram_dword(struct amdgpu_device *adev, + uint32_t smc_address, + uint32_t *value, + uint32_t limit) +{ + int result; + unsigned long flags; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + result = fiji_set_smc_sram_address(adev, smc_address, limit); + if (result == 0) + *value = RREG32(mmSMC_IND_DATA_0); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + return result; +} 
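+/* Write-side counterpart of the read helper above; both serialize access to the indexed SMC_IND_INDEX_0/SMC_IND_DATA_0 register pair under smc_idx_lock. */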
+ +static int fiji_write_smc_sram_dword(struct amdgpu_device *adev, + uint32_t smc_address, + uint32_t value, + uint32_t limit) +{ + int result; + unsigned long flags; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + result = fiji_set_smc_sram_address(adev, smc_address, limit); + if (result == 0) + WREG32(mmSMC_IND_DATA_0, value); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + return result; +} + +static int fiji_smu_stop_smc(struct amdgpu_device *adev) +{ + uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); + val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); + WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); + + val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1); + WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); + + return 0; +} +#endif + +static enum AMDGPU_UCODE_ID fiji_convert_fw_type(uint32_t fw_type) +{ + switch (fw_type) { + case UCODE_ID_SDMA0: + return AMDGPU_UCODE_ID_SDMA0; + case UCODE_ID_SDMA1: + return AMDGPU_UCODE_ID_SDMA1; + case UCODE_ID_CP_CE: + return AMDGPU_UCODE_ID_CP_CE; + case UCODE_ID_CP_PFP: + return AMDGPU_UCODE_ID_CP_PFP; + case UCODE_ID_CP_ME: + return AMDGPU_UCODE_ID_CP_ME; + case UCODE_ID_CP_MEC: + case UCODE_ID_CP_MEC_JT1: + case UCODE_ID_CP_MEC_JT2: + return AMDGPU_UCODE_ID_CP_MEC1; + case UCODE_ID_RLC_G: + return AMDGPU_UCODE_ID_RLC_G; + default: + DRM_ERROR("ucode type is out of range!\n"); + return AMDGPU_UCODE_ID_MAXIMUM; + } +} + +static int fiji_smu_populate_single_firmware_entry(struct amdgpu_device *adev, + uint32_t fw_type, + struct SMU_Entry *entry) +{ + enum AMDGPU_UCODE_ID id = fiji_convert_fw_type(fw_type); + struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id]; + const struct gfx_firmware_header_v1_0 *header = NULL; + uint64_t gpu_addr; + uint32_t data_size; + + if (ucode->fw == NULL) + return -EINVAL; + gpu_addr = ucode->mc_addr; + header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; + data_size = le32_to_cpu(header->header.ucode_size_bytes); + + if ((fw_type == UCODE_ID_CP_MEC_JT1) || + (fw_type == UCODE_ID_CP_MEC_JT2)) { + gpu_addr += le32_to_cpu(header->jt_offset) << 2; + data_size = le32_to_cpu(header->jt_size) << 2; + } + + entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version); + entry->id = (uint16_t)fw_type; + entry->image_addr_high = upper_32_bits(gpu_addr); + entry->image_addr_low = lower_32_bits(gpu_addr); + entry->meta_data_addr_high = 0; + entry->meta_data_addr_low = 0; + entry->data_size_byte = data_size; + entry->num_register_entries = 0; + + if (fw_type == UCODE_ID_RLC_G) + entry->flags = 1; + else + entry->flags = 0; + + return 0; +} + +static int fiji_smu_request_load_fw(struct amdgpu_device *adev) +{ + struct fiji_smu_private_data *private = (struct fiji_smu_private_data *)adev->smu.priv; + struct SMU_DRAMData_TOC *toc; + uint32_t fw_to_load; + + WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0); + + fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high); + fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low); + + toc = (struct SMU_DRAMData_TOC *)private->header; + toc->num_entries = 0; + toc->structure_version = 1; + + if (!adev->firmware.smu_load) + return 0; + + if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for RLC\n"); + return -EINVAL; + } + + if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE, + 
&toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for CE\n"); + return -EINVAL; + } + + if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for PFP\n"); + return -EINVAL; + } + + if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for ME\n"); + return -EINVAL; + } + + if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for MEC\n"); + return -EINVAL; + } + + if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for MEC_JT1\n"); + return -EINVAL; + } + + if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for MEC_JT2\n"); + return -EINVAL; + } + + if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for SDMA0\n"); + return -EINVAL; + } + + if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for SDMA1\n"); + return -EINVAL; + } + + fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high); + fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low); + + fw_to_load = UCODE_ID_RLC_G_MASK | + UCODE_ID_SDMA0_MASK | + UCODE_ID_SDMA1_MASK | + UCODE_ID_CP_CE_MASK | + UCODE_ID_CP_ME_MASK | + UCODE_ID_CP_PFP_MASK | + UCODE_ID_CP_MEC_MASK; + + if (fiji_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) { + DRM_ERROR("Failed to request SMU to load ucode\n"); + return -EINVAL; + } + + return 0; +} + +static uint32_t fiji_smu_get_mask_for_fw_type(uint32_t fw_type) +{ + switch (fw_type) { + case AMDGPU_UCODE_ID_SDMA0: + return UCODE_ID_SDMA0_MASK; + case AMDGPU_UCODE_ID_SDMA1: + return UCODE_ID_SDMA1_MASK; + case AMDGPU_UCODE_ID_CP_CE: + return UCODE_ID_CP_CE_MASK; + case AMDGPU_UCODE_ID_CP_PFP: + return UCODE_ID_CP_PFP_MASK; + case AMDGPU_UCODE_ID_CP_ME: + return UCODE_ID_CP_ME_MASK; + case AMDGPU_UCODE_ID_CP_MEC1: + return UCODE_ID_CP_MEC_MASK; + case AMDGPU_UCODE_ID_CP_MEC2: + return UCODE_ID_CP_MEC_MASK; + case AMDGPU_UCODE_ID_RLC_G: + return UCODE_ID_RLC_G_MASK; + default: + DRM_ERROR("ucode type is out of range!\n"); + return 0; + } +} + +static int fiji_smu_check_fw_load_finish(struct amdgpu_device *adev, + uint32_t fw_type) +{ + uint32_t fw_mask = fiji_smu_get_mask_for_fw_type(fw_type); + int i; + + for (i = 0; i < adev->usec_timeout; i++) { + if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask)) + break; + udelay(1); + } + + if (i == adev->usec_timeout) { + DRM_ERROR("firmware load check failed\n"); + return -EINVAL; + } + + return 0; +} + +static int fiji_smu_start_in_protection_mode(struct amdgpu_device *adev) +{ + int result; + uint32_t val; + int i; + + /* Assert reset */ + val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); + val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); + WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); + + result = fiji_smu_upload_firmware_image(adev); + if (result) + return result; + + /* Clear status */ + WREG32_SMC(ixSMU_STATUS, 0); + + /* Enable clock */ + val = 
RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); + WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); + + /* De-assert reset */ + val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); + val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0); + WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); + + /* Set SMU Auto Start */ + val = RREG32_SMC(ixSMU_INPUT_DATA); + val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1); + WREG32_SMC(ixSMU_INPUT_DATA, val); + + /* Clear firmware interrupt enable flag */ + WREG32_SMC(ixFIRMWARE_FLAGS, 0); + + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32_SMC(ixRCU_UC_EVENTS); + if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED)) + break; + udelay(1); + } + + if (i == adev->usec_timeout) { + DRM_ERROR("Interrupt is not enabled by firmware\n"); + return -EINVAL; + } + + /* Call Test SMU message with 0x20000 offset + * to trigger SMU start + */ + fiji_send_msg_to_smc_offset(adev); + DRM_INFO("[FM]try to trigger smu start\n"); + /* Wait for done bit to be set */ + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32_SMC(ixSMU_STATUS); + if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE)) + break; + udelay(1); + } + + if (i == adev->usec_timeout) { + DRM_ERROR("Timeout for SMU start\n"); + return -EINVAL; + } + + /* Check pass/failed indicator */ + val = RREG32_SMC(ixSMU_STATUS); + if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) { + DRM_ERROR("SMU Firmware start failed\n"); + return -EINVAL; + } + DRM_INFO("[FM]smu started\n"); + /* Wait for firmware to initialize */ + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32_SMC(ixFIRMWARE_FLAGS); + if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED)) + break; + udelay(1); + } + + if (i == adev->usec_timeout) { + DRM_ERROR("SMU firmware initialization failed\n"); + return -EINVAL; + } + DRM_INFO("[FM]smu initialized\n"); + + return 0; +} + +static int fiji_smu_start_in_non_protection_mode(struct amdgpu_device *adev) +{ + int i, result; + uint32_t val; + + /* wait for smc boot up */ + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32_SMC(ixRCU_UC_EVENTS); + val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done); + if (val) + break; + udelay(1); + } + + if (i == adev->usec_timeout) { + DRM_ERROR("SMC boot sequence did not complete\n"); + return -EINVAL; + } + + /* Clear firmware interrupt enable flag */ + WREG32_SMC(ixFIRMWARE_FLAGS, 0); + + /* Assert reset */ + val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); + val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); + WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); + + result = fiji_smu_upload_firmware_image(adev); + if (result) + return result; + + /* Set SMC instruction start point at 0x0 */ + fiji_program_jump_on_start(adev); + + /* Enable clock */ + val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); + WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); + + /* De-assert reset */ + val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); + val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0); + WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); + + /* Wait for firmware to initialize */ + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32_SMC(ixFIRMWARE_FLAGS); + if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED)) + break; + udelay(1); + } + + if (i == adev->usec_timeout) { + DRM_ERROR("Timeout for SMC firmware initialization\n"); + return -EINVAL; + } + + return 0; +} + +int fiji_smu_start(struct amdgpu_device *adev) +{ + int result; + uint32_t val; + + if 
(!fiji_is_smc_ram_running(adev)) { + val = RREG32_SMC(ixSMU_FIRMWARE); + if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) { + DRM_INFO("[FM]start smu in nonprotection mode\n"); + result = fiji_smu_start_in_non_protection_mode(adev); + if (result) + return result; + } else { + DRM_INFO("[FM]start smu in protection mode\n"); + result = fiji_smu_start_in_protection_mode(adev); + if (result) + return result; + } + } + + return fiji_smu_request_load_fw(adev); +} + +static const struct amdgpu_smumgr_funcs fiji_smumgr_funcs = { + .check_fw_load_finish = fiji_smu_check_fw_load_finish, + .request_smu_load_fw = NULL, + .request_smu_specific_fw = NULL, +}; + +int fiji_smu_init(struct amdgpu_device *adev) +{ + struct fiji_smu_private_data *private; + uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; + uint32_t smu_internal_buffer_size = 200*4096; + struct amdgpu_bo **toc_buf = &adev->smu.toc_buf; + struct amdgpu_bo **smu_buf = &adev->smu.smu_buf; + uint64_t mc_addr; + void *toc_buf_ptr; + void *smu_buf_ptr; + int ret; + + private = kzalloc(sizeof(struct fiji_smu_private_data), GFP_KERNEL); + if (NULL == private) + return -ENOMEM; + + /* allocate firmware buffers */ + if (adev->firmware.smu_load) + amdgpu_ucode_init_bo(adev); + + adev->smu.priv = private; + adev->smu.fw_flags = 0; + + /* Allocate FW image data structure and header buffer */ + ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, + true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf); + if (ret) { + DRM_ERROR("Failed to allocate memory for TOC buffer\n"); + return -ENOMEM; + } + + /* Allocate buffer for SMU internal buffer */ + ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, + true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, smu_buf); + if (ret) { + DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); + return -ENOMEM; + } + + /* Retrieve GPU address for header buffer and internal buffer */ + ret = amdgpu_bo_reserve(adev->smu.toc_buf, false); + if (ret) { + amdgpu_bo_unref(&adev->smu.toc_buf); + DRM_ERROR("Failed to reserve the TOC buffer\n"); + return -EINVAL; + } + + ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr); + if (ret) { + amdgpu_bo_unreserve(adev->smu.toc_buf); + amdgpu_bo_unref(&adev->smu.toc_buf); + DRM_ERROR("Failed to pin the TOC buffer\n"); + return -EINVAL; + } + + ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr); + if (ret) { + amdgpu_bo_unreserve(adev->smu.toc_buf); + amdgpu_bo_unref(&adev->smu.toc_buf); + DRM_ERROR("Failed to map the TOC buffer\n"); + return -EINVAL; + } + + amdgpu_bo_unreserve(adev->smu.toc_buf); + private->header_addr_low = lower_32_bits(mc_addr); + private->header_addr_high = upper_32_bits(mc_addr); + private->header = toc_buf_ptr; + + ret = amdgpu_bo_reserve(adev->smu.smu_buf, false); + if (ret) { + amdgpu_bo_unref(&adev->smu.smu_buf); + amdgpu_bo_unref(&adev->smu.toc_buf); + DRM_ERROR("Failed to reserve the SMU internal buffer\n"); + return -EINVAL; + } + + ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr); + if (ret) { + amdgpu_bo_unreserve(adev->smu.smu_buf); + amdgpu_bo_unref(&adev->smu.smu_buf); + amdgpu_bo_unref(&adev->smu.toc_buf); + DRM_ERROR("Failed to pin the SMU internal buffer\n"); + return -EINVAL; + } + + ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr); + if (ret) { + amdgpu_bo_unreserve(adev->smu.smu_buf); + amdgpu_bo_unref(&adev->smu.smu_buf); + amdgpu_bo_unref(&adev->smu.toc_buf); + DRM_ERROR("Failed to map the SMU internal buffer\n"); + return -EINVAL; + } + + amdgpu_bo_unreserve(adev->smu.smu_buf); 
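+	/* mc_addr now holds the pinned VRAM address of the SMU internal buffer; it is handed to the SMC as 32-bit halves via PPSMC_MSG_SMU_DRAM_ADDR_HI/LO in fiji_smu_request_load_fw(). */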
+ private->smu_buffer_addr_low = lower_32_bits(mc_addr); + private->smu_buffer_addr_high = upper_32_bits(mc_addr); + + adev->smu.smumgr_funcs = &fiji_smumgr_funcs; + + return 0; +} + +int fiji_smu_fini(struct amdgpu_device *adev) +{ + amdgpu_bo_unref(&adev->smu.toc_buf); + amdgpu_bo_unref(&adev->smu.smu_buf); + kfree(adev->smu.priv); + adev->smu.priv = NULL; + if (adev->firmware.fw_buf) + amdgpu_ucode_fini_bo(adev); + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_family.h b/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h index 0698764354a2..1cef03deeac3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_family.h +++ b/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h @@ -1,7 +1,5 @@ /* - * Copyright 2008 Advanced Micro Devices, Inc. - * Copyright 2008 Red Hat Inc. - * Copyright 2009 Jerome Glisse. + * Copyright 2014 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -21,42 +19,24 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * - * Authors: Dave Airlie - * Alex Deucher - * Jerome Glisse */ -/* this file defines the CHIP_ and family flags used in the pciids, - * its is common between kms and non-kms because duplicating it and - * changing one place is fail. - */ -#ifndef AMDGPU_FAMILY_H -#define AMDGPU_FAMILY_H -/* - * Supported ASIC types - */ -enum amdgpu_asic_type { - CHIP_BONAIRE = 0, - CHIP_KAVERI, - CHIP_KABINI, - CHIP_HAWAII, - CHIP_MULLINS, - CHIP_TOPAZ, - CHIP_TONGA, - CHIP_CARRIZO, - CHIP_LAST, -}; +#ifndef FIJI_SMUMGR_H +#define FIJI_SMUMGR_H -/* - * Chip flags - */ -enum amdgpu_chip_flags { - AMDGPU_ASIC_MASK = 0x0000ffffUL, - AMDGPU_FLAGS_MASK = 0xffff0000UL, - AMDGPU_IS_MOBILITY = 0x00010000UL, - AMDGPU_IS_APU = 0x00020000UL, - AMDGPU_IS_PX = 0x00040000UL, - AMDGPU_EXP_HW_SUPPORT = 0x00080000UL, +#include "fiji_ppsmc.h" + +int fiji_smu_init(struct amdgpu_device *adev); +int fiji_smu_fini(struct amdgpu_device *adev); +int fiji_smu_start(struct amdgpu_device *adev); + +struct fiji_smu_private_data +{ + uint8_t *header; + uint32_t smu_buffer_addr_high; + uint32_t smu_buffer_addr_low; + uint32_t header_addr_high; + uint32_t header_addr_low; }; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 0d8bf2cb1956..9b0cab413677 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2173,7 +2173,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; adev->gfx.config.mem_max_burst_length_bytes = 256; - if (adev->flags & AMDGPU_IS_APU) { + if (adev->flags & AMD_IS_APU) { /* Get memory bank mapping mode. 
*/ tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING); dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP); @@ -2648,6 +2648,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; + struct fence *f = NULL; uint32_t scratch; uint32_t tmp = 0; unsigned i; @@ -2662,26 +2663,23 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) r = amdgpu_ib_get(ring, NULL, 256, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); - amdgpu_gfx_scratch_free(adev, scratch); - return r; + goto err1; } ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START)); ib.ptr[2] = 0xDEADBEEF; ib.length_dw = 3; - r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); - if (r) { - amdgpu_gfx_scratch_free(adev, scratch); - amdgpu_ib_free(adev, &ib); - DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); - return r; - } - r = amdgpu_fence_wait(ib.fence, false); + + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, + AMDGPU_FENCE_OWNER_UNDEFINED, + &f); + if (r) + goto err2; + + r = fence_wait(f, false); if (r) { DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - amdgpu_gfx_scratch_free(adev, scratch); - amdgpu_ib_free(adev, &ib); - return r; + goto err2; } for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(scratch); @@ -2691,14 +2689,19 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ib.fence->ring->idx, i); + ring->idx, i); + goto err2; } else { DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); r = -EINVAL; } - amdgpu_gfx_scratch_free(adev, scratch); + +err2: + fence_put(f); amdgpu_ib_free(adev, &ib); +err1: + amdgpu_gfx_scratch_free(adev, scratch); return r; } @@ -3758,7 +3761,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) int r; /* allocate rlc buffers */ - if (adev->flags & AMDGPU_IS_APU) { + if (adev->flags & AMD_IS_APU) { if (adev->asic_type == CHIP_KAVERI) { adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list; adev->gfx.rlc.reg_list_size = diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index f5a42ab1f65c..4b68e6306f40 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -87,6 +87,13 @@ MODULE_FIRMWARE("amdgpu/topaz_mec.bin"); MODULE_FIRMWARE("amdgpu/topaz_mec2.bin"); MODULE_FIRMWARE("amdgpu/topaz_rlc.bin"); +MODULE_FIRMWARE("amdgpu/fiji_ce.bin"); +MODULE_FIRMWARE("amdgpu/fiji_pfp.bin"); +MODULE_FIRMWARE("amdgpu/fiji_me.bin"); +MODULE_FIRMWARE("amdgpu/fiji_mec.bin"); +MODULE_FIRMWARE("amdgpu/fiji_mec2.bin"); +MODULE_FIRMWARE("amdgpu/fiji_rlc.bin"); + static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = { {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0}, @@ -217,6 +224,71 @@ static const u32 tonga_mgcg_cgcg_init[] = mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, }; +static const u32 fiji_golden_common_all[] = +{ + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a, + mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e, + mmGB_ADDR_CONFIG, 0xffffffff, 0x12011003, + mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, + mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF +}; + +static const u32 
golden_settings_fiji_a10[] = +{ + mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, + mmDB_DEBUG2, 0xf00fffff, 0x00000400, + mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, + mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x00000100, + mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, + mmTA_CNTL_AUX, 0x000f000f, 0x000b0000, + mmTCC_CTRL, 0x00100000, 0xf30fff7f, + mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff, + mmTCP_CHAN_STEER_HI, 0xffffffff, 0x7d6cf5e4, + mmTCP_CHAN_STEER_LO, 0xffffffff, 0x3928b1a0, +}; + +static const u32 fiji_mgcg_cgcg_init[] = +{ + mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffc0, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100, + mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, + mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100, + mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100, + mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100, + mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100, + mmTA_CGTT_CTRL, 0xffffffff, 0x00000100, + mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTD_CGTT_CTRL, 0xffffffff, 0x00000100, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200, + mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, + mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c, + mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, +}; + static const u32 golden_settings_iceland_a11[] = { mmCB_HW_CONTROL_3, 0x00000040, 0x00000040, @@ -439,6 +511,18 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) iceland_golden_common_all, (const u32)ARRAY_SIZE(iceland_golden_common_all)); break; + case CHIP_FIJI: + amdgpu_program_register_sequence(adev, + fiji_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + golden_settings_fiji_a10, + (const u32)ARRAY_SIZE(golden_settings_fiji_a10)); + amdgpu_program_register_sequence(adev, + fiji_golden_common_all, + (const u32)ARRAY_SIZE(fiji_golden_common_all)); + break; + case CHIP_TONGA: amdgpu_program_register_sequence(adev, tonga_mgcg_cgcg_init, @@ -526,6 +610,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; + struct fence *f = NULL; uint32_t scratch; uint32_t tmp = 0; unsigned i; @@ -540,26 +625,23 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) r = amdgpu_ib_get(ring, NULL, 256, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); - amdgpu_gfx_scratch_free(adev, scratch); - return r; + goto err1; } ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START)); ib.ptr[2] = 0xDEADBEEF; ib.length_dw = 3; - r = amdgpu_ib_schedule(adev, 1, &ib, 
AMDGPU_FENCE_OWNER_UNDEFINED); - if (r) { - amdgpu_gfx_scratch_free(adev, scratch); - amdgpu_ib_free(adev, &ib); - DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); - return r; - } - r = amdgpu_fence_wait(ib.fence, false); + + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, + AMDGPU_FENCE_OWNER_UNDEFINED, + &f); + if (r) + goto err2; + + r = fence_wait(f, false); if (r) { DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - amdgpu_gfx_scratch_free(adev, scratch); - amdgpu_ib_free(adev, &ib); - return r; + goto err2; } for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(scratch); @@ -569,14 +651,18 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ib.fence->ring->idx, i); + ring->idx, i); + goto err2; } else { DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); r = -EINVAL; } - amdgpu_gfx_scratch_free(adev, scratch); +err2: + fence_put(f); amdgpu_ib_free(adev, &ib); +err1: + amdgpu_gfx_scratch_free(adev, scratch); return r; } @@ -601,6 +687,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) case CHIP_CARRIZO: chip_name = "carrizo"; break; + case CHIP_FIJI: + chip_name = "fiji"; + break; default: BUG(); } @@ -1236,6 +1325,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); } + case CHIP_FIJI: case CHIP_TONGA: for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { switch (reg_offset) { @@ -1984,6 +2074,23 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN; break; + case CHIP_FIJI: + adev->gfx.config.max_shader_engines = 4; + adev->gfx.config.max_tile_pipes = 16; + adev->gfx.config.max_cu_per_sh = 16; + adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_backends_per_se = 4; + adev->gfx.config.max_texture_channel_caches = 8; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 32; + adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN; + break; case CHIP_TONGA: adev->gfx.config.max_shader_engines = 4; adev->gfx.config.max_tile_pipes = 8; @@ -2078,7 +2185,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; adev->gfx.config.mem_max_burst_length_bytes = 256; - if (adev->flags & AMDGPU_IS_APU) { + if (adev->flags & AMD_IS_APU) { /* Get memory bank mapping mode. 
*/ tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING); dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP); @@ -2490,6 +2597,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); switch (adev->asic_type) { case CHIP_TONGA: + case CHIP_FIJI: amdgpu_ring_write(ring, 0x16000012); amdgpu_ring_write(ring, 0x0000002A); break; @@ -3135,7 +3243,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, AMDGPU_DOORBELL_KIQ << 2); WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, - 0x7FFFF << 2); + AMDGPU_DOORBELL_MEC_RING7 << 2); } tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, @@ -3875,7 +3983,8 @@ static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring, unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; if (ring->adev->asic_type == CHIP_TOPAZ || - ring->adev->asic_type == CHIP_TONGA) + ring->adev->asic_type == CHIP_TONGA || + ring->adev->asic_type == CHIP_FIJI) /* we got a hw semaphore bug in VI TONGA, return false to switch back to sw fence wait */ return false; else { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index ae37fce36520..10218828face 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -636,7 +636,7 @@ static int gmc_v7_0_vm_init(struct amdgpu_device *adev) adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS; /* base offset of vram pages */ - if (adev->flags & AMDGPU_IS_APU) { + if (adev->flags & AMD_IS_APU) { u64 tmp = RREG32(mmMC_VM_FB_OFFSET); tmp <<= 22; adev->vm_manager.vram_base_offset = tmp; @@ -841,7 +841,7 @@ static int gmc_v7_0_early_init(void *handle) gmc_v7_0_set_gart_funcs(adev); gmc_v7_0_set_irq_funcs(adev); - if (adev->flags & AMDGPU_IS_APU) { + if (adev->flags & AMD_IS_APU) { adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; } else { u32 tmp = RREG32(mmMC_SEQ_MISC0); @@ -957,7 +957,7 @@ static int gmc_v7_0_hw_init(void *handle) gmc_v7_0_mc_program(adev); - if (!(adev->flags & AMDGPU_IS_APU)) { + if (!(adev->flags & AMD_IS_APU)) { r = gmc_v7_0_mc_load_microcode(adev); if (r) { DRM_ERROR("Failed to load MC firmware!\n"); @@ -1172,7 +1172,7 @@ static int gmc_v7_0_soft_reset(void *handle) if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) { - if (!(adev->flags & AMDGPU_IS_APU)) + if (!(adev->flags & AMD_IS_APU)) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1); } @@ -1282,7 +1282,7 @@ static int gmc_v7_0_set_clockgating_state(void *handle, if (state == AMD_CG_STATE_GATE) gate = true; - if (!(adev->flags & AMDGPU_IS_APU)) { + if (!(adev->flags & AMD_IS_APU)) { gmc_v7_0_enable_mc_mgcg(adev, gate); gmc_v7_0_enable_mc_ls(adev, gate); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 8135963a66be..78109b750d29 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -44,6 +44,7 @@ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); MODULE_FIRMWARE("amdgpu/topaz_mc.bin"); MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); +MODULE_FIRMWARE("amdgpu/fiji_mc.bin"); static const u32 golden_settings_tonga_a11[] = { @@ -61,6 +62,19 @@ static const u32 tonga_mgcg_cgcg_init[] = mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 }; +static const u32 
golden_settings_fiji_a10[] = +{ + mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff, +}; + +static const u32 fiji_mgcg_cgcg_init[] = +{ + mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 +}; + static const u32 golden_settings_iceland_a11[] = { mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, @@ -90,6 +104,14 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_iceland_a11, (const u32)ARRAY_SIZE(golden_settings_iceland_a11)); break; + case CHIP_FIJI: + amdgpu_program_register_sequence(adev, + fiji_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + golden_settings_fiji_a10, + (const u32)ARRAY_SIZE(golden_settings_fiji_a10)); + break; case CHIP_TONGA: amdgpu_program_register_sequence(adev, tonga_mgcg_cgcg_init, @@ -202,6 +224,9 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev) case CHIP_TONGA: chip_name = "tonga"; break; + case CHIP_FIJI: + chip_name = "fiji"; + break; case CHIP_CARRIZO: return 0; default: BUG(); @@ -737,7 +762,7 @@ static int gmc_v8_0_vm_init(struct amdgpu_device *adev) adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS; /* base offset of vram pages */ - if (adev->flags & AMDGPU_IS_APU) { + if (adev->flags & AMD_IS_APU) { u64 tmp = RREG32(mmMC_VM_FB_OFFSET); tmp <<= 22; adev->vm_manager.vram_base_offset = tmp; @@ -816,7 +841,7 @@ static int gmc_v8_0_early_init(void *handle) gmc_v8_0_set_gart_funcs(adev); gmc_v8_0_set_irq_funcs(adev); - if (adev->flags & AMDGPU_IS_APU) { + if (adev->flags & AMD_IS_APU) { adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; } else { u32 tmp = RREG32(mmMC_SEQ_MISC0); @@ -934,7 +959,7 @@ static int gmc_v8_0_hw_init(void *handle) gmc_v8_0_mc_program(adev); - if (!(adev->flags & AMDGPU_IS_APU)) { + if (!(adev->flags & AMD_IS_APU)) { r = gmc_v8_0_mc_load_microcode(adev); if (r) { DRM_ERROR("Failed to load MC firmware!\n"); @@ -1147,7 +1172,7 @@ static int gmc_v8_0_soft_reset(void *handle) if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) { - if (!(adev->flags & AMDGPU_IS_APU)) + if (!(adev->flags & AMD_IS_APU)) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1); } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index a988dfb1d394..9de8104eddeb 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -673,6 +673,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; + struct fence *f = NULL; unsigned i; unsigned index; int r; @@ -688,12 +689,10 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) gpu_addr = adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); - r = amdgpu_ib_get(ring, NULL, 256, &ib); if (r) { - amdgpu_wb_free(adev, index); DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); - return r; + goto err0; } ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | @@ -707,19 +706,16 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); ib.length_dw = 8; - r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); - if (r) { - amdgpu_ib_free(adev, &ib); - amdgpu_wb_free(adev, index); - DRM_ERROR("amdgpu: failed 
to schedule ib (%d).\n", r); - return r; - } - r = amdgpu_fence_wait(ib.fence, false); + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, + AMDGPU_FENCE_OWNER_UNDEFINED, + &f); + if (r) + goto err1; + + r = fence_wait(f, false); if (r) { - amdgpu_ib_free(adev, &ib); - amdgpu_wb_free(adev, index); DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - return r; + goto err1; } for (i = 0; i < adev->usec_timeout; i++) { tmp = le32_to_cpu(adev->wb.wb[index]); @@ -729,12 +725,17 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ib.fence->ring->idx, i); + ring->idx, i); + goto err1; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; } + +err1: + fence_put(f); amdgpu_ib_free(adev, &ib); +err0: amdgpu_wb_free(adev, index); return r; } @@ -1415,5 +1416,6 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev) if (adev->vm_manager.vm_pte_funcs == NULL) { adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs; adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; + adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; } } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 2b86569b18d3..029f3455f9f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -53,6 +53,8 @@ MODULE_FIRMWARE("amdgpu/tonga_sdma.bin"); MODULE_FIRMWARE("amdgpu/tonga_sdma1.bin"); MODULE_FIRMWARE("amdgpu/carrizo_sdma.bin"); MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin"); +MODULE_FIRMWARE("amdgpu/fiji_sdma.bin"); +MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin"); static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = { @@ -80,6 +82,24 @@ static const u32 tonga_mgcg_cgcg_init[] = mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100 }; +static const u32 golden_settings_fiji_a10[] = +{ + mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, + mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007, + mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100, +}; + +static const u32 fiji_mgcg_cgcg_init[] = +{ + mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100, + mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100 +}; + static const u32 cz_golden_settings_a11[] = { mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, @@ -122,6 +142,14 @@ static const u32 cz_mgcg_cgcg_init[] = static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { + case CHIP_FIJI: + amdgpu_program_register_sequence(adev, + fiji_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + golden_settings_fiji_a10, + (const u32)ARRAY_SIZE(golden_settings_fiji_a10)); + break; case CHIP_TONGA: amdgpu_program_register_sequence(adev, tonga_mgcg_cgcg_init, @@ -167,6 +195,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) case CHIP_TONGA: chip_name = "tonga"; break; + case CHIP_FIJI: + chip_name = "fiji"; + break; case CHIP_CARRIZO: chip_name = "carrizo"; break; @@ -763,6 +794,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; + struct fence *f = NULL; unsigned i; unsigned index; int r; @@ -778,12 +810,10 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) gpu_addr = 
adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); - r = amdgpu_ib_get(ring, NULL, 256, &ib); if (r) { - amdgpu_wb_free(adev, index); DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); - return r; + goto err0; } ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | @@ -797,19 +827,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); ib.length_dw = 8; - r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); - if (r) { - amdgpu_ib_free(adev, &ib); - amdgpu_wb_free(adev, index); - DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); - return r; - } - r = amdgpu_fence_wait(ib.fence, false); + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, + AMDGPU_FENCE_OWNER_UNDEFINED, + &f); + if (r) + goto err1; + + r = fence_wait(f, false); if (r) { - amdgpu_ib_free(adev, &ib); - amdgpu_wb_free(adev, index); DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - return r; + goto err1; } for (i = 0; i < adev->usec_timeout; i++) { tmp = le32_to_cpu(adev->wb.wb[index]); @@ -819,12 +846,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ib.fence->ring->idx, i); + ring->idx, i); + goto err1; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; } +err1: + fence_put(f); amdgpu_ib_free(adev, &ib); +err0: amdgpu_wb_free(adev, index); return r; } @@ -1509,5 +1540,6 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev) if (adev->vm_manager.vm_pte_funcs == NULL) { adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs; adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; + adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; } } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 4efd671d7a9b..9ac383bc6c1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -534,7 +534,7 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - struct amdgpu_fence *fence = NULL; + struct fence *fence = NULL; int r; r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); @@ -555,14 +555,14 @@ static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring) goto error; } - r = amdgpu_fence_wait(fence, false); + r = fence_wait(fence, false); if (r) { DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); goto error; } DRM_INFO("ib test on ring %d succeeded\n", ring->idx); error: - amdgpu_fence_unref(&fence); + fence_put(fence); amdgpu_asic_set_uvd_clocks(adev, 0, 0); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index b756bd99c0fd..de4b3f57902d 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -580,7 +580,7 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - struct amdgpu_fence *fence = NULL; + struct fence *fence = NULL; int r; r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); @@ -601,14 +601,14 @@ static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring) goto error; } - r = amdgpu_fence_wait(fence, false); + r = fence_wait(fence, false); if (r) { DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); goto error; } DRM_INFO("ib test on ring %d succeeded\n", 
ring->idx); error: - amdgpu_fence_unref(&fence); + fence_put(fence); amdgpu_asic_set_uvd_clocks(adev, 0, 0); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 49aa931b2cb4..66c975870e97 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -575,7 +575,7 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, */ static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring) { - struct amdgpu_fence *fence = NULL; + struct fence *fence = NULL; int r; r = amdgpu_uvd_get_create_msg(ring, 1, NULL); @@ -590,14 +590,14 @@ static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring) goto error; } - r = amdgpu_fence_wait(fence, false); + r = fence_wait(fence, false); if (r) { DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); goto error; } DRM_INFO("ib test on ring %d succeeded\n", ring->idx); error: - amdgpu_fence_unref(&fence); + fence_put(fence); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index d1064ca3670e..4349658081ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -205,7 +205,14 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev) u32 tmp; unsigned ret; - if (adev->flags & AMDGPU_IS_APU) + /* Fiji is single pipe */ + if (adev->asic_type == CHIP_FIJI) { + ret = AMDGPU_VCE_HARVEST_VCE1; + return ret; + } + + /* Tonga and CZ are dual or single pipe */ + if (adev->flags & AMD_IS_APU) tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) & VCE_HARVEST_FUSE_MACRO__MASK) >> VCE_HARVEST_FUSE_MACRO__SHIFT; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 68552da40287..552d9e75ad1b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -203,6 +203,17 @@ static const u32 tonga_mgcg_cgcg_init[] = mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, }; +static const u32 fiji_mgcg_cgcg_init[] = +{ + mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100, + mmPCIE_INDEX, 0xffffffff, 0x0140001c, + mmPCIE_DATA, 0x000f0000, 0x00000000, + mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C, + mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100, + mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100, + mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, +}; + static const u32 iceland_mgcg_cgcg_init[] = { mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2, @@ -232,6 +243,11 @@ static void vi_init_golden_registers(struct amdgpu_device *adev) iceland_mgcg_cgcg_init, (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init)); break; + case CHIP_FIJI: + amdgpu_program_register_sequence(adev, + fiji_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init)); + break; case CHIP_TONGA: amdgpu_program_register_sequence(adev, tonga_mgcg_cgcg_init, @@ -261,7 +277,7 @@ static u32 vi_get_xclk(struct amdgpu_device *adev) u32 reference_clock = adev->clock.spll.reference_freq; u32 tmp; - if (adev->flags & AMDGPU_IS_APU) + if (adev->flags & AMD_IS_APU) return reference_clock; tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2); @@ -362,6 +378,26 @@ static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = { static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = { {mmGRBM_STATUS, false}, + {mmGRBM_STATUS2, false}, + {mmGRBM_STATUS_SE0, false}, + {mmGRBM_STATUS_SE1, false}, + {mmGRBM_STATUS_SE2, false}, + {mmGRBM_STATUS_SE3, false}, + {mmSRBM_STATUS, false}, + {mmSRBM_STATUS2, false}, + {mmSRBM_STATUS3, false}, + {mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false}, + 
{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false}, + {mmCP_STAT, false}, + {mmCP_STALLED_STAT1, false}, + {mmCP_STALLED_STAT2, false}, + {mmCP_STALLED_STAT3, false}, + {mmCP_CPF_BUSY_STAT, false}, + {mmCP_CPF_STALLED_STAT1, false}, + {mmCP_CPF_STATUS, false}, + {mmCP_CPC_BUSY_STAT, false}, + {mmCP_CPC_STALLED_STAT1, false}, + {mmCP_CPC_STATUS, false}, {mmGB_ADDR_CONFIG, false}, {mmMC_ARB_RAMCFG, false}, {mmGB_TILE_MODE0, false}, @@ -449,6 +485,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num, asic_register_table = tonga_allowed_read_registers; size = ARRAY_SIZE(tonga_allowed_read_registers); break; + case CHIP_FIJI: case CHIP_TONGA: case CHIP_CARRIZO: asic_register_table = cz_allowed_read_registers; @@ -751,7 +788,7 @@ static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); - if (!(adev->flags & AMDGPU_IS_APU)) { + if (!(adev->flags & AMD_IS_APU)) { if (reset_mask & AMDGPU_RESET_MC) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1); @@ -971,7 +1008,7 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev) if (amdgpu_pcie_gen2 == 0) return; - if (adev->flags & AMDGPU_IS_APU) + if (adev->flags & AMD_IS_APU) return; ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); @@ -999,7 +1036,7 @@ static void vi_enable_doorbell_aperture(struct amdgpu_device *adev, u32 tmp; /* not necessary on CZ */ - if (adev->flags & AMDGPU_IS_APU) + if (adev->flags & AMD_IS_APU) return; tmp = RREG32(mmBIF_DOORBELL_APER_EN); @@ -1127,6 +1164,74 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version fiji_ip_blocks[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 8, + .minor = 5, + .rev = 0, + .funcs = &gmc_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &tonga_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 1, + .rev = 0, + .funcs = &fiji_dpm_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 10, + .minor = 1, + .rev = 0, + .funcs = &dce_v10_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &sdma_v3_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 6, + .minor = 0, + .rev = 0, + .funcs = &uvd_v6_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &vce_v3_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version cz_ip_blocks[] = { /* ORDER MATTERS! 
*/ @@ -1202,6 +1307,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) adev->ip_blocks = topaz_ip_blocks; adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks); break; + case CHIP_FIJI: + adev->ip_blocks = fiji_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks); + break; case CHIP_TONGA: adev->ip_blocks = tonga_ip_blocks; adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks); @@ -1248,7 +1357,7 @@ static int vi_common_early_init(void *handle) bool smc_enabled = false; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->flags & AMDGPU_IS_APU) { + if (adev->flags & AMD_IS_APU) { adev->smc_rreg = &cz_smc_rreg; adev->smc_wreg = &cz_smc_wreg; } else { @@ -1279,6 +1388,7 @@ static int vi_common_early_init(void *handle) if (amdgpu_smc_load_fw && smc_enabled) adev->firmware.smu_load = true; break; + case CHIP_FIJI: case CHIP_TONGA: adev->has_uvd = true; adev->cg_flags = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h index 3b45332f5df4..fc120ba18aad 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h @@ -30,7 +30,7 @@ int cz_smu_start(struct amdgpu_device *adev); int cz_smu_fini(struct amdgpu_device *adev); extern const struct amd_ip_funcs tonga_dpm_ip_funcs; - +extern const struct amd_ip_funcs fiji_dpm_ip_funcs; extern const struct amd_ip_funcs iceland_dpm_ip_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c index 23ce774ff09d..c6f435aa803f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c @@ -143,7 +143,7 @@ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, get_sh_mem_bases_32(qpd_to_pdd(qpd)); else value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) << - SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) && + SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) & SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK; q->properties.sdma_vm_addr = value; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c index 44c38e8e54d3..7e9cae9d349b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c @@ -155,7 +155,7 @@ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, get_sh_mem_bases_32(qpd_to_pdd(qpd)); else value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) << - SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) && + SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) & SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK; q->properties.sdma_vm_addr = value; diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 5bdf1b4397a0..68a8eaa1b7d0 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -23,6 +23,45 @@ #ifndef __AMD_SHARED_H__ #define __AMD_SHARED_H__ +#define AMD_MAX_USEC_TIMEOUT 100000 /* 100 ms */ + +/* +* Supported GPU families (aligned with amdgpu_drm.h) +*/ +#define AMD_FAMILY_UNKNOWN 0 +#define AMD_FAMILY_CI 120 /* Bonaire, Hawaii */ +#define AMD_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */ +#define AMD_FAMILY_VI 130 /* Iceland, Tonga */ +#define AMD_FAMILY_CZ 135 /* Carrizo */ + +/* + * Supported ASIC types + */ +enum amd_asic_type { + CHIP_BONAIRE = 0, + CHIP_KAVERI, + CHIP_KABINI, + CHIP_HAWAII, + CHIP_MULLINS, + CHIP_TOPAZ, + CHIP_TONGA, + 
CHIP_FIJI, + CHIP_CARRIZO, + CHIP_LAST, +}; + +/* + * Chip flags + */ +enum amd_chip_flags { + AMD_ASIC_MASK = 0x0000ffffUL, + AMD_FLAGS_MASK = 0xffff0000UL, + AMD_IS_MOBILITY = 0x00010000UL, + AMD_IS_APU = 0x00020000UL, + AMD_IS_PX = 0x00040000UL, + AMD_EXP_HW_SUPPORT = 0x00080000UL, +}; + enum amd_ip_block_type { AMD_IP_BLOCK_TYPE_COMMON, AMD_IP_BLOCK_TYPE_GMC, diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h new file mode 100644 index 000000000000..44b1855cb8df --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h @@ -0,0 +1,1246 @@ +/* + * SMU_7_1_3 Register documentation + * + * Copyright (C) 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef SMU_7_1_3_D_H +#define SMU_7_1_3_D_H + +#define mmGCK_SMC_IND_INDEX 0x80 +#define mmGCK0_GCK_SMC_IND_INDEX 0x80 +#define mmGCK1_GCK_SMC_IND_INDEX 0x82 +#define mmGCK2_GCK_SMC_IND_INDEX 0x84 +#define mmGCK3_GCK_SMC_IND_INDEX 0x86 +#define mmGCK_SMC_IND_DATA 0x81 +#define mmGCK0_GCK_SMC_IND_DATA 0x81 +#define mmGCK1_GCK_SMC_IND_DATA 0x83 +#define mmGCK2_GCK_SMC_IND_DATA 0x85 +#define mmGCK3_GCK_SMC_IND_DATA 0x87 +#define ixGCK_MCLK_FUSES 0xc0500008 +#define ixCG_DCLK_CNTL 0xc050009c +#define ixCG_DCLK_STATUS 0xc05000a0 +#define ixCG_VCLK_CNTL 0xc05000a4 +#define ixCG_VCLK_STATUS 0xc05000a8 +#define ixCG_ECLK_CNTL 0xc05000ac +#define ixCG_ECLK_STATUS 0xc05000b0 +#define ixCG_ACLK_CNTL 0xc05000dc +#define ixCG_MCLK_CNTL 0xc0500120 +#define ixCG_MCLK_STATUS 0xc0500124 +#define ixGCK_DFS_BYPASS_CNTL 0xc0500118 +#define ixCG_SPLL_FUNC_CNTL 0xc0500140 +#define ixCG_SPLL_FUNC_CNTL_2 0xc0500144 +#define ixCG_SPLL_FUNC_CNTL_3 0xc0500148 +#define ixCG_SPLL_FUNC_CNTL_4 0xc050014c +#define ixCG_SPLL_FUNC_CNTL_5 0xc0500150 +#define ixCG_SPLL_FUNC_CNTL_6 0xc0500154 +#define ixCG_SPLL_FUNC_CNTL_7 0xc0500158 +#define ixSPLL_CNTL_MODE 0xc0500160 +#define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164 +#define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168 +#define ixMPLL_BYPASSCLK_SEL 0xc050019c +#define ixCG_CLKPIN_CNTL 0xc05001a0 +#define ixCG_CLKPIN_CNTL_2 0xc05001a4 +#define ixCG_CLKPIN_CNTL_DC 0xc0500204 +#define ixTHM_CLK_CNTL 0xc05001a8 +#define ixMISC_CLK_CTRL 0xc05001ac +#define ixGCK_PLL_TEST_CNTL 0xc05001c0 +#define ixGCK_PLL_TEST_CNTL_2 0xc05001c4 +#define ixGCK_ADFS_CLK_BYPASS_CNTL1 0xc05001c8 +#define mmSMC_IND_INDEX 0x80 +#define mmSMC0_SMC_IND_INDEX 0x80 +#define mmSMC1_SMC_IND_INDEX 0x82 +#define mmSMC2_SMC_IND_INDEX 0x84 +#define mmSMC3_SMC_IND_INDEX 0x86 +#define mmSMC_IND_DATA 0x81 +#define mmSMC0_SMC_IND_DATA 0x81 +#define mmSMC1_SMC_IND_DATA 0x83 +#define mmSMC2_SMC_IND_DATA 0x85 +#define mmSMC3_SMC_IND_DATA 0x87 +#define mmSMC_IND_INDEX_0 0x80 +#define mmSMC_IND_DATA_0 0x81 +#define mmSMC_IND_INDEX_1 0x82 +#define mmSMC_IND_DATA_1 0x83 +#define mmSMC_IND_INDEX_2 0x84 +#define mmSMC_IND_DATA_2 0x85 +#define mmSMC_IND_INDEX_3 0x86 +#define mmSMC_IND_DATA_3 0x87 +#define mmSMC_IND_INDEX_4 0x88 +#define mmSMC_IND_DATA_4 0x89 +#define mmSMC_IND_INDEX_5 0x8a +#define mmSMC_IND_DATA_5 0x8b +#define mmSMC_IND_INDEX_6 0x8c +#define mmSMC_IND_DATA_6 0x8d +#define mmSMC_IND_INDEX_7 0x8e +#define mmSMC_IND_DATA_7 0x8f +#define mmSMC_IND_ACCESS_CNTL 0x92 +#define mmSMC_MESSAGE_0 0x94 +#define mmSMC_RESP_0 0x95 +#define mmSMC_MESSAGE_1 0x96 +#define mmSMC_RESP_1 0x97 +#define mmSMC_MESSAGE_2 0x98 +#define mmSMC_RESP_2 0x99 +#define mmSMC_MESSAGE_3 0x9a +#define mmSMC_RESP_3 0x9b +#define mmSMC_MESSAGE_4 0x9c +#define mmSMC_RESP_4 0x9d +#define mmSMC_MESSAGE_5 0x9e +#define mmSMC_RESP_5 0x9f +#define mmSMC_MESSAGE_6 0xa0 +#define mmSMC_RESP_6 0xa1 +#define mmSMC_MESSAGE_7 0xa2 +#define mmSMC_RESP_7 0xa3 +#define mmSMC_MSG_ARG_0 0xa4 +#define mmSMC_MSG_ARG_1 0xa5 +#define mmSMC_MSG_ARG_2 0xa6 +#define mmSMC_MSG_ARG_3 0xa7 +#define mmSMC_MSG_ARG_4 0xa8 +#define mmSMC_MSG_ARG_5 0xa9 +#define mmSMC_MSG_ARG_6 0xaa +#define mmSMC_MSG_ARG_7 0xab +#define mmSMC_MESSAGE_8 0xb5 +#define mmSMC_RESP_8 0xb6 +#define mmSMC_MESSAGE_9 0xb7 +#define mmSMC_RESP_9 0xb8 +#define mmSMC_MESSAGE_10 0xb9 +#define mmSMC_RESP_10 0xba +#define mmSMC_MESSAGE_11 0xbb +#define mmSMC_RESP_11 0xbc +#define mmSMC_MSG_ARG_8 0xbd +#define mmSMC_MSG_ARG_9 0xbe +#define mmSMC_MSG_ARG_10 0xbf +#define mmSMC_MSG_ARG_11 0x93 +#define 
ixSMC_SYSCON_RESET_CNTL 0x80000000 +#define ixSMC_SYSCON_CLOCK_CNTL_0 0x80000004 +#define ixSMC_SYSCON_CLOCK_CNTL_1 0x80000008 +#define ixSMC_SYSCON_CLOCK_CNTL_2 0x8000000c +#define ixSMC_SYSCON_MISC_CNTL 0x80000010 +#define ixSMC_SYSCON_MSG_ARG_0 0x80000068 +#define ixSMC_PC_C 0x80000370 +#define ixSMC_SCRATCH9 0x80000424 +#define mmGPIOPAD_SW_INT_STAT 0x180 +#define mmGPIOPAD_STRENGTH 0x181 +#define mmGPIOPAD_MASK 0x182 +#define mmGPIOPAD_A 0x183 +#define mmGPIOPAD_EN 0x184 +#define mmGPIOPAD_Y 0x185 +#define mmGPIOPAD_PINSTRAPS 0x186 +#define mmGPIOPAD_INT_STAT_EN 0x187 +#define mmGPIOPAD_INT_STAT 0x188 +#define mmGPIOPAD_INT_STAT_AK 0x189 +#define mmGPIOPAD_INT_EN 0x18a +#define mmGPIOPAD_INT_TYPE 0x18b +#define mmGPIOPAD_INT_POLARITY 0x18c +#define mmGPIOPAD_EXTERN_TRIG_CNTL 0x18d +#define mmGPIOPAD_RCVR_SEL 0x191 +#define mmGPIOPAD_PU_EN 0x192 +#define mmGPIOPAD_PD_EN 0x193 +#define mmCG_FPS_CNT 0x1b6 +#define mmSMU_IND_INDEX_0 0x1a6 +#define mmSMU_IND_DATA_0 0x1a7 +#define mmSMU_IND_INDEX_1 0x1a8 +#define mmSMU_IND_DATA_1 0x1a9 +#define mmSMU_IND_INDEX_2 0x1aa +#define mmSMU_IND_DATA_2 0x1ab +#define mmSMU_IND_INDEX_3 0x1ac +#define mmSMU_IND_DATA_3 0x1ad +#define mmSMU_IND_INDEX_4 0x1ae +#define mmSMU_IND_DATA_4 0x1af +#define mmSMU_IND_INDEX_5 0x1b0 +#define mmSMU_IND_DATA_5 0x1b1 +#define mmSMU_IND_INDEX_6 0x1b2 +#define mmSMU_IND_DATA_6 0x1b3 +#define mmSMU_IND_INDEX_7 0x1b4 +#define mmSMU_IND_DATA_7 0x1b5 +#define mmSMU_SMC_IND_INDEX 0x80 +#define mmSMU0_SMU_SMC_IND_INDEX 0x80 +#define mmSMU1_SMU_SMC_IND_INDEX 0x82 +#define mmSMU2_SMU_SMC_IND_INDEX 0x84 +#define mmSMU3_SMU_SMC_IND_INDEX 0x86 +#define mmSMU_SMC_IND_DATA 0x81 +#define mmSMU0_SMU_SMC_IND_DATA 0x81 +#define mmSMU1_SMU_SMC_IND_DATA 0x83 +#define mmSMU2_SMU_SMC_IND_DATA 0x85 +#define mmSMU3_SMU_SMC_IND_DATA 0x87 +#define ixRCU_UC_EVENTS 0xc0000004 +#define ixRCU_MISC_CTRL 0xc0000010 +#define ixRCU_VIRT_RESET_REQ 0xc0000024 +#define ixCC_RCU_FUSES 0xc00c0000 +#define ixCC_SMU_MISC_FUSES 0xc00c0004 +#define ixCC_SCLK_VID_FUSES 0xc00c0008 +#define ixCC_GIO_IOCCFG_FUSES 0xc00c000c +#define ixCC_GIO_IOC_FUSES 0xc00c0010 +#define ixCC_SMU_TST_EFUSE1_MISC 0xc00c001c +#define ixCC_TST_ID_STRAPS 0xc00c0020 +#define ixCC_FCTRL_FUSES 0xc00c0024 +#define ixCC_HARVEST_FUSES 0xc00c0028 +#define ixSMU_MAIN_PLL_OP_FREQ 0xe0003020 +#define ixSMU_STATUS 0xe0003088 +#define ixSMU_FIRMWARE 0xe00030a4 +#define ixSMU_INPUT_DATA 0xe00030b8 +#define ixSMU_EFUSE_0 0xc0100000 +#define ixFIRMWARE_FLAGS 0x3f000 +#define ixTDC_STATUS 0x3f004 +#define ixTDC_MV_AVERAGE 0x3f008 +#define ixTDC_VRM_LIMIT 0x3f00c +#define ixFEATURE_STATUS 0x3f010 +#define ixENTITY_TEMPERATURES_1 0x3f014 +#define ixMCARB_DRAM_TIMING_TABLE_1 0x3f018 +#define ixMCARB_DRAM_TIMING_TABLE_2 0x3f01c +#define ixMCARB_DRAM_TIMING_TABLE_3 0x3f020 +#define ixMCARB_DRAM_TIMING_TABLE_4 0x3f024 +#define ixMCARB_DRAM_TIMING_TABLE_5 0x3f028 +#define ixMCARB_DRAM_TIMING_TABLE_6 0x3f02c +#define ixMCARB_DRAM_TIMING_TABLE_7 0x3f030 +#define ixMCARB_DRAM_TIMING_TABLE_8 0x3f034 +#define ixMCARB_DRAM_TIMING_TABLE_9 0x3f038 +#define ixMCARB_DRAM_TIMING_TABLE_10 0x3f03c +#define ixMCARB_DRAM_TIMING_TABLE_11 0x3f040 +#define ixMCARB_DRAM_TIMING_TABLE_12 0x3f044 +#define ixMCARB_DRAM_TIMING_TABLE_13 0x3f048 +#define ixMCARB_DRAM_TIMING_TABLE_14 0x3f04c +#define ixMCARB_DRAM_TIMING_TABLE_15 0x3f050 +#define ixMCARB_DRAM_TIMING_TABLE_16 0x3f054 +#define ixMCARB_DRAM_TIMING_TABLE_17 0x3f058 +#define ixMCARB_DRAM_TIMING_TABLE_18 0x3f05c +#define ixMCARB_DRAM_TIMING_TABLE_19 0x3f060 +#define 
ixMCARB_DRAM_TIMING_TABLE_20 0x3f064 +#define ixMCARB_DRAM_TIMING_TABLE_21 0x3f068 +#define ixMCARB_DRAM_TIMING_TABLE_22 0x3f06c +#define ixMCARB_DRAM_TIMING_TABLE_23 0x3f070 +#define ixMCARB_DRAM_TIMING_TABLE_24 0x3f074 +#define ixMCARB_DRAM_TIMING_TABLE_25 0x3f078 +#define ixMCARB_DRAM_TIMING_TABLE_26 0x3f07c +#define ixMCARB_DRAM_TIMING_TABLE_27 0x3f080 +#define ixMCARB_DRAM_TIMING_TABLE_28 0x3f084 +#define ixMCARB_DRAM_TIMING_TABLE_29 0x3f088 +#define ixMCARB_DRAM_TIMING_TABLE_30 0x3f08c +#define ixMCARB_DRAM_TIMING_TABLE_31 0x3f090 +#define ixMCARB_DRAM_TIMING_TABLE_32 0x3f094 +#define ixMCARB_DRAM_TIMING_TABLE_33 0x3f098 +#define ixMCARB_DRAM_TIMING_TABLE_34 0x3f09c +#define ixMCARB_DRAM_TIMING_TABLE_35 0x3f0a0 +#define ixMCARB_DRAM_TIMING_TABLE_36 0x3f0a4 +#define ixMCARB_DRAM_TIMING_TABLE_37 0x3f0a8 +#define ixMCARB_DRAM_TIMING_TABLE_38 0x3f0ac +#define ixMCARB_DRAM_TIMING_TABLE_39 0x3f0b0 +#define ixMCARB_DRAM_TIMING_TABLE_40 0x3f0b4 +#define ixMCARB_DRAM_TIMING_TABLE_41 0x3f0b8 +#define ixMCARB_DRAM_TIMING_TABLE_42 0x3f0bc +#define ixMCARB_DRAM_TIMING_TABLE_43 0x3f0c0 +#define ixMCARB_DRAM_TIMING_TABLE_44 0x3f0c4 +#define ixMCARB_DRAM_TIMING_TABLE_45 0x3f0c8 +#define ixMCARB_DRAM_TIMING_TABLE_46 0x3f0cc +#define ixMCARB_DRAM_TIMING_TABLE_47 0x3f0d0 +#define ixMCARB_DRAM_TIMING_TABLE_48 0x3f0d4 +#define ixMCARB_DRAM_TIMING_TABLE_49 0x3f0d8 +#define ixMCARB_DRAM_TIMING_TABLE_50 0x3f0dc +#define ixMCARB_DRAM_TIMING_TABLE_51 0x3f0e0 +#define ixMCARB_DRAM_TIMING_TABLE_52 0x3f0e4 +#define ixMCARB_DRAM_TIMING_TABLE_53 0x3f0e8 +#define ixMCARB_DRAM_TIMING_TABLE_54 0x3f0ec +#define ixMCARB_DRAM_TIMING_TABLE_55 0x3f0f0 +#define ixMCARB_DRAM_TIMING_TABLE_56 0x3f0f4 +#define ixMCARB_DRAM_TIMING_TABLE_57 0x3f0f8 +#define ixMCARB_DRAM_TIMING_TABLE_58 0x3f0fc +#define ixMCARB_DRAM_TIMING_TABLE_59 0x3f100 +#define ixMCARB_DRAM_TIMING_TABLE_60 0x3f104 +#define ixMCARB_DRAM_TIMING_TABLE_61 0x3f108 +#define ixMCARB_DRAM_TIMING_TABLE_62 0x3f10c +#define ixMCARB_DRAM_TIMING_TABLE_63 0x3f110 +#define ixMCARB_DRAM_TIMING_TABLE_64 0x3f114 +#define ixMCARB_DRAM_TIMING_TABLE_65 0x3f118 +#define ixMCARB_DRAM_TIMING_TABLE_66 0x3f11c +#define ixMCARB_DRAM_TIMING_TABLE_67 0x3f120 +#define ixMCARB_DRAM_TIMING_TABLE_68 0x3f124 +#define ixMCARB_DRAM_TIMING_TABLE_69 0x3f128 +#define ixMCARB_DRAM_TIMING_TABLE_70 0x3f12c +#define ixMCARB_DRAM_TIMING_TABLE_71 0x3f130 +#define ixMCARB_DRAM_TIMING_TABLE_72 0x3f134 +#define ixMCARB_DRAM_TIMING_TABLE_73 0x3f138 +#define ixMCARB_DRAM_TIMING_TABLE_74 0x3f13c +#define ixMCARB_DRAM_TIMING_TABLE_75 0x3f140 +#define ixMCARB_DRAM_TIMING_TABLE_76 0x3f144 +#define ixMCARB_DRAM_TIMING_TABLE_77 0x3f148 +#define ixMCARB_DRAM_TIMING_TABLE_78 0x3f14c +#define ixMCARB_DRAM_TIMING_TABLE_79 0x3f150 +#define ixMCARB_DRAM_TIMING_TABLE_80 0x3f154 +#define ixMCARB_DRAM_TIMING_TABLE_81 0x3f158 +#define ixMCARB_DRAM_TIMING_TABLE_82 0x3f15c +#define ixMCARB_DRAM_TIMING_TABLE_83 0x3f160 +#define ixMCARB_DRAM_TIMING_TABLE_84 0x3f164 +#define ixMCARB_DRAM_TIMING_TABLE_85 0x3f168 +#define ixMCARB_DRAM_TIMING_TABLE_86 0x3f16c +#define ixMCARB_DRAM_TIMING_TABLE_87 0x3f170 +#define ixMCARB_DRAM_TIMING_TABLE_88 0x3f174 +#define ixMCARB_DRAM_TIMING_TABLE_89 0x3f178 +#define ixMCARB_DRAM_TIMING_TABLE_90 0x3f17c +#define ixMCARB_DRAM_TIMING_TABLE_91 0x3f180 +#define ixMCARB_DRAM_TIMING_TABLE_92 0x3f184 +#define ixMCARB_DRAM_TIMING_TABLE_93 0x3f188 +#define ixMCARB_DRAM_TIMING_TABLE_94 0x3f18c +#define ixMCARB_DRAM_TIMING_TABLE_95 0x3f190 +#define ixMCARB_DRAM_TIMING_TABLE_96 0x3f194 +#define 
ixDPM_TABLE_1 0x3f198 +#define ixDPM_TABLE_2 0x3f19c +#define ixDPM_TABLE_3 0x3f1a0 +#define ixDPM_TABLE_4 0x3f1a4 +#define ixDPM_TABLE_5 0x3f1a8 +#define ixDPM_TABLE_6 0x3f1ac +#define ixDPM_TABLE_7 0x3f1b0 +#define ixDPM_TABLE_8 0x3f1b4 +#define ixDPM_TABLE_9 0x3f1b8 +#define ixDPM_TABLE_10 0x3f1bc +#define ixDPM_TABLE_11 0x3f1c0 +#define ixDPM_TABLE_12 0x3f1c4 +#define ixDPM_TABLE_13 0x3f1c8 +#define ixDPM_TABLE_14 0x3f1cc +#define ixDPM_TABLE_15 0x3f1d0 +#define ixDPM_TABLE_16 0x3f1d4 +#define ixDPM_TABLE_17 0x3f1d8 +#define ixDPM_TABLE_18 0x3f1dc +#define ixDPM_TABLE_19 0x3f1e0 +#define ixDPM_TABLE_20 0x3f1e4 +#define ixDPM_TABLE_21 0x3f1e8 +#define ixDPM_TABLE_22 0x3f1ec +#define ixDPM_TABLE_23 0x3f1f0 +#define ixDPM_TABLE_24 0x3f1f4 +#define ixDPM_TABLE_25 0x3f1f8 +#define ixDPM_TABLE_26 0x3f1fc +#define ixDPM_TABLE_27 0x3f200 +#define ixDPM_TABLE_28 0x3f204 +#define ixDPM_TABLE_29 0x3f208 +#define ixDPM_TABLE_30 0x3f20c +#define ixDPM_TABLE_31 0x3f210 +#define ixDPM_TABLE_32 0x3f214 +#define ixDPM_TABLE_33 0x3f218 +#define ixDPM_TABLE_34 0x3f21c +#define ixDPM_TABLE_35 0x3f220 +#define ixDPM_TABLE_36 0x3f224 +#define ixDPM_TABLE_37 0x3f228 +#define ixDPM_TABLE_38 0x3f22c +#define ixDPM_TABLE_39 0x3f230 +#define ixDPM_TABLE_40 0x3f234 +#define ixDPM_TABLE_41 0x3f238 +#define ixDPM_TABLE_42 0x3f23c +#define ixDPM_TABLE_43 0x3f240 +#define ixDPM_TABLE_44 0x3f244 +#define ixDPM_TABLE_45 0x3f248 +#define ixDPM_TABLE_46 0x3f24c +#define ixDPM_TABLE_47 0x3f250 +#define ixDPM_TABLE_48 0x3f254 +#define ixDPM_TABLE_49 0x3f258 +#define ixDPM_TABLE_50 0x3f25c +#define ixDPM_TABLE_51 0x3f260 +#define ixDPM_TABLE_52 0x3f264 +#define ixDPM_TABLE_53 0x3f268 +#define ixDPM_TABLE_54 0x3f26c +#define ixDPM_TABLE_55 0x3f270 +#define ixDPM_TABLE_56 0x3f274 +#define ixDPM_TABLE_57 0x3f278 +#define ixDPM_TABLE_58 0x3f27c +#define ixDPM_TABLE_59 0x3f280 +#define ixDPM_TABLE_60 0x3f284 +#define ixDPM_TABLE_61 0x3f288 +#define ixDPM_TABLE_62 0x3f28c +#define ixDPM_TABLE_63 0x3f290 +#define ixDPM_TABLE_64 0x3f294 +#define ixDPM_TABLE_65 0x3f298 +#define ixDPM_TABLE_66 0x3f29c +#define ixDPM_TABLE_67 0x3f2a0 +#define ixDPM_TABLE_68 0x3f2a4 +#define ixDPM_TABLE_69 0x3f2a8 +#define ixDPM_TABLE_70 0x3f2ac +#define ixDPM_TABLE_71 0x3f2b0 +#define ixDPM_TABLE_72 0x3f2b4 +#define ixDPM_TABLE_73 0x3f2b8 +#define ixDPM_TABLE_74 0x3f2bc +#define ixDPM_TABLE_75 0x3f2c0 +#define ixDPM_TABLE_76 0x3f2c4 +#define ixDPM_TABLE_77 0x3f2c8 +#define ixDPM_TABLE_78 0x3f2cc +#define ixDPM_TABLE_79 0x3f2d0 +#define ixDPM_TABLE_80 0x3f2d4 +#define ixDPM_TABLE_81 0x3f2d8 +#define ixDPM_TABLE_82 0x3f2dc +#define ixDPM_TABLE_83 0x3f2e0 +#define ixDPM_TABLE_84 0x3f2e4 +#define ixDPM_TABLE_85 0x3f2e8 +#define ixDPM_TABLE_86 0x3f2ec +#define ixDPM_TABLE_87 0x3f2f0 +#define ixDPM_TABLE_88 0x3f2f4 +#define ixDPM_TABLE_89 0x3f2f8 +#define ixDPM_TABLE_90 0x3f2fc +#define ixDPM_TABLE_91 0x3f300 +#define ixDPM_TABLE_92 0x3f304 +#define ixDPM_TABLE_93 0x3f308 +#define ixDPM_TABLE_94 0x3f30c +#define ixDPM_TABLE_95 0x3f310 +#define ixDPM_TABLE_96 0x3f314 +#define ixDPM_TABLE_97 0x3f318 +#define ixDPM_TABLE_98 0x3f31c +#define ixDPM_TABLE_99 0x3f320 +#define ixDPM_TABLE_100 0x3f324 +#define ixDPM_TABLE_101 0x3f328 +#define ixDPM_TABLE_102 0x3f32c +#define ixDPM_TABLE_103 0x3f330 +#define ixDPM_TABLE_104 0x3f334 +#define ixDPM_TABLE_105 0x3f338 +#define ixDPM_TABLE_106 0x3f33c +#define ixDPM_TABLE_107 0x3f340 +#define ixDPM_TABLE_108 0x3f344 +#define ixDPM_TABLE_109 0x3f348 +#define ixDPM_TABLE_110 0x3f34c +#define ixDPM_TABLE_111 0x3f350 +#define 
ixDPM_TABLE_112 0x3f354 +#define ixDPM_TABLE_113 0x3f358 +#define ixDPM_TABLE_114 0x3f35c +#define ixDPM_TABLE_115 0x3f360 +#define ixDPM_TABLE_116 0x3f364 +#define ixDPM_TABLE_117 0x3f368 +#define ixDPM_TABLE_118 0x3f36c +#define ixDPM_TABLE_119 0x3f370 +#define ixDPM_TABLE_120 0x3f374 +#define ixDPM_TABLE_121 0x3f378 +#define ixDPM_TABLE_122 0x3f37c +#define ixDPM_TABLE_123 0x3f380 +#define ixDPM_TABLE_124 0x3f384 +#define ixDPM_TABLE_125 0x3f388 +#define ixDPM_TABLE_126 0x3f38c +#define ixDPM_TABLE_127 0x3f390 +#define ixDPM_TABLE_128 0x3f394 +#define ixDPM_TABLE_129 0x3f398 +#define ixDPM_TABLE_130 0x3f39c +#define ixDPM_TABLE_131 0x3f3a0 +#define ixDPM_TABLE_132 0x3f3a4 +#define ixDPM_TABLE_133 0x3f3a8 +#define ixDPM_TABLE_134 0x3f3ac +#define ixDPM_TABLE_135 0x3f3b0 +#define ixDPM_TABLE_136 0x3f3b4 +#define ixDPM_TABLE_137 0x3f3b8 +#define ixDPM_TABLE_138 0x3f3bc +#define ixDPM_TABLE_139 0x3f3c0 +#define ixDPM_TABLE_140 0x3f3c4 +#define ixDPM_TABLE_141 0x3f3c8 +#define ixDPM_TABLE_142 0x3f3cc +#define ixDPM_TABLE_143 0x3f3d0 +#define ixDPM_TABLE_144 0x3f3d4 +#define ixDPM_TABLE_145 0x3f3d8 +#define ixDPM_TABLE_146 0x3f3dc +#define ixDPM_TABLE_147 0x3f3e0 +#define ixDPM_TABLE_148 0x3f3e4 +#define ixDPM_TABLE_149 0x3f3e8 +#define ixDPM_TABLE_150 0x3f3ec +#define ixDPM_TABLE_151 0x3f3f0 +#define ixDPM_TABLE_152 0x3f3f4 +#define ixDPM_TABLE_153 0x3f3f8 +#define ixDPM_TABLE_154 0x3f3fc +#define ixDPM_TABLE_155 0x3f400 +#define ixDPM_TABLE_156 0x3f404 +#define ixDPM_TABLE_157 0x3f408 +#define ixDPM_TABLE_158 0x3f40c +#define ixDPM_TABLE_159 0x3f410 +#define ixDPM_TABLE_160 0x3f414 +#define ixDPM_TABLE_161 0x3f418 +#define ixDPM_TABLE_162 0x3f41c +#define ixDPM_TABLE_163 0x3f420 +#define ixDPM_TABLE_164 0x3f424 +#define ixDPM_TABLE_165 0x3f428 +#define ixDPM_TABLE_166 0x3f42c +#define ixDPM_TABLE_167 0x3f430 +#define ixDPM_TABLE_168 0x3f434 +#define ixDPM_TABLE_169 0x3f438 +#define ixDPM_TABLE_170 0x3f43c +#define ixDPM_TABLE_171 0x3f440 +#define ixDPM_TABLE_172 0x3f444 +#define ixDPM_TABLE_173 0x3f448 +#define ixDPM_TABLE_174 0x3f44c +#define ixDPM_TABLE_175 0x3f450 +#define ixDPM_TABLE_176 0x3f454 +#define ixDPM_TABLE_177 0x3f458 +#define ixDPM_TABLE_178 0x3f45c +#define ixDPM_TABLE_179 0x3f460 +#define ixDPM_TABLE_180 0x3f464 +#define ixDPM_TABLE_181 0x3f468 +#define ixDPM_TABLE_182 0x3f46c +#define ixDPM_TABLE_183 0x3f470 +#define ixDPM_TABLE_184 0x3f474 +#define ixDPM_TABLE_185 0x3f478 +#define ixDPM_TABLE_186 0x3f47c +#define ixDPM_TABLE_187 0x3f480 +#define ixDPM_TABLE_188 0x3f484 +#define ixDPM_TABLE_189 0x3f488 +#define ixDPM_TABLE_190 0x3f48c +#define ixDPM_TABLE_191 0x3f490 +#define ixDPM_TABLE_192 0x3f494 +#define ixDPM_TABLE_193 0x3f498 +#define ixDPM_TABLE_194 0x3f49c +#define ixDPM_TABLE_195 0x3f4a0 +#define ixDPM_TABLE_196 0x3f4a4 +#define ixDPM_TABLE_197 0x3f4a8 +#define ixDPM_TABLE_198 0x3f4ac +#define ixDPM_TABLE_199 0x3f4b0 +#define ixDPM_TABLE_200 0x3f4b4 +#define ixDPM_TABLE_201 0x3f4b8 +#define ixDPM_TABLE_202 0x3f4bc +#define ixDPM_TABLE_203 0x3f4c0 +#define ixDPM_TABLE_204 0x3f4c4 +#define ixDPM_TABLE_205 0x3f4c8 +#define ixDPM_TABLE_206 0x3f4cc +#define ixDPM_TABLE_207 0x3f4d0 +#define ixDPM_TABLE_208 0x3f4d4 +#define ixDPM_TABLE_209 0x3f4d8 +#define ixDPM_TABLE_210 0x3f4dc +#define ixDPM_TABLE_211 0x3f4e0 +#define ixDPM_TABLE_212 0x3f4e4 +#define ixDPM_TABLE_213 0x3f4e8 +#define ixDPM_TABLE_214 0x3f4ec +#define ixDPM_TABLE_215 0x3f4f0 +#define ixDPM_TABLE_216 0x3f4f4 +#define ixDPM_TABLE_217 0x3f4f8 +#define ixDPM_TABLE_218 0x3f4fc +#define ixDPM_TABLE_219 0x3f500 
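/* Editor's sketch, not part of this patch: the ix-prefixed addresses in this
 * header are indirect SMC registers, reached through the SMC_IND_INDEX /
 * SMC_IND_DATA doorbell pair defined further up. A minimal, hypothetical
 * accessor follows; smc_ind_read(), the __iomem base argument, and the
 * dword-to-byte scaling are assumptions about the MMIO mapping (the real
 * driver also serializes the two accesses with a spinlock): */
#include <linux/io.h>

static u32 smc_ind_read(void __iomem *rmmio, u32 ix_reg)
{
	/* select the indirect register, then read its value back */
	writel(ix_reg, rmmio + mmSMC_IND_INDEX_0 * 4);
	return readl(rmmio + mmSMC_IND_DATA_0 * 4);
}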
+#define ixDPM_TABLE_220 0x3f504 +#define ixDPM_TABLE_221 0x3f508 +#define ixDPM_TABLE_222 0x3f50c +#define ixDPM_TABLE_223 0x3f510 +#define ixDPM_TABLE_224 0x3f514 +#define ixDPM_TABLE_225 0x3f518 +#define ixDPM_TABLE_226 0x3f51c +#define ixDPM_TABLE_227 0x3f520 +#define ixDPM_TABLE_228 0x3f524 +#define ixDPM_TABLE_229 0x3f528 +#define ixDPM_TABLE_230 0x3f52c +#define ixDPM_TABLE_231 0x3f530 +#define ixDPM_TABLE_232 0x3f534 +#define ixDPM_TABLE_233 0x3f538 +#define ixDPM_TABLE_234 0x3f53c +#define ixDPM_TABLE_235 0x3f540 +#define ixDPM_TABLE_236 0x3f544 +#define ixDPM_TABLE_237 0x3f548 +#define ixDPM_TABLE_238 0x3f54c +#define ixDPM_TABLE_239 0x3f550 +#define ixDPM_TABLE_240 0x3f554 +#define ixDPM_TABLE_241 0x3f558 +#define ixDPM_TABLE_242 0x3f55c +#define ixDPM_TABLE_243 0x3f560 +#define ixDPM_TABLE_244 0x3f564 +#define ixDPM_TABLE_245 0x3f568 +#define ixDPM_TABLE_246 0x3f56c +#define ixDPM_TABLE_247 0x3f570 +#define ixDPM_TABLE_248 0x3f574 +#define ixDPM_TABLE_249 0x3f578 +#define ixDPM_TABLE_250 0x3f57c +#define ixDPM_TABLE_251 0x3f580 +#define ixDPM_TABLE_252 0x3f584 +#define ixDPM_TABLE_253 0x3f588 +#define ixDPM_TABLE_254 0x3f58c +#define ixDPM_TABLE_255 0x3f590 +#define ixDPM_TABLE_256 0x3f594 +#define ixDPM_TABLE_257 0x3f598 +#define ixDPM_TABLE_258 0x3f59c +#define ixDPM_TABLE_259 0x3f5a0 +#define ixDPM_TABLE_260 0x3f5a4 +#define ixDPM_TABLE_261 0x3f5a8 +#define ixDPM_TABLE_262 0x3f5ac +#define ixDPM_TABLE_263 0x3f5b0 +#define ixDPM_TABLE_264 0x3f5b4 +#define ixDPM_TABLE_265 0x3f5b8 +#define ixDPM_TABLE_266 0x3f5bc +#define ixDPM_TABLE_267 0x3f5c0 +#define ixDPM_TABLE_268 0x3f5c4 +#define ixDPM_TABLE_269 0x3f5c8 +#define ixDPM_TABLE_270 0x3f5cc +#define ixDPM_TABLE_271 0x3f5d0 +#define ixDPM_TABLE_272 0x3f5d4 +#define ixDPM_TABLE_273 0x3f5d8 +#define ixDPM_TABLE_274 0x3f5dc +#define ixDPM_TABLE_275 0x3f5e0 +#define ixDPM_TABLE_276 0x3f5e4 +#define ixDPM_TABLE_277 0x3f5e8 +#define ixDPM_TABLE_278 0x3f5ec +#define ixDPM_TABLE_279 0x3f5f0 +#define ixDPM_TABLE_280 0x3f5f4 +#define ixDPM_TABLE_281 0x3f5f8 +#define ixDPM_TABLE_282 0x3f5fc +#define ixDPM_TABLE_283 0x3f600 +#define ixDPM_TABLE_284 0x3f604 +#define ixDPM_TABLE_285 0x3f608 +#define ixDPM_TABLE_286 0x3f60c +#define ixDPM_TABLE_287 0x3f610 +#define ixDPM_TABLE_288 0x3f614 +#define ixDPM_TABLE_289 0x3f618 +#define ixDPM_TABLE_290 0x3f61c +#define ixDPM_TABLE_291 0x3f620 +#define ixDPM_TABLE_292 0x3f624 +#define ixDPM_TABLE_293 0x3f628 +#define ixDPM_TABLE_294 0x3f62c +#define ixDPM_TABLE_295 0x3f630 +#define ixDPM_TABLE_296 0x3f634 +#define ixDPM_TABLE_297 0x3f638 +#define ixDPM_TABLE_298 0x3f63c +#define ixDPM_TABLE_299 0x3f640 +#define ixDPM_TABLE_300 0x3f644 +#define ixDPM_TABLE_301 0x3f648 +#define ixDPM_TABLE_302 0x3f64c +#define ixDPM_TABLE_303 0x3f650 +#define ixDPM_TABLE_304 0x3f654 +#define ixDPM_TABLE_305 0x3f658 +#define ixDPM_TABLE_306 0x3f65c +#define ixDPM_TABLE_307 0x3f660 +#define ixDPM_TABLE_308 0x3f664 +#define ixDPM_TABLE_309 0x3f668 +#define ixDPM_TABLE_310 0x3f66c +#define ixDPM_TABLE_311 0x3f670 +#define ixDPM_TABLE_312 0x3f674 +#define ixDPM_TABLE_313 0x3f678 +#define ixDPM_TABLE_314 0x3f67c +#define ixDPM_TABLE_315 0x3f680 +#define ixDPM_TABLE_316 0x3f684 +#define ixDPM_TABLE_317 0x3f688 +#define ixDPM_TABLE_318 0x3f68c +#define ixDPM_TABLE_319 0x3f690 +#define ixDPM_TABLE_320 0x3f694 +#define ixDPM_TABLE_321 0x3f698 +#define ixDPM_TABLE_322 0x3f69c +#define ixDPM_TABLE_323 0x3f6a0 +#define ixDPM_TABLE_324 0x3f6a4 +#define ixDPM_TABLE_325 0x3f6a8 +#define ixDPM_TABLE_326 0x3f6ac +#define 
ixDPM_TABLE_327 0x3f6b0 +#define ixDPM_TABLE_328 0x3f6b4 +#define ixDPM_TABLE_329 0x3f6b8 +#define ixDPM_TABLE_330 0x3f6bc +#define ixDPM_TABLE_331 0x3f6c0 +#define ixDPM_TABLE_332 0x3f6c4 +#define ixDPM_TABLE_333 0x3f6c8 +#define ixDPM_TABLE_334 0x3f6cc +#define ixDPM_TABLE_335 0x3f6d0 +#define ixDPM_TABLE_336 0x3f6d4 +#define ixDPM_TABLE_337 0x3f6d8 +#define ixDPM_TABLE_338 0x3f6dc +#define ixDPM_TABLE_339 0x3f6e0 +#define ixDPM_TABLE_340 0x3f6e4 +#define ixDPM_TABLE_341 0x3f6e8 +#define ixDPM_TABLE_342 0x3f6ec +#define ixDPM_TABLE_343 0x3f6f0 +#define ixDPM_TABLE_344 0x3f6f4 +#define ixDPM_TABLE_345 0x3f6f8 +#define ixDPM_TABLE_346 0x3f6fc +#define ixDPM_TABLE_347 0x3f700 +#define ixDPM_TABLE_348 0x3f704 +#define ixDPM_TABLE_349 0x3f708 +#define ixDPM_TABLE_350 0x3f70c +#define ixDPM_TABLE_351 0x3f710 +#define ixDPM_TABLE_352 0x3f714 +#define ixDPM_TABLE_353 0x3f718 +#define ixDPM_TABLE_354 0x3f71c +#define ixDPM_TABLE_355 0x3f720 +#define ixDPM_TABLE_356 0x3f724 +#define ixDPM_TABLE_357 0x3f728 +#define ixDPM_TABLE_358 0x3f72c +#define ixDPM_TABLE_359 0x3f730 +#define ixDPM_TABLE_360 0x3f734 +#define ixDPM_TABLE_361 0x3f738 +#define ixDPM_TABLE_362 0x3f73c +#define ixDPM_TABLE_363 0x3f740 +#define ixDPM_TABLE_364 0x3f744 +#define ixDPM_TABLE_365 0x3f748 +#define ixDPM_TABLE_366 0x3f74c +#define ixDPM_TABLE_367 0x3f750 +#define ixDPM_TABLE_368 0x3f754 +#define ixDPM_TABLE_369 0x3f758 +#define ixDPM_TABLE_370 0x3f75c +#define ixDPM_TABLE_371 0x3f760 +#define ixDPM_TABLE_372 0x3f764 +#define ixDPM_TABLE_373 0x3f768 +#define ixDPM_TABLE_374 0x3f76c +#define ixDPM_TABLE_375 0x3f770 +#define ixDPM_TABLE_376 0x3f774 +#define ixDPM_TABLE_377 0x3f778 +#define ixDPM_TABLE_378 0x3f77c +#define ixDPM_TABLE_379 0x3f780 +#define ixDPM_TABLE_380 0x3f784 +#define ixDPM_TABLE_381 0x3f788 +#define ixDPM_TABLE_382 0x3f78c +#define ixDPM_TABLE_383 0x3f790 +#define ixDPM_TABLE_384 0x3f794 +#define ixDPM_TABLE_385 0x3f798 +#define ixDPM_TABLE_386 0x3f79c +#define ixDPM_TABLE_387 0x3f7a0 +#define ixDPM_TABLE_388 0x3f7a4 +#define ixDPM_TABLE_389 0x3f7a8 +#define ixDPM_TABLE_390 0x3f7ac +#define ixDPM_TABLE_391 0x3f7b0 +#define ixDPM_TABLE_392 0x3f7b4 +#define ixDPM_TABLE_393 0x3f7b8 +#define ixDPM_TABLE_394 0x3f7bc +#define ixDPM_TABLE_395 0x3f7c0 +#define ixDPM_TABLE_396 0x3f7c4 +#define ixDPM_TABLE_397 0x3f7c8 +#define ixDPM_TABLE_398 0x3f7cc +#define ixDPM_TABLE_399 0x3f7d0 +#define ixDPM_TABLE_400 0x3f7d4 +#define ixDPM_TABLE_401 0x3f7d8 +#define ixDPM_TABLE_402 0x3f7dc +#define ixDPM_TABLE_403 0x3f7e0 +#define ixDPM_TABLE_404 0x3f7e4 +#define ixDPM_TABLE_405 0x3f7e8 +#define ixDPM_TABLE_406 0x3f7ec +#define ixDPM_TABLE_407 0x3f7f0 +#define ixDPM_TABLE_408 0x3f7f4 +#define ixDPM_TABLE_409 0x3f7f8 +#define ixDPM_TABLE_410 0x3f7fc +#define ixDPM_TABLE_411 0x3f800 +#define ixDPM_TABLE_412 0x3f804 +#define ixDPM_TABLE_413 0x3f808 +#define ixDPM_TABLE_414 0x3f80c +#define ixDPM_TABLE_415 0x3f810 +#define ixDPM_TABLE_416 0x3f814 +#define ixDPM_TABLE_417 0x3f818 +#define ixDPM_TABLE_418 0x3f81c +#define ixDPM_TABLE_419 0x3f820 +#define ixDPM_TABLE_420 0x3f824 +#define ixDPM_TABLE_421 0x3f828 +#define ixDPM_TABLE_422 0x3f82c +#define ixDPM_TABLE_423 0x3f830 +#define ixDPM_TABLE_424 0x3f834 +#define ixDPM_TABLE_425 0x3f838 +#define ixDPM_TABLE_426 0x3f83c +#define ixDPM_TABLE_427 0x3f840 +#define ixDPM_TABLE_428 0x3f844 +#define ixDPM_TABLE_429 0x3f848 +#define ixDPM_TABLE_430 0x3f84c +#define ixDPM_TABLE_431 0x3f850 +#define ixDPM_TABLE_432 0x3f854 +#define ixDPM_TABLE_433 0x3f858 +#define ixDPM_TABLE_434 0x3f85c 
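/* Editor's note, not part of this patch: the ixDPM_TABLE_n entries above and
 * below form one dense dword array in SMC address space, so entry n sits at
 * 0x3f198 + (n - 1) * 4. A hypothetical macro equivalent to the explicit
 * defines (for n = 434 it yields 0x3f85c, matching the entry just above): */
#define ixDPM_TABLE(n)	(0x3f198 + ((n) - 1) * 4)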
+#define ixDPM_TABLE_435 0x3f860 +#define ixDPM_TABLE_436 0x3f864 +#define ixDPM_TABLE_437 0x3f868 +#define ixDPM_TABLE_438 0x3f86c +#define ixDPM_TABLE_439 0x3f870 +#define ixDPM_TABLE_440 0x3f874 +#define ixSOFT_REGISTERS_TABLE_1 0x3f89c +#define ixSOFT_REGISTERS_TABLE_2 0x3f8a0 +#define ixSOFT_REGISTERS_TABLE_3 0x3f8a4 +#define ixSOFT_REGISTERS_TABLE_4 0x3f8a8 +#define ixSOFT_REGISTERS_TABLE_5 0x3f8ac +#define ixSOFT_REGISTERS_TABLE_6 0x3f8b0 +#define ixSOFT_REGISTERS_TABLE_7 0x3f8b4 +#define ixSOFT_REGISTERS_TABLE_8 0x3f8b8 +#define ixSOFT_REGISTERS_TABLE_9 0x3f8bc +#define ixSOFT_REGISTERS_TABLE_10 0x3f8c0 +#define ixSOFT_REGISTERS_TABLE_11 0x3f8c4 +#define ixSOFT_REGISTERS_TABLE_12 0x3f8c8 +#define ixSOFT_REGISTERS_TABLE_13 0x3f8cc +#define ixSOFT_REGISTERS_TABLE_14 0x3f8d0 +#define ixSOFT_REGISTERS_TABLE_15 0x3f8d4 +#define ixSOFT_REGISTERS_TABLE_16 0x3f8d8 +#define ixSOFT_REGISTERS_TABLE_17 0x3f8dc +#define ixSOFT_REGISTERS_TABLE_18 0x3f8e0 +#define ixSOFT_REGISTERS_TABLE_19 0x3f8e4 +#define ixSOFT_REGISTERS_TABLE_20 0x3f8e8 +#define ixSOFT_REGISTERS_TABLE_21 0x3f8ec +#define ixSOFT_REGISTERS_TABLE_22 0x3f8f0 +#define ixSOFT_REGISTERS_TABLE_23 0x3f8f4 +#define ixSOFT_REGISTERS_TABLE_24 0x3f8f8 +#define ixSOFT_REGISTERS_TABLE_25 0x3f8fc +#define ixSOFT_REGISTERS_TABLE_26 0x3f900 +#define ixSOFT_REGISTERS_TABLE_27 0x3f904 +#define ixSOFT_REGISTERS_TABLE_28 0x3f888 +#define ixSOFT_REGISTERS_TABLE_29 0x3f90c +#define ixSOFT_REGISTERS_TABLE_30 0x3f910 +#define ixPM_FUSES_1 0x3f914 +#define ixPM_FUSES_2 0x3f918 +#define ixPM_FUSES_3 0x3f91c +#define ixPM_FUSES_4 0x3f920 +#define ixPM_FUSES_5 0x3f924 +#define ixPM_FUSES_6 0x3f928 +#define ixPM_FUSES_7 0x3f92c +#define ixPM_FUSES_8 0x3f930 +#define ixPM_FUSES_9 0x3f934 +#define ixPM_FUSES_10 0x3f938 +#define ixPM_FUSES_11 0x3f93c +#define ixPM_FUSES_12 0x3f940 +#define ixPM_FUSES_13 0x3f944 +#define ixPM_FUSES_14 0x3f948 +#define ixPM_FUSES_15 0x3f94c +#define ixSMU_PM_STATUS_0 0x3fe00 +#define ixSMU_PM_STATUS_1 0x3fe04 +#define ixSMU_PM_STATUS_2 0x3fe08 +#define ixSMU_PM_STATUS_3 0x3fe0c +#define ixSMU_PM_STATUS_4 0x3fe10 +#define ixSMU_PM_STATUS_5 0x3fe14 +#define ixSMU_PM_STATUS_6 0x3fe18 +#define ixSMU_PM_STATUS_7 0x3fe1c +#define ixSMU_PM_STATUS_8 0x3fe20 +#define ixSMU_PM_STATUS_9 0x3fe24 +#define ixSMU_PM_STATUS_10 0x3fe28 +#define ixSMU_PM_STATUS_11 0x3fe2c +#define ixSMU_PM_STATUS_12 0x3fe30 +#define ixSMU_PM_STATUS_13 0x3fe34 +#define ixSMU_PM_STATUS_14 0x3fe38 +#define ixSMU_PM_STATUS_15 0x3fe3c +#define ixSMU_PM_STATUS_16 0x3fe40 +#define ixSMU_PM_STATUS_17 0x3fe44 +#define ixSMU_PM_STATUS_18 0x3fe48 +#define ixSMU_PM_STATUS_19 0x3fe4c +#define ixSMU_PM_STATUS_20 0x3fe50 +#define ixSMU_PM_STATUS_21 0x3fe54 +#define ixSMU_PM_STATUS_22 0x3fe58 +#define ixSMU_PM_STATUS_23 0x3fe5c +#define ixSMU_PM_STATUS_24 0x3fe60 +#define ixSMU_PM_STATUS_25 0x3fe64 +#define ixSMU_PM_STATUS_26 0x3fe68 +#define ixSMU_PM_STATUS_27 0x3fe6c +#define ixSMU_PM_STATUS_28 0x3fe70 +#define ixSMU_PM_STATUS_29 0x3fe74 +#define ixSMU_PM_STATUS_30 0x3fe78 +#define ixSMU_PM_STATUS_31 0x3fe7c +#define ixSMU_PM_STATUS_32 0x3fe80 +#define ixSMU_PM_STATUS_33 0x3fe84 +#define ixSMU_PM_STATUS_34 0x3fe88 +#define ixSMU_PM_STATUS_35 0x3fe8c +#define ixSMU_PM_STATUS_36 0x3fe90 +#define ixSMU_PM_STATUS_37 0x3fe94 +#define ixSMU_PM_STATUS_38 0x3fe98 +#define ixSMU_PM_STATUS_39 0x3fe9c +#define ixSMU_PM_STATUS_40 0x3fea0 +#define ixSMU_PM_STATUS_41 0x3fea4 +#define ixSMU_PM_STATUS_42 0x3fea8 +#define ixSMU_PM_STATUS_43 0x3feac +#define ixSMU_PM_STATUS_44 0x3feb0 +#define 
ixSMU_PM_STATUS_45 0x3feb4 +#define ixSMU_PM_STATUS_46 0x3feb8 +#define ixSMU_PM_STATUS_47 0x3febc +#define ixSMU_PM_STATUS_48 0x3fec0 +#define ixSMU_PM_STATUS_49 0x3fec4 +#define ixSMU_PM_STATUS_50 0x3fec8 +#define ixSMU_PM_STATUS_51 0x3fecc +#define ixSMU_PM_STATUS_52 0x3fed0 +#define ixSMU_PM_STATUS_53 0x3fed4 +#define ixSMU_PM_STATUS_54 0x3fed8 +#define ixSMU_PM_STATUS_55 0x3fedc +#define ixSMU_PM_STATUS_56 0x3fee0 +#define ixSMU_PM_STATUS_57 0x3fee4 +#define ixSMU_PM_STATUS_58 0x3fee8 +#define ixSMU_PM_STATUS_59 0x3feec +#define ixSMU_PM_STATUS_60 0x3fef0 +#define ixSMU_PM_STATUS_61 0x3fef4 +#define ixSMU_PM_STATUS_62 0x3fef8 +#define ixSMU_PM_STATUS_63 0x3fefc +#define ixSMU_PM_STATUS_64 0x3ff00 +#define ixSMU_PM_STATUS_65 0x3ff04 +#define ixSMU_PM_STATUS_66 0x3ff08 +#define ixSMU_PM_STATUS_67 0x3ff0c +#define ixSMU_PM_STATUS_68 0x3ff10 +#define ixSMU_PM_STATUS_69 0x3ff14 +#define ixSMU_PM_STATUS_70 0x3ff18 +#define ixSMU_PM_STATUS_71 0x3ff1c +#define ixSMU_PM_STATUS_72 0x3ff20 +#define ixSMU_PM_STATUS_73 0x3ff24 +#define ixSMU_PM_STATUS_74 0x3ff28 +#define ixSMU_PM_STATUS_75 0x3ff2c +#define ixSMU_PM_STATUS_76 0x3ff30 +#define ixSMU_PM_STATUS_77 0x3ff34 +#define ixSMU_PM_STATUS_78 0x3ff38 +#define ixSMU_PM_STATUS_79 0x3ff3c +#define ixSMU_PM_STATUS_80 0x3ff40 +#define ixSMU_PM_STATUS_81 0x3ff44 +#define ixSMU_PM_STATUS_82 0x3ff48 +#define ixSMU_PM_STATUS_83 0x3ff4c +#define ixSMU_PM_STATUS_84 0x3ff50 +#define ixSMU_PM_STATUS_85 0x3ff54 +#define ixSMU_PM_STATUS_86 0x3ff58 +#define ixSMU_PM_STATUS_87 0x3ff5c +#define ixSMU_PM_STATUS_88 0x3ff60 +#define ixSMU_PM_STATUS_89 0x3ff64 +#define ixSMU_PM_STATUS_90 0x3ff68 +#define ixSMU_PM_STATUS_91 0x3ff6c +#define ixSMU_PM_STATUS_92 0x3ff70 +#define ixSMU_PM_STATUS_93 0x3ff74 +#define ixSMU_PM_STATUS_94 0x3ff78 +#define ixSMU_PM_STATUS_95 0x3ff7c +#define ixSMU_PM_STATUS_96 0x3ff80 +#define ixSMU_PM_STATUS_97 0x3ff84 +#define ixSMU_PM_STATUS_98 0x3ff88 +#define ixSMU_PM_STATUS_99 0x3ff8c +#define ixSMU_PM_STATUS_100 0x3ff90 +#define ixSMU_PM_STATUS_101 0x3ff94 +#define ixSMU_PM_STATUS_102 0x3ff98 +#define ixSMU_PM_STATUS_103 0x3ff9c +#define ixSMU_PM_STATUS_104 0x3ffa0 +#define ixSMU_PM_STATUS_105 0x3ffa4 +#define ixSMU_PM_STATUS_106 0x3ffa8 +#define ixSMU_PM_STATUS_107 0x3ffac +#define ixSMU_PM_STATUS_108 0x3ffb0 +#define ixSMU_PM_STATUS_109 0x3ffb4 +#define ixSMU_PM_STATUS_110 0x3ffb8 +#define ixSMU_PM_STATUS_111 0x3ffbc +#define ixSMU_PM_STATUS_112 0x3ffc0 +#define ixSMU_PM_STATUS_113 0x3ffc4 +#define ixSMU_PM_STATUS_114 0x3ffc8 +#define ixSMU_PM_STATUS_115 0x3ffcc +#define ixSMU_PM_STATUS_116 0x3ffd0 +#define ixSMU_PM_STATUS_117 0x3ffd4 +#define ixSMU_PM_STATUS_118 0x3ffd8 +#define ixSMU_PM_STATUS_119 0x3ffdc +#define ixSMU_PM_STATUS_120 0x3ffe0 +#define ixSMU_PM_STATUS_121 0x3ffe4 +#define ixSMU_PM_STATUS_122 0x3ffe8 +#define ixSMU_PM_STATUS_123 0x3ffec +#define ixSMU_PM_STATUS_124 0x3fff0 +#define ixSMU_PM_STATUS_125 0x3fff4 +#define ixSMU_PM_STATUS_126 0x3fff8 +#define ixSMU_PM_STATUS_127 0x3fffc +#define ixCG_THERMAL_INT_ENA 0xc2100024 +#define ixCG_THERMAL_INT_CTRL 0xc2100028 +#define ixCG_THERMAL_INT_STATUS 0xc210002c +#define ixCG_THERMAL_CTRL 0xc0300004 +#define ixCG_THERMAL_STATUS 0xc0300008 +#define ixCG_THERMAL_INT 0xc030000c +#define ixCG_MULT_THERMAL_CTRL 0xc0300010 +#define ixCG_MULT_THERMAL_STATUS 0xc0300014 +#define ixTHM_TMON2_CTRL 0xc0300034 +#define ixTHM_TMON2_CTRL2 0xc0300038 +#define ixTHM_TMON2_CSR_WR 0xc0300054 +#define ixTHM_TMON2_CSR_RD 0xc0300058 +#define ixCG_FDO_CTRL0 0xc0300064 +#define ixCG_FDO_CTRL1 
0xc0300068 +#define ixCG_FDO_CTRL2 0xc030006c +#define ixCG_TACH_CTRL 0xc0300070 +#define ixCG_TACH_STATUS 0xc0300074 +#define ixCC_THM_STRAPS0 0xc0300080 +#define ixTHM_TMON0_RDIL0_DATA 0xc0300100 +#define ixTHM_TMON0_RDIL1_DATA 0xc0300104 +#define ixTHM_TMON0_RDIL2_DATA 0xc0300108 +#define ixTHM_TMON0_RDIL3_DATA 0xc030010c +#define ixTHM_TMON0_RDIL4_DATA 0xc0300110 +#define ixTHM_TMON0_RDIL5_DATA 0xc0300114 +#define ixTHM_TMON0_RDIL6_DATA 0xc0300118 +#define ixTHM_TMON0_RDIL7_DATA 0xc030011c +#define ixTHM_TMON0_RDIL8_DATA 0xc0300120 +#define ixTHM_TMON0_RDIL9_DATA 0xc0300124 +#define ixTHM_TMON0_RDIL10_DATA 0xc0300128 +#define ixTHM_TMON0_RDIL11_DATA 0xc030012c +#define ixTHM_TMON0_RDIL12_DATA 0xc0300130 +#define ixTHM_TMON0_RDIL13_DATA 0xc0300134 +#define ixTHM_TMON0_RDIL14_DATA 0xc0300138 +#define ixTHM_TMON0_RDIL15_DATA 0xc030013c +#define ixTHM_TMON0_RDIR0_DATA 0xc0300140 +#define ixTHM_TMON0_RDIR1_DATA 0xc0300144 +#define ixTHM_TMON0_RDIR2_DATA 0xc0300148 +#define ixTHM_TMON0_RDIR3_DATA 0xc030014c +#define ixTHM_TMON0_RDIR4_DATA 0xc0300150 +#define ixTHM_TMON0_RDIR5_DATA 0xc0300154 +#define ixTHM_TMON0_RDIR6_DATA 0xc0300158 +#define ixTHM_TMON0_RDIR7_DATA 0xc030015c +#define ixTHM_TMON0_RDIR8_DATA 0xc0300160 +#define ixTHM_TMON0_RDIR9_DATA 0xc0300164 +#define ixTHM_TMON0_RDIR10_DATA 0xc0300168 +#define ixTHM_TMON0_RDIR11_DATA 0xc030016c +#define ixTHM_TMON0_RDIR12_DATA 0xc0300170 +#define ixTHM_TMON0_RDIR13_DATA 0xc0300174 +#define ixTHM_TMON0_RDIR14_DATA 0xc0300178 +#define ixTHM_TMON0_RDIR15_DATA 0xc030017c +#define ixTHM_TMON1_RDIL0_DATA 0xc0300180 +#define ixTHM_TMON1_RDIL1_DATA 0xc0300184 +#define ixTHM_TMON1_RDIL2_DATA 0xc0300188 +#define ixTHM_TMON1_RDIL3_DATA 0xc030018c +#define ixTHM_TMON1_RDIL4_DATA 0xc0300190 +#define ixTHM_TMON1_RDIL5_DATA 0xc0300194 +#define ixTHM_TMON1_RDIL6_DATA 0xc0300198 +#define ixTHM_TMON1_RDIL7_DATA 0xc030019c +#define ixTHM_TMON1_RDIL8_DATA 0xc03001a0 +#define ixTHM_TMON1_RDIL9_DATA 0xc03001a4 +#define ixTHM_TMON1_RDIL10_DATA 0xc03001a8 +#define ixTHM_TMON1_RDIL11_DATA 0xc03001ac +#define ixTHM_TMON1_RDIL12_DATA 0xc03001b0 +#define ixTHM_TMON1_RDIL13_DATA 0xc03001b4 +#define ixTHM_TMON1_RDIL14_DATA 0xc03001b8 +#define ixTHM_TMON1_RDIL15_DATA 0xc03001bc +#define ixTHM_TMON1_RDIR0_DATA 0xc03001c0 +#define ixTHM_TMON1_RDIR1_DATA 0xc03001c4 +#define ixTHM_TMON1_RDIR2_DATA 0xc03001c8 +#define ixTHM_TMON1_RDIR3_DATA 0xc03001cc +#define ixTHM_TMON1_RDIR4_DATA 0xc03001d0 +#define ixTHM_TMON1_RDIR5_DATA 0xc03001d4 +#define ixTHM_TMON1_RDIR6_DATA 0xc03001d8 +#define ixTHM_TMON1_RDIR7_DATA 0xc03001dc +#define ixTHM_TMON1_RDIR8_DATA 0xc03001e0 +#define ixTHM_TMON1_RDIR9_DATA 0xc03001e4 +#define ixTHM_TMON1_RDIR10_DATA 0xc03001e8 +#define ixTHM_TMON1_RDIR11_DATA 0xc03001ec +#define ixTHM_TMON1_RDIR12_DATA 0xc03001f0 +#define ixTHM_TMON1_RDIR13_DATA 0xc03001f4 +#define ixTHM_TMON1_RDIR14_DATA 0xc03001f8 +#define ixTHM_TMON1_RDIR15_DATA 0xc03001fc +#define ixTHM_TMON2_RDIL0_DATA 0xc0300200 +#define ixTHM_TMON2_RDIL1_DATA 0xc0300204 +#define ixTHM_TMON2_RDIL2_DATA 0xc0300208 +#define ixTHM_TMON2_RDIL3_DATA 0xc030020c +#define ixTHM_TMON2_RDIL4_DATA 0xc0300210 +#define ixTHM_TMON2_RDIL5_DATA 0xc0300214 +#define ixTHM_TMON2_RDIL6_DATA 0xc0300218 +#define ixTHM_TMON2_RDIL7_DATA 0xc030021c +#define ixTHM_TMON2_RDIL8_DATA 0xc0300220 +#define ixTHM_TMON2_RDIL9_DATA 0xc0300224 +#define ixTHM_TMON2_RDIL10_DATA 0xc0300228 +#define ixTHM_TMON2_RDIL11_DATA 0xc030022c +#define ixTHM_TMON2_RDIL12_DATA 0xc0300230 +#define ixTHM_TMON2_RDIL13_DATA 0xc0300234 +#define 
ixTHM_TMON2_RDIL14_DATA 0xc0300238 +#define ixTHM_TMON2_RDIL15_DATA 0xc030023c +#define ixTHM_TMON2_RDIR0_DATA 0xc0300240 +#define ixTHM_TMON2_RDIR1_DATA 0xc0300244 +#define ixTHM_TMON2_RDIR2_DATA 0xc0300248 +#define ixTHM_TMON2_RDIR3_DATA 0xc030024c +#define ixTHM_TMON2_RDIR4_DATA 0xc0300250 +#define ixTHM_TMON2_RDIR5_DATA 0xc0300254 +#define ixTHM_TMON2_RDIR6_DATA 0xc0300258 +#define ixTHM_TMON2_RDIR7_DATA 0xc030025c +#define ixTHM_TMON2_RDIR8_DATA 0xc0300260 +#define ixTHM_TMON2_RDIR9_DATA 0xc0300264 +#define ixTHM_TMON2_RDIR10_DATA 0xc0300268 +#define ixTHM_TMON2_RDIR11_DATA 0xc030026c +#define ixTHM_TMON2_RDIR12_DATA 0xc0300270 +#define ixTHM_TMON2_RDIR13_DATA 0xc0300274 +#define ixTHM_TMON2_RDIR14_DATA 0xc0300278 +#define ixTHM_TMON2_RDIR15_DATA 0xc030027c +#define ixTHM_TMON0_INT_DATA 0xc0300300 +#define ixTHM_TMON1_INT_DATA 0xc0300304 +#define ixTHM_TMON2_INT_DATA 0xc0300308 +#define ixTHM_TMON0_DEBUG 0xc0300310 +#define ixTHM_TMON1_DEBUG 0xc0300314 +#define ixTHM_TMON2_DEBUG 0xc0300318 +#define ixTHM_TMON0_STATUS 0xc0300320 +#define ixTHM_TMON1_STATUS 0xc0300324 +#define ixTHM_TMON2_STATUS 0xc0300328 +#define ixGENERAL_PWRMGT 0xc0200000 +#define ixCNB_PWRMGT_CNTL 0xc0200004 +#define ixSCLK_PWRMGT_CNTL 0xc0200008 +#define ixTARGET_AND_CURRENT_PROFILE_INDEX 0xc0200014 +#define ixPWR_PCC_CONTROL 0xc0200018 +#define ixPWR_PCC_GPIO_SELECT 0xc020001c +#define ixCG_FREQ_TRAN_VOTING_0 0xc02001a8 +#define ixCG_FREQ_TRAN_VOTING_1 0xc02001ac +#define ixCG_FREQ_TRAN_VOTING_2 0xc02001b0 +#define ixCG_FREQ_TRAN_VOTING_3 0xc02001b4 +#define ixCG_FREQ_TRAN_VOTING_4 0xc02001b8 +#define ixCG_FREQ_TRAN_VOTING_5 0xc02001bc +#define ixCG_FREQ_TRAN_VOTING_6 0xc02001c0 +#define ixCG_FREQ_TRAN_VOTING_7 0xc02001c4 +#define ixPLL_TEST_CNTL 0xc020003c +#define ixCG_STATIC_SCREEN_PARAMETER 0xc0200044 +#define ixCG_DISPLAY_GAP_CNTL 0xc0200060 +#define ixCG_DISPLAY_GAP_CNTL2 0xc0200230 +#define ixCG_ACPI_CNTL 0xc0200064 +#define ixSCLK_DEEP_SLEEP_CNTL 0xc0200080 +#define ixSCLK_DEEP_SLEEP_CNTL2 0xc0200084 +#define ixSCLK_DEEP_SLEEP_CNTL3 0xc020009c +#define ixSCLK_DEEP_SLEEP_MISC_CNTL 0xc0200088 +#define ixLCLK_DEEP_SLEEP_CNTL 0xc020008c +#define ixLCLK_DEEP_SLEEP_CNTL2 0xc0200310 +#define ixTARGET_AND_CURRENT_PROFILE_INDEX_1 0xc02000f0 +#define ixCG_ULV_PARAMETER 0xc020015c +#define ixSCLK_MIN_DIV 0xc02003ac +#define ixPWR_AVFS_SEL 0xc0200384 +#define ixPWR_AVFS_CNTL 0xc0200388 +#define ixPWR_AVFS0_CNTL_STATUS 0xc0200400 +#define ixPWR_AVFS1_CNTL_STATUS 0xc0200404 +#define ixPWR_AVFS2_CNTL_STATUS 0xc0200408 +#define ixPWR_AVFS3_CNTL_STATUS 0xc020040c +#define ixPWR_AVFS4_CNTL_STATUS 0xc0200410 +#define ixPWR_AVFS5_CNTL_STATUS 0xc0200414 +#define ixPWR_AVFS6_CNTL_STATUS 0xc0200418 +#define ixPWR_AVFS7_CNTL_STATUS 0xc020041c +#define ixPWR_AVFS8_CNTL_STATUS 0xc0200420 +#define ixPWR_AVFS9_CNTL_STATUS 0xc0200424 +#define ixPWR_AVFS10_CNTL_STATUS 0xc0200428 +#define ixPWR_AVFS11_CNTL_STATUS 0xc020042c +#define ixPWR_AVFS12_CNTL_STATUS 0xc0200430 +#define ixPWR_AVFS13_CNTL_STATUS 0xc0200434 +#define ixPWR_AVFS14_CNTL_STATUS 0xc0200438 +#define ixPWR_AVFS15_CNTL_STATUS 0xc020043c +#define ixPWR_AVFS16_CNTL_STATUS 0xc0200440 +#define ixPWR_AVFS17_CNTL_STATUS 0xc0200444 +#define ixPWR_AVFS18_CNTL_STATUS 0xc0200448 +#define ixPWR_AVFS19_CNTL_STATUS 0xc020044c +#define ixPWR_AVFS20_CNTL_STATUS 0xc0200450 +#define ixPWR_AVFS21_CNTL_STATUS 0xc0200454 +#define ixPWR_AVFS22_CNTL_STATUS 0xc0200458 +#define ixPWR_AVFS23_CNTL_STATUS 0xc020045c +#define ixPWR_AVFS24_CNTL_STATUS 0xc0200460 +#define ixPWR_AVFS25_CNTL_STATUS 
0xc0200464 +#define ixPWR_AVFS26_CNTL_STATUS 0xc0200468 +#define ixPWR_AVFS27_CNTL_STATUS 0xc020046c +#define ixPWR_CKS_ENABLE 0xc020034c +#define ixPWR_CKS_CNTL 0xc0200350 +#define ixPWR_DISP_TIMER_CONTROL 0xc02003c0 +#define ixPWR_DISP_TIMER_DEBUG 0xc02003c4 +#define ixPWR_DISP_TIMER2_CONTROL 0xc02003c8 +#define ixPWR_DISP_TIMER2_DEBUG 0xc02003cc +#define ixPWR_DISP_TIMER_CONTROL2 0xc0200378 +#define ixVDDGFX_IDLE_PARAMETER 0xc020036c +#define ixVDDGFX_IDLE_CONTROL 0xc0200370 +#define ixVDDGFX_IDLE_EXIT 0xc0200374 +#define ixLCAC_MC0_CNTL 0xc0400130 +#define ixLCAC_MC0_OVR_SEL 0xc0400134 +#define ixLCAC_MC0_OVR_VAL 0xc0400138 +#define ixLCAC_MC1_CNTL 0xc040013c +#define ixLCAC_MC1_OVR_SEL 0xc0400140 +#define ixLCAC_MC1_OVR_VAL 0xc0400144 +#define ixLCAC_MC2_CNTL 0xc0400148 +#define ixLCAC_MC2_OVR_SEL 0xc040014c +#define ixLCAC_MC2_OVR_VAL 0xc0400150 +#define ixLCAC_MC3_CNTL 0xc0400154 +#define ixLCAC_MC3_OVR_SEL 0xc0400158 +#define ixLCAC_MC3_OVR_VAL 0xc040015c +#define ixLCAC_MC4_CNTL 0xc0400d60 +#define ixLCAC_MC4_OVR_SEL 0xc0400d64 +#define ixLCAC_MC4_OVR_VAL 0xc0400d68 +#define ixLCAC_MC5_CNTL 0xc0400d6c +#define ixLCAC_MC5_OVR_SEL 0xc0400d70 +#define ixLCAC_MC5_OVR_VAL 0xc0400d74 +#define ixLCAC_MC6_CNTL 0xc0400d78 +#define ixLCAC_MC6_OVR_SEL 0xc0400d7c +#define ixLCAC_MC6_OVR_VAL 0xc0400d80 +#define ixLCAC_MC7_CNTL 0xc0400d84 +#define ixLCAC_MC7_OVR_SEL 0xc0400d88 +#define ixLCAC_MC7_OVR_VAL 0xc0400d8c +#define ixLCAC_CPL_CNTL 0xc0400160 +#define ixLCAC_CPL_OVR_SEL 0xc0400164 +#define ixLCAC_CPL_OVR_VAL 0xc0400168 +#define mmROM_SMC_IND_INDEX 0x80 +#define mmROM0_ROM_SMC_IND_INDEX 0x80 +#define mmROM1_ROM_SMC_IND_INDEX 0x82 +#define mmROM2_ROM_SMC_IND_INDEX 0x84 +#define mmROM3_ROM_SMC_IND_INDEX 0x86 +#define mmROM_SMC_IND_DATA 0x81 +#define mmROM0_ROM_SMC_IND_DATA 0x81 +#define mmROM1_ROM_SMC_IND_DATA 0x83 +#define mmROM2_ROM_SMC_IND_DATA 0x85 +#define mmROM3_ROM_SMC_IND_DATA 0x87 +#define ixROM_CNTL 0xc0600000 +#define ixPAGE_MIRROR_CNTL 0xc0600004 +#define ixROM_STATUS 0xc0600008 +#define ixCGTT_ROM_CLK_CTRL0 0xc060000c +#define ixROM_INDEX 0xc0600010 +#define ixROM_DATA 0xc0600014 +#define ixROM_START 0xc0600018 +#define ixROM_SW_CNTL 0xc060001c +#define ixROM_SW_STATUS 0xc0600020 +#define ixROM_SW_COMMAND 0xc0600024 +#define ixROM_SW_DATA_1 0xc0600028 +#define ixROM_SW_DATA_2 0xc060002c +#define ixROM_SW_DATA_3 0xc0600030 +#define ixROM_SW_DATA_4 0xc0600034 +#define ixROM_SW_DATA_5 0xc0600038 +#define ixROM_SW_DATA_6 0xc060003c +#define ixROM_SW_DATA_7 0xc0600040 +#define ixROM_SW_DATA_8 0xc0600044 +#define ixROM_SW_DATA_9 0xc0600048 +#define ixROM_SW_DATA_10 0xc060004c +#define ixROM_SW_DATA_11 0xc0600050 +#define ixROM_SW_DATA_12 0xc0600054 +#define ixROM_SW_DATA_13 0xc0600058 +#define ixROM_SW_DATA_14 0xc060005c +#define ixROM_SW_DATA_15 0xc0600060 +#define ixROM_SW_DATA_16 0xc0600064 +#define ixROM_SW_DATA_17 0xc0600068 +#define ixROM_SW_DATA_18 0xc060006c +#define ixROM_SW_DATA_19 0xc0600070 +#define ixROM_SW_DATA_20 0xc0600074 +#define ixROM_SW_DATA_21 0xc0600078 +#define ixROM_SW_DATA_22 0xc060007c +#define ixROM_SW_DATA_23 0xc0600080 +#define ixROM_SW_DATA_24 0xc0600084 +#define ixROM_SW_DATA_25 0xc0600088 +#define ixROM_SW_DATA_26 0xc060008c +#define ixROM_SW_DATA_27 0xc0600090 +#define ixROM_SW_DATA_28 0xc0600094 +#define ixROM_SW_DATA_29 0xc0600098 +#define ixROM_SW_DATA_30 0xc060009c +#define ixROM_SW_DATA_31 0xc06000a0 +#define ixROM_SW_DATA_32 0xc06000a4 +#define ixROM_SW_DATA_33 0xc06000a8 +#define ixROM_SW_DATA_34 0xc06000ac +#define ixROM_SW_DATA_35 0xc06000b0 
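/* Editor's sketch, not part of this patch: the golden-settings tables this
 * series adds for Fiji earlier in the diff (golden_settings_fiji_a10,
 * fiji_mgcg_cgcg_init) are flat {register, AND-mask, OR-value} triplets
 * consumed three at a time by amdgpu_program_register_sequence(). Its core
 * is a read-modify-write loop, roughly as below; RREG32/WREG32 are assumed
 * to be the usual amdgpu MMIO accessors and this body is a sketch, not the
 * exact upstream implementation: */
static void program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers, u32 array_size)
{
	u32 i;

	for (i = 0; i < array_size; i += 3) {
		u32 reg      = registers[i + 0];
		u32 and_mask = registers[i + 1];
		u32 or_mask  = registers[i + 2];
		u32 tmp      = RREG32(reg);

		tmp &= ~and_mask;	/* clear the bits the table owns */
		tmp |= or_mask;		/* then OR in the golden values */
		WREG32(reg, tmp);
	}
}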
+#define ixROM_SW_DATA_36 0xc06000b4 +#define ixROM_SW_DATA_37 0xc06000b8 +#define ixROM_SW_DATA_38 0xc06000bc +#define ixROM_SW_DATA_39 0xc06000c0 +#define ixROM_SW_DATA_40 0xc06000c4 +#define ixROM_SW_DATA_41 0xc06000c8 +#define ixROM_SW_DATA_42 0xc06000cc +#define ixROM_SW_DATA_43 0xc06000d0 +#define ixROM_SW_DATA_44 0xc06000d4 +#define ixROM_SW_DATA_45 0xc06000d8 +#define ixROM_SW_DATA_46 0xc06000dc +#define ixROM_SW_DATA_47 0xc06000e0 +#define ixROM_SW_DATA_48 0xc06000e4 +#define ixROM_SW_DATA_49 0xc06000e8 +#define ixROM_SW_DATA_50 0xc06000ec +#define ixROM_SW_DATA_51 0xc06000f0 +#define ixROM_SW_DATA_52 0xc06000f4 +#define ixROM_SW_DATA_53 0xc06000f8 +#define ixROM_SW_DATA_54 0xc06000fc +#define ixROM_SW_DATA_55 0xc0600100 +#define ixROM_SW_DATA_56 0xc0600104 +#define ixROM_SW_DATA_57 0xc0600108 +#define ixROM_SW_DATA_58 0xc060010c +#define ixROM_SW_DATA_59 0xc0600110 +#define ixROM_SW_DATA_60 0xc0600114 +#define ixROM_SW_DATA_61 0xc0600118 +#define ixROM_SW_DATA_62 0xc060011c +#define ixROM_SW_DATA_63 0xc0600120 +#define ixROM_SW_DATA_64 0xc0600124 +#define mmGC_CAC_CGTT_CLK_CTRL 0x3292 +#define mmSE_CAC_CGTT_CLK_CTRL 0x3293 +#define mmGC_CAC_LKG_AGGR_LOWER 0x3296 +#define mmGC_CAC_LKG_AGGR_UPPER 0x3297 +#define ixGC_CAC_WEIGHT_CU_0 0x32 +#define ixGC_CAC_WEIGHT_CU_1 0x33 +#define ixGC_CAC_WEIGHT_CU_2 0x34 +#define ixGC_CAC_WEIGHT_CU_3 0x35 +#define ixGC_CAC_WEIGHT_CU_4 0x36 +#define ixGC_CAC_WEIGHT_CU_5 0x37 +#define ixGC_CAC_WEIGHT_CU_6 0x38 +#define ixGC_CAC_WEIGHT_CU_7 0x39 +#define ixGC_CAC_ACC_CU0 0xba +#define ixGC_CAC_ACC_CU1 0xbb +#define ixGC_CAC_ACC_CU2 0xbc +#define ixGC_CAC_ACC_CU3 0xbd +#define ixGC_CAC_ACC_CU4 0xbe +#define ixGC_CAC_ACC_CU5 0xbf +#define ixGC_CAC_ACC_CU6 0xc0 +#define ixGC_CAC_ACC_CU7 0xc1 +#define ixGC_CAC_ACC_CU8 0xc2 +#define ixGC_CAC_ACC_CU9 0xc3 +#define ixGC_CAC_ACC_CU10 0xc4 +#define ixGC_CAC_ACC_CU11 0xc5 +#define ixGC_CAC_ACC_CU12 0xc6 +#define ixGC_CAC_ACC_CU13 0xc7 +#define ixGC_CAC_ACC_CU14 0xc8 +#define ixGC_CAC_ACC_CU15 0xc9 +#define ixGC_CAC_OVRD_CU 0xe7 + +#endif /* SMU_7_1_3_D_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h new file mode 100644 index 000000000000..f19c4208d963 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h @@ -0,0 +1,1282 @@ +/* + * SMU_7_1_3 Register documentation + * + * Copyright (C) 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef SMU_7_1_3_ENUM_H +#define SMU_7_1_3_ENUM_H + +#define CG_SRBM_START_ADDR 0x600 +#define CG_SRBM_END_ADDR 0x8ff +#define RCU_CCF_DWORDS0 0xa0 +#define RCU_CCF_BITS0 0x1400 +#define RCU_SAM_BYTES 0x2c +#define RCU_SAM_RTL_BYTES 0x2c +#define RCU_SMU_BYTES 0x14 +#define RCU_SMU_RTL_BYTES 0x14 +#define SFP_CHAIN_ADDR 0x1 +#define SFP_SADR 0x0 +#define SFP_EADR 0x37f +#define SAMU_KEY_CHAIN_ADR 0x0 +#define SAMU_KEY_SADR 0x280 +#define SAMU_KEY_EADR 0x2ab +#define SMU_KEY_CHAIN_ADR 0x0 +#define SMU_KEY_SADR 0x2ac +#define SMU_KEY_EADR 0x2bf +#define SMC_MSG_TEST 0x1 +#define SMC_MSG_PHY_LN_OFF 0x2 +#define SMC_MSG_PHY_LN_ON 0x3 +#define SMC_MSG_DDI_PHY_OFF 0x4 +#define SMC_MSG_DDI_PHY_ON 0x5 +#define SMC_MSG_CASCADE_PLL_OFF 0x6 +#define SMC_MSG_CASCADE_PLL_ON 0x7 +#define SMC_MSG_PWR_OFF_x16 0x8 +#define SMC_MSG_CONFIG_LCLK_DPM 0x9 +#define SMC_MSG_FLUSH_DATA_CACHE 0xa +#define SMC_MSG_FLUSH_INSTRUCTION_CACHE 0xb +#define SMC_MSG_CONFIG_VPC_ACCUMULATOR 0xc +#define SMC_MSG_CONFIG_BAPM 0xd +#define SMC_MSG_CONFIG_TDC_LIMIT 0xe +#define SMC_MSG_CONFIG_LPMx 0xf +#define SMC_MSG_CONFIG_HTC_LIMIT 0x10 +#define SMC_MSG_CONFIG_THERMAL_CNTL 0x11 +#define SMC_MSG_CONFIG_VOLTAGE_CNTL 0x12 +#define SMC_MSG_CONFIG_TDP_CNTL 0x13 +#define SMC_MSG_EN_PM_CNTL 0x14 +#define SMC_MSG_DIS_PM_CNTL 0x15 +#define SMC_MSG_CONFIG_NBDPM 0x16 +#define SMC_MSG_CONFIG_LOADLINE 0x17 +#define SMC_MSG_ADJUST_LOADLINE 0x18 +#define SMC_MSG_RESET 0x20 +#define SMC_MSG_VOLTAGE 0x25 +#define SMC_VERSION_MAJOR 0x7 +#define SMC_VERSION_MINOR 0x0 +#define SMC_HEADER_SIZE 0x40 +#define ROM_SIGNATURE 0xaa55 +typedef enum SurfaceEndian { + ENDIAN_NONE = 0x0, + ENDIAN_8IN16 = 0x1, + ENDIAN_8IN32 = 0x2, + ENDIAN_8IN64 = 0x3, +} SurfaceEndian; +typedef enum ArrayMode { + ARRAY_LINEAR_GENERAL = 0x0, + ARRAY_LINEAR_ALIGNED = 0x1, + ARRAY_1D_TILED_THIN1 = 0x2, + ARRAY_1D_TILED_THICK = 0x3, + ARRAY_2D_TILED_THIN1 = 0x4, + ARRAY_PRT_TILED_THIN1 = 0x5, + ARRAY_PRT_2D_TILED_THIN1 = 0x6, + ARRAY_2D_TILED_THICK = 0x7, + ARRAY_2D_TILED_XTHICK = 0x8, + ARRAY_PRT_TILED_THICK = 0x9, + ARRAY_PRT_2D_TILED_THICK = 0xa, + ARRAY_PRT_3D_TILED_THIN1 = 0xb, + ARRAY_3D_TILED_THIN1 = 0xc, + ARRAY_3D_TILED_THICK = 0xd, + ARRAY_3D_TILED_XTHICK = 0xe, + ARRAY_PRT_3D_TILED_THICK = 0xf, +} ArrayMode; +typedef enum PipeTiling { + CONFIG_1_PIPE = 0x0, + CONFIG_2_PIPE = 0x1, + CONFIG_4_PIPE = 0x2, + CONFIG_8_PIPE = 0x3, +} PipeTiling; +typedef enum BankTiling { + CONFIG_4_BANK = 0x0, + CONFIG_8_BANK = 0x1, +} BankTiling; +typedef enum GroupInterleave { + CONFIG_256B_GROUP = 0x0, + CONFIG_512B_GROUP = 0x1, +} GroupInterleave; +typedef enum RowTiling { + CONFIG_1KB_ROW = 0x0, + CONFIG_2KB_ROW = 0x1, + CONFIG_4KB_ROW = 0x2, + CONFIG_8KB_ROW = 0x3, + CONFIG_1KB_ROW_OPT = 0x4, + CONFIG_2KB_ROW_OPT = 0x5, + CONFIG_4KB_ROW_OPT = 0x6, + CONFIG_8KB_ROW_OPT = 0x7, +} RowTiling; +typedef enum BankSwapBytes { + CONFIG_128B_SWAPS = 0x0, + CONFIG_256B_SWAPS = 0x1, + CONFIG_512B_SWAPS = 0x2, + CONFIG_1KB_SWAPS = 0x3, +} BankSwapBytes; +typedef enum SampleSplitBytes { + CONFIG_1KB_SPLIT = 0x0, + CONFIG_2KB_SPLIT = 0x1, + CONFIG_4KB_SPLIT = 0x2, + CONFIG_8KB_SPLIT = 0x3, +} SampleSplitBytes; +typedef enum NumPipes { + ADDR_CONFIG_1_PIPE = 0x0, + ADDR_CONFIG_2_PIPE = 0x1, + ADDR_CONFIG_4_PIPE = 0x2, + ADDR_CONFIG_8_PIPE = 0x3, +} NumPipes; +typedef enum PipeInterleaveSize { + ADDR_CONFIG_PIPE_INTERLEAVE_256B = 0x0, + ADDR_CONFIG_PIPE_INTERLEAVE_512B = 0x1, +} PipeInterleaveSize; +typedef enum BankInterleaveSize { + ADDR_CONFIG_BANK_INTERLEAVE_1 = 0x0, + 
ADDR_CONFIG_BANK_INTERLEAVE_2 = 0x1, + ADDR_CONFIG_BANK_INTERLEAVE_4 = 0x2, + ADDR_CONFIG_BANK_INTERLEAVE_8 = 0x3, +} BankInterleaveSize; +typedef enum NumShaderEngines { + ADDR_CONFIG_1_SHADER_ENGINE = 0x0, + ADDR_CONFIG_2_SHADER_ENGINE = 0x1, +} NumShaderEngines; +typedef enum ShaderEngineTileSize { + ADDR_CONFIG_SE_TILE_16 = 0x0, + ADDR_CONFIG_SE_TILE_32 = 0x1, +} ShaderEngineTileSize; +typedef enum NumGPUs { + ADDR_CONFIG_1_GPU = 0x0, + ADDR_CONFIG_2_GPU = 0x1, + ADDR_CONFIG_4_GPU = 0x2, +} NumGPUs; +typedef enum MultiGPUTileSize { + ADDR_CONFIG_GPU_TILE_16 = 0x0, + ADDR_CONFIG_GPU_TILE_32 = 0x1, + ADDR_CONFIG_GPU_TILE_64 = 0x2, + ADDR_CONFIG_GPU_TILE_128 = 0x3, +} MultiGPUTileSize; +typedef enum RowSize { + ADDR_CONFIG_1KB_ROW = 0x0, + ADDR_CONFIG_2KB_ROW = 0x1, + ADDR_CONFIG_4KB_ROW = 0x2, +} RowSize; +typedef enum NumLowerPipes { + ADDR_CONFIG_1_LOWER_PIPES = 0x0, + ADDR_CONFIG_2_LOWER_PIPES = 0x1, +} NumLowerPipes; +typedef enum DebugBlockId { + DBG_CLIENT_BLKID_RESERVED = 0x0, + DBG_CLIENT_BLKID_dbg = 0x1, + DBG_CLIENT_BLKID_scf2 = 0x2, + DBG_CLIENT_BLKID_mcd5_0 = 0x3, + DBG_CLIENT_BLKID_mcd5_1 = 0x4, + DBG_CLIENT_BLKID_mcd6_0 = 0x5, + DBG_CLIENT_BLKID_mcd6_1 = 0x6, + DBG_CLIENT_BLKID_mcd7_0 = 0x7, + DBG_CLIENT_BLKID_mcd7_1 = 0x8, + DBG_CLIENT_BLKID_vmc = 0x9, + DBG_CLIENT_BLKID_sx30 = 0xa, + DBG_CLIENT_BLKID_mcd2_0 = 0xb, + DBG_CLIENT_BLKID_mcd2_1 = 0xc, + DBG_CLIENT_BLKID_bci1 = 0xd, + DBG_CLIENT_BLKID_xdma_dbg_client_wrapper = 0xe, + DBG_CLIENT_BLKID_mcc0 = 0xf, + DBG_CLIENT_BLKID_uvdf_0 = 0x10, + DBG_CLIENT_BLKID_uvdf_1 = 0x11, + DBG_CLIENT_BLKID_uvdf_2 = 0x12, + DBG_CLIENT_BLKID_bci0 = 0x13, + DBG_CLIENT_BLKID_vcec0_0 = 0x14, + DBG_CLIENT_BLKID_cb100 = 0x15, + DBG_CLIENT_BLKID_cb001 = 0x16, + DBG_CLIENT_BLKID_cb002 = 0x17, + DBG_CLIENT_BLKID_cb003 = 0x18, + DBG_CLIENT_BLKID_mcd4_0 = 0x19, + DBG_CLIENT_BLKID_mcd4_1 = 0x1a, + DBG_CLIENT_BLKID_tmonw00 = 0x1b, + DBG_CLIENT_BLKID_cb101 = 0x1c, + DBG_CLIENT_BLKID_cb102 = 0x1d, + DBG_CLIENT_BLKID_cb103 = 0x1e, + DBG_CLIENT_BLKID_sx10 = 0x1f, + DBG_CLIENT_BLKID_cb301 = 0x20, + DBG_CLIENT_BLKID_cb302 = 0x21, + DBG_CLIENT_BLKID_cb303 = 0x22, + DBG_CLIENT_BLKID_tmonw01 = 0x23, + DBG_CLIENT_BLKID_tmonw02 = 0x24, + DBG_CLIENT_BLKID_vcea0_0 = 0x25, + DBG_CLIENT_BLKID_vcea0_1 = 0x26, + DBG_CLIENT_BLKID_vcea0_2 = 0x27, + DBG_CLIENT_BLKID_vcea0_3 = 0x28, + DBG_CLIENT_BLKID_scf1 = 0x29, + DBG_CLIENT_BLKID_sx20 = 0x2a, + DBG_CLIENT_BLKID_spim1 = 0x2b, + DBG_CLIENT_BLKID_scb1 = 0x2c, + DBG_CLIENT_BLKID_pa10 = 0x2d, + DBG_CLIENT_BLKID_pa00 = 0x2e, + DBG_CLIENT_BLKID_gmcon = 0x2f, + DBG_CLIENT_BLKID_mcb = 0x30, + DBG_CLIENT_BLKID_vgt0 = 0x31, + DBG_CLIENT_BLKID_pc0 = 0x32, + DBG_CLIENT_BLKID_bci2 = 0x33, + DBG_CLIENT_BLKID_uvdb_0 = 0x34, + DBG_CLIENT_BLKID_spim3 = 0x35, + DBG_CLIENT_BLKID_scb3 = 0x36, + DBG_CLIENT_BLKID_cpc_0 = 0x37, + DBG_CLIENT_BLKID_cpc_1 = 0x38, + DBG_CLIENT_BLKID_uvdm_0 = 0x39, + DBG_CLIENT_BLKID_uvdm_1 = 0x3a, + DBG_CLIENT_BLKID_uvdm_2 = 0x3b, + DBG_CLIENT_BLKID_uvdm_3 = 0x3c, + DBG_CLIENT_BLKID_cb000 = 0x3d, + DBG_CLIENT_BLKID_spim0 = 0x3e, + DBG_CLIENT_BLKID_scb0 = 0x3f, + DBG_CLIENT_BLKID_mcc2 = 0x40, + DBG_CLIENT_BLKID_ds0 = 0x41, + DBG_CLIENT_BLKID_srbm = 0x42, + DBG_CLIENT_BLKID_ih = 0x43, + DBG_CLIENT_BLKID_sem = 0x44, + DBG_CLIENT_BLKID_sdma_0 = 0x45, + DBG_CLIENT_BLKID_sdma_1 = 0x46, + DBG_CLIENT_BLKID_hdp = 0x47, + DBG_CLIENT_BLKID_acp_0 = 0x48, + DBG_CLIENT_BLKID_acp_1 = 0x49, + DBG_CLIENT_BLKID_cb200 = 0x4a, + DBG_CLIENT_BLKID_scf3 = 0x4b, + DBG_CLIENT_BLKID_bci3 = 0x4c, + DBG_CLIENT_BLKID_mcd0_0 = 0x4d, + 
DBG_CLIENT_BLKID_mcd0_1 = 0x4e, + DBG_CLIENT_BLKID_pa11 = 0x4f, + DBG_CLIENT_BLKID_pa01 = 0x50, + DBG_CLIENT_BLKID_cb201 = 0x51, + DBG_CLIENT_BLKID_cb202 = 0x52, + DBG_CLIENT_BLKID_cb203 = 0x53, + DBG_CLIENT_BLKID_spim2 = 0x54, + DBG_CLIENT_BLKID_scb2 = 0x55, + DBG_CLIENT_BLKID_vgt2 = 0x56, + DBG_CLIENT_BLKID_pc2 = 0x57, + DBG_CLIENT_BLKID_smu_0 = 0x58, + DBG_CLIENT_BLKID_smu_1 = 0x59, + DBG_CLIENT_BLKID_smu_2 = 0x5a, + DBG_CLIENT_BLKID_cb1 = 0x5b, + DBG_CLIENT_BLKID_ia0 = 0x5c, + DBG_CLIENT_BLKID_wd = 0x5d, + DBG_CLIENT_BLKID_ia1 = 0x5e, + DBG_CLIENT_BLKID_scf0 = 0x5f, + DBG_CLIENT_BLKID_vgt1 = 0x60, + DBG_CLIENT_BLKID_pc1 = 0x61, + DBG_CLIENT_BLKID_cb0 = 0x62, + DBG_CLIENT_BLKID_gdc_one_0 = 0x63, + DBG_CLIENT_BLKID_gdc_one_1 = 0x64, + DBG_CLIENT_BLKID_gdc_one_2 = 0x65, + DBG_CLIENT_BLKID_gdc_one_3 = 0x66, + DBG_CLIENT_BLKID_gdc_one_4 = 0x67, + DBG_CLIENT_BLKID_gdc_one_5 = 0x68, + DBG_CLIENT_BLKID_gdc_one_6 = 0x69, + DBG_CLIENT_BLKID_gdc_one_7 = 0x6a, + DBG_CLIENT_BLKID_gdc_one_8 = 0x6b, + DBG_CLIENT_BLKID_gdc_one_9 = 0x6c, + DBG_CLIENT_BLKID_gdc_one_10 = 0x6d, + DBG_CLIENT_BLKID_gdc_one_11 = 0x6e, + DBG_CLIENT_BLKID_gdc_one_12 = 0x6f, + DBG_CLIENT_BLKID_gdc_one_13 = 0x70, + DBG_CLIENT_BLKID_gdc_one_14 = 0x71, + DBG_CLIENT_BLKID_gdc_one_15 = 0x72, + DBG_CLIENT_BLKID_gdc_one_16 = 0x73, + DBG_CLIENT_BLKID_gdc_one_17 = 0x74, + DBG_CLIENT_BLKID_gdc_one_18 = 0x75, + DBG_CLIENT_BLKID_gdc_one_19 = 0x76, + DBG_CLIENT_BLKID_gdc_one_20 = 0x77, + DBG_CLIENT_BLKID_gdc_one_21 = 0x78, + DBG_CLIENT_BLKID_gdc_one_22 = 0x79, + DBG_CLIENT_BLKID_gdc_one_23 = 0x7a, + DBG_CLIENT_BLKID_gdc_one_24 = 0x7b, + DBG_CLIENT_BLKID_gdc_one_25 = 0x7c, + DBG_CLIENT_BLKID_gdc_one_26 = 0x7d, + DBG_CLIENT_BLKID_gdc_one_27 = 0x7e, + DBG_CLIENT_BLKID_gdc_one_28 = 0x7f, + DBG_CLIENT_BLKID_gdc_one_29 = 0x80, + DBG_CLIENT_BLKID_gdc_one_30 = 0x81, + DBG_CLIENT_BLKID_gdc_one_31 = 0x82, + DBG_CLIENT_BLKID_gdc_one_32 = 0x83, + DBG_CLIENT_BLKID_gdc_one_33 = 0x84, + DBG_CLIENT_BLKID_gdc_one_34 = 0x85, + DBG_CLIENT_BLKID_gdc_one_35 = 0x86, + DBG_CLIENT_BLKID_vceb0_0 = 0x87, + DBG_CLIENT_BLKID_vgt3 = 0x88, + DBG_CLIENT_BLKID_pc3 = 0x89, + DBG_CLIENT_BLKID_mcd3_0 = 0x8a, + DBG_CLIENT_BLKID_mcd3_1 = 0x8b, + DBG_CLIENT_BLKID_uvdu_0 = 0x8c, + DBG_CLIENT_BLKID_uvdu_1 = 0x8d, + DBG_CLIENT_BLKID_uvdu_2 = 0x8e, + DBG_CLIENT_BLKID_uvdu_3 = 0x8f, + DBG_CLIENT_BLKID_uvdu_4 = 0x90, + DBG_CLIENT_BLKID_uvdu_5 = 0x91, + DBG_CLIENT_BLKID_uvdu_6 = 0x92, + DBG_CLIENT_BLKID_cb300 = 0x93, + DBG_CLIENT_BLKID_mcd1_0 = 0x94, + DBG_CLIENT_BLKID_mcd1_1 = 0x95, + DBG_CLIENT_BLKID_sx00 = 0x96, + DBG_CLIENT_BLKID_uvdc_0 = 0x97, + DBG_CLIENT_BLKID_uvdc_1 = 0x98, + DBG_CLIENT_BLKID_mcc3 = 0x99, + DBG_CLIENT_BLKID_mcc4 = 0x9a, + DBG_CLIENT_BLKID_mcc5 = 0x9b, + DBG_CLIENT_BLKID_mcc6 = 0x9c, + DBG_CLIENT_BLKID_mcc7 = 0x9d, + DBG_CLIENT_BLKID_cpg_0 = 0x9e, + DBG_CLIENT_BLKID_cpg_1 = 0x9f, + DBG_CLIENT_BLKID_gck = 0xa0, + DBG_CLIENT_BLKID_mcc1 = 0xa1, + DBG_CLIENT_BLKID_cpf_0 = 0xa2, + DBG_CLIENT_BLKID_cpf_1 = 0xa3, + DBG_CLIENT_BLKID_rlc = 0xa4, + DBG_CLIENT_BLKID_grbm = 0xa5, + DBG_CLIENT_BLKID_sammsp = 0xa6, + DBG_CLIENT_BLKID_dci_pg = 0xa7, + DBG_CLIENT_BLKID_dci_0 = 0xa8, + DBG_CLIENT_BLKID_dccg0_0 = 0xa9, + DBG_CLIENT_BLKID_dccg0_1 = 0xaa, + DBG_CLIENT_BLKID_dcfe01_0 = 0xab, + DBG_CLIENT_BLKID_dcfe02_0 = 0xac, + DBG_CLIENT_BLKID_dcfe03_0 = 0xad, + DBG_CLIENT_BLKID_dcfe04_0 = 0xae, + DBG_CLIENT_BLKID_dcfe05_0 = 0xaf, + DBG_CLIENT_BLKID_dcfe06_0 = 0xb0, + DBG_CLIENT_BLKID_mcq0_0 = 0xb1, + DBG_CLIENT_BLKID_mcq0_1 = 0xb2, + DBG_CLIENT_BLKID_mcq1_0 = 0xb3, + 
DBG_CLIENT_BLKID_mcq1_1 = 0xb4, + DBG_CLIENT_BLKID_mcq2_0 = 0xb5, + DBG_CLIENT_BLKID_mcq2_1 = 0xb6, + DBG_CLIENT_BLKID_mcq3_0 = 0xb7, + DBG_CLIENT_BLKID_mcq3_1 = 0xb8, + DBG_CLIENT_BLKID_mcq4_0 = 0xb9, + DBG_CLIENT_BLKID_mcq4_1 = 0xba, + DBG_CLIENT_BLKID_mcq5_0 = 0xbb, + DBG_CLIENT_BLKID_mcq5_1 = 0xbc, + DBG_CLIENT_BLKID_mcq6_0 = 0xbd, + DBG_CLIENT_BLKID_mcq6_1 = 0xbe, + DBG_CLIENT_BLKID_mcq7_0 = 0xbf, + DBG_CLIENT_BLKID_mcq7_1 = 0xc0, + DBG_CLIENT_BLKID_uvdi_0 = 0xc1, + DBG_CLIENT_BLKID_RESERVED_LAST = 0xc2, +} DebugBlockId; +typedef enum DebugBlockId_OLD { + DBG_BLOCK_ID_RESERVED = 0x0, + DBG_BLOCK_ID_DBG = 0x1, + DBG_BLOCK_ID_VMC = 0x2, + DBG_BLOCK_ID_PDMA = 0x3, + DBG_BLOCK_ID_CG = 0x4, + DBG_BLOCK_ID_SRBM = 0x5, + DBG_BLOCK_ID_GRBM = 0x6, + DBG_BLOCK_ID_RLC = 0x7, + DBG_BLOCK_ID_CSC = 0x8, + DBG_BLOCK_ID_SEM = 0x9, + DBG_BLOCK_ID_IH = 0xa, + DBG_BLOCK_ID_SC = 0xb, + DBG_BLOCK_ID_SQ = 0xc, + DBG_BLOCK_ID_AVP = 0xd, + DBG_BLOCK_ID_GMCON = 0xe, + DBG_BLOCK_ID_SMU = 0xf, + DBG_BLOCK_ID_DMA0 = 0x10, + DBG_BLOCK_ID_DMA1 = 0x11, + DBG_BLOCK_ID_SPIM = 0x12, + DBG_BLOCK_ID_GDS = 0x13, + DBG_BLOCK_ID_SPIS = 0x14, + DBG_BLOCK_ID_UNUSED0 = 0x15, + DBG_BLOCK_ID_PA0 = 0x16, + DBG_BLOCK_ID_PA1 = 0x17, + DBG_BLOCK_ID_CP0 = 0x18, + DBG_BLOCK_ID_CP1 = 0x19, + DBG_BLOCK_ID_CP2 = 0x1a, + DBG_BLOCK_ID_UNUSED1 = 0x1b, + DBG_BLOCK_ID_UVDU = 0x1c, + DBG_BLOCK_ID_UVDM = 0x1d, + DBG_BLOCK_ID_VCE = 0x1e, + DBG_BLOCK_ID_UNUSED2 = 0x1f, + DBG_BLOCK_ID_VGT0 = 0x20, + DBG_BLOCK_ID_VGT1 = 0x21, + DBG_BLOCK_ID_IA = 0x22, + DBG_BLOCK_ID_UNUSED3 = 0x23, + DBG_BLOCK_ID_SCT0 = 0x24, + DBG_BLOCK_ID_SCT1 = 0x25, + DBG_BLOCK_ID_SPM0 = 0x26, + DBG_BLOCK_ID_SPM1 = 0x27, + DBG_BLOCK_ID_TCAA = 0x28, + DBG_BLOCK_ID_TCAB = 0x29, + DBG_BLOCK_ID_TCCA = 0x2a, + DBG_BLOCK_ID_TCCB = 0x2b, + DBG_BLOCK_ID_MCC0 = 0x2c, + DBG_BLOCK_ID_MCC1 = 0x2d, + DBG_BLOCK_ID_MCC2 = 0x2e, + DBG_BLOCK_ID_MCC3 = 0x2f, + DBG_BLOCK_ID_SX0 = 0x30, + DBG_BLOCK_ID_SX1 = 0x31, + DBG_BLOCK_ID_SX2 = 0x32, + DBG_BLOCK_ID_SX3 = 0x33, + DBG_BLOCK_ID_UNUSED4 = 0x34, + DBG_BLOCK_ID_UNUSED5 = 0x35, + DBG_BLOCK_ID_UNUSED6 = 0x36, + DBG_BLOCK_ID_UNUSED7 = 0x37, + DBG_BLOCK_ID_PC0 = 0x38, + DBG_BLOCK_ID_PC1 = 0x39, + DBG_BLOCK_ID_UNUSED8 = 0x3a, + DBG_BLOCK_ID_UNUSED9 = 0x3b, + DBG_BLOCK_ID_UNUSED10 = 0x3c, + DBG_BLOCK_ID_UNUSED11 = 0x3d, + DBG_BLOCK_ID_MCB = 0x3e, + DBG_BLOCK_ID_UNUSED12 = 0x3f, + DBG_BLOCK_ID_SCB0 = 0x40, + DBG_BLOCK_ID_SCB1 = 0x41, + DBG_BLOCK_ID_UNUSED13 = 0x42, + DBG_BLOCK_ID_UNUSED14 = 0x43, + DBG_BLOCK_ID_SCF0 = 0x44, + DBG_BLOCK_ID_SCF1 = 0x45, + DBG_BLOCK_ID_UNUSED15 = 0x46, + DBG_BLOCK_ID_UNUSED16 = 0x47, + DBG_BLOCK_ID_BCI0 = 0x48, + DBG_BLOCK_ID_BCI1 = 0x49, + DBG_BLOCK_ID_BCI2 = 0x4a, + DBG_BLOCK_ID_BCI3 = 0x4b, + DBG_BLOCK_ID_UNUSED17 = 0x4c, + DBG_BLOCK_ID_UNUSED18 = 0x4d, + DBG_BLOCK_ID_UNUSED19 = 0x4e, + DBG_BLOCK_ID_UNUSED20 = 0x4f, + DBG_BLOCK_ID_CB00 = 0x50, + DBG_BLOCK_ID_CB01 = 0x51, + DBG_BLOCK_ID_CB02 = 0x52, + DBG_BLOCK_ID_CB03 = 0x53, + DBG_BLOCK_ID_CB04 = 0x54, + DBG_BLOCK_ID_UNUSED21 = 0x55, + DBG_BLOCK_ID_UNUSED22 = 0x56, + DBG_BLOCK_ID_UNUSED23 = 0x57, + DBG_BLOCK_ID_CB10 = 0x58, + DBG_BLOCK_ID_CB11 = 0x59, + DBG_BLOCK_ID_CB12 = 0x5a, + DBG_BLOCK_ID_CB13 = 0x5b, + DBG_BLOCK_ID_CB14 = 0x5c, + DBG_BLOCK_ID_UNUSED24 = 0x5d, + DBG_BLOCK_ID_UNUSED25 = 0x5e, + DBG_BLOCK_ID_UNUSED26 = 0x5f, + DBG_BLOCK_ID_TCP0 = 0x60, + DBG_BLOCK_ID_TCP1 = 0x61, + DBG_BLOCK_ID_TCP2 = 0x62, + DBG_BLOCK_ID_TCP3 = 0x63, + DBG_BLOCK_ID_TCP4 = 0x64, + DBG_BLOCK_ID_TCP5 = 0x65, + DBG_BLOCK_ID_TCP6 = 0x66, + DBG_BLOCK_ID_TCP7 = 0x67, + DBG_BLOCK_ID_TCP8 = 
0x68, + DBG_BLOCK_ID_TCP9 = 0x69, + DBG_BLOCK_ID_TCP10 = 0x6a, + DBG_BLOCK_ID_TCP11 = 0x6b, + DBG_BLOCK_ID_TCP12 = 0x6c, + DBG_BLOCK_ID_TCP13 = 0x6d, + DBG_BLOCK_ID_TCP14 = 0x6e, + DBG_BLOCK_ID_TCP15 = 0x6f, + DBG_BLOCK_ID_TCP16 = 0x70, + DBG_BLOCK_ID_TCP17 = 0x71, + DBG_BLOCK_ID_TCP18 = 0x72, + DBG_BLOCK_ID_TCP19 = 0x73, + DBG_BLOCK_ID_TCP20 = 0x74, + DBG_BLOCK_ID_TCP21 = 0x75, + DBG_BLOCK_ID_TCP22 = 0x76, + DBG_BLOCK_ID_TCP23 = 0x77, + DBG_BLOCK_ID_TCP_RESERVED0 = 0x78, + DBG_BLOCK_ID_TCP_RESERVED1 = 0x79, + DBG_BLOCK_ID_TCP_RESERVED2 = 0x7a, + DBG_BLOCK_ID_TCP_RESERVED3 = 0x7b, + DBG_BLOCK_ID_TCP_RESERVED4 = 0x7c, + DBG_BLOCK_ID_TCP_RESERVED5 = 0x7d, + DBG_BLOCK_ID_TCP_RESERVED6 = 0x7e, + DBG_BLOCK_ID_TCP_RESERVED7 = 0x7f, + DBG_BLOCK_ID_DB00 = 0x80, + DBG_BLOCK_ID_DB01 = 0x81, + DBG_BLOCK_ID_DB02 = 0x82, + DBG_BLOCK_ID_DB03 = 0x83, + DBG_BLOCK_ID_DB04 = 0x84, + DBG_BLOCK_ID_UNUSED27 = 0x85, + DBG_BLOCK_ID_UNUSED28 = 0x86, + DBG_BLOCK_ID_UNUSED29 = 0x87, + DBG_BLOCK_ID_DB10 = 0x88, + DBG_BLOCK_ID_DB11 = 0x89, + DBG_BLOCK_ID_DB12 = 0x8a, + DBG_BLOCK_ID_DB13 = 0x8b, + DBG_BLOCK_ID_DB14 = 0x8c, + DBG_BLOCK_ID_UNUSED30 = 0x8d, + DBG_BLOCK_ID_UNUSED31 = 0x8e, + DBG_BLOCK_ID_UNUSED32 = 0x8f, + DBG_BLOCK_ID_TCC0 = 0x90, + DBG_BLOCK_ID_TCC1 = 0x91, + DBG_BLOCK_ID_TCC2 = 0x92, + DBG_BLOCK_ID_TCC3 = 0x93, + DBG_BLOCK_ID_TCC4 = 0x94, + DBG_BLOCK_ID_TCC5 = 0x95, + DBG_BLOCK_ID_TCC6 = 0x96, + DBG_BLOCK_ID_TCC7 = 0x97, + DBG_BLOCK_ID_SPS00 = 0x98, + DBG_BLOCK_ID_SPS01 = 0x99, + DBG_BLOCK_ID_SPS02 = 0x9a, + DBG_BLOCK_ID_SPS10 = 0x9b, + DBG_BLOCK_ID_SPS11 = 0x9c, + DBG_BLOCK_ID_SPS12 = 0x9d, + DBG_BLOCK_ID_UNUSED33 = 0x9e, + DBG_BLOCK_ID_UNUSED34 = 0x9f, + DBG_BLOCK_ID_TA00 = 0xa0, + DBG_BLOCK_ID_TA01 = 0xa1, + DBG_BLOCK_ID_TA02 = 0xa2, + DBG_BLOCK_ID_TA03 = 0xa3, + DBG_BLOCK_ID_TA04 = 0xa4, + DBG_BLOCK_ID_TA05 = 0xa5, + DBG_BLOCK_ID_TA06 = 0xa6, + DBG_BLOCK_ID_TA07 = 0xa7, + DBG_BLOCK_ID_TA08 = 0xa8, + DBG_BLOCK_ID_TA09 = 0xa9, + DBG_BLOCK_ID_TA0A = 0xaa, + DBG_BLOCK_ID_TA0B = 0xab, + DBG_BLOCK_ID_UNUSED35 = 0xac, + DBG_BLOCK_ID_UNUSED36 = 0xad, + DBG_BLOCK_ID_UNUSED37 = 0xae, + DBG_BLOCK_ID_UNUSED38 = 0xaf, + DBG_BLOCK_ID_TA10 = 0xb0, + DBG_BLOCK_ID_TA11 = 0xb1, + DBG_BLOCK_ID_TA12 = 0xb2, + DBG_BLOCK_ID_TA13 = 0xb3, + DBG_BLOCK_ID_TA14 = 0xb4, + DBG_BLOCK_ID_TA15 = 0xb5, + DBG_BLOCK_ID_TA16 = 0xb6, + DBG_BLOCK_ID_TA17 = 0xb7, + DBG_BLOCK_ID_TA18 = 0xb8, + DBG_BLOCK_ID_TA19 = 0xb9, + DBG_BLOCK_ID_TA1A = 0xba, + DBG_BLOCK_ID_TA1B = 0xbb, + DBG_BLOCK_ID_UNUSED39 = 0xbc, + DBG_BLOCK_ID_UNUSED40 = 0xbd, + DBG_BLOCK_ID_UNUSED41 = 0xbe, + DBG_BLOCK_ID_UNUSED42 = 0xbf, + DBG_BLOCK_ID_TD00 = 0xc0, + DBG_BLOCK_ID_TD01 = 0xc1, + DBG_BLOCK_ID_TD02 = 0xc2, + DBG_BLOCK_ID_TD03 = 0xc3, + DBG_BLOCK_ID_TD04 = 0xc4, + DBG_BLOCK_ID_TD05 = 0xc5, + DBG_BLOCK_ID_TD06 = 0xc6, + DBG_BLOCK_ID_TD07 = 0xc7, + DBG_BLOCK_ID_TD08 = 0xc8, + DBG_BLOCK_ID_TD09 = 0xc9, + DBG_BLOCK_ID_TD0A = 0xca, + DBG_BLOCK_ID_TD0B = 0xcb, + DBG_BLOCK_ID_UNUSED43 = 0xcc, + DBG_BLOCK_ID_UNUSED44 = 0xcd, + DBG_BLOCK_ID_UNUSED45 = 0xce, + DBG_BLOCK_ID_UNUSED46 = 0xcf, + DBG_BLOCK_ID_TD10 = 0xd0, + DBG_BLOCK_ID_TD11 = 0xd1, + DBG_BLOCK_ID_TD12 = 0xd2, + DBG_BLOCK_ID_TD13 = 0xd3, + DBG_BLOCK_ID_TD14 = 0xd4, + DBG_BLOCK_ID_TD15 = 0xd5, + DBG_BLOCK_ID_TD16 = 0xd6, + DBG_BLOCK_ID_TD17 = 0xd7, + DBG_BLOCK_ID_TD18 = 0xd8, + DBG_BLOCK_ID_TD19 = 0xd9, + DBG_BLOCK_ID_TD1A = 0xda, + DBG_BLOCK_ID_TD1B = 0xdb, + DBG_BLOCK_ID_UNUSED47 = 0xdc, + DBG_BLOCK_ID_UNUSED48 = 0xdd, + DBG_BLOCK_ID_UNUSED49 = 0xde, + DBG_BLOCK_ID_UNUSED50 = 0xdf, + DBG_BLOCK_ID_MCD0 = 0xe0, 
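/*
 * Note on the DebugBlockId_BY2/_BY4/_BY8/_BY16 variants below: each is
 * the same block-ID map at coarser granularity. An enumerator's value is
 * the matching DBG_BLOCK_ID_* value divided by 2, 4, 8 or 16, so only
 * every 2nd/4th/8th/16th block appears, as the representative of its
 * group (e.g. DBG_BLOCK_ID_CP0 = 0x18, _BY2 = 0xc, _BY4 = 0x6,
 * _BY8 = 0x3).
 */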
+ DBG_BLOCK_ID_MCD1 = 0xe1, + DBG_BLOCK_ID_MCD2 = 0xe2, + DBG_BLOCK_ID_MCD3 = 0xe3, + DBG_BLOCK_ID_MCD4 = 0xe4, + DBG_BLOCK_ID_MCD5 = 0xe5, + DBG_BLOCK_ID_UNUSED51 = 0xe6, + DBG_BLOCK_ID_UNUSED52 = 0xe7, +} DebugBlockId_OLD; +typedef enum DebugBlockId_BY2 { + DBG_BLOCK_ID_RESERVED_BY2 = 0x0, + DBG_BLOCK_ID_VMC_BY2 = 0x1, + DBG_BLOCK_ID_CG_BY2 = 0x2, + DBG_BLOCK_ID_GRBM_BY2 = 0x3, + DBG_BLOCK_ID_CSC_BY2 = 0x4, + DBG_BLOCK_ID_IH_BY2 = 0x5, + DBG_BLOCK_ID_SQ_BY2 = 0x6, + DBG_BLOCK_ID_GMCON_BY2 = 0x7, + DBG_BLOCK_ID_DMA0_BY2 = 0x8, + DBG_BLOCK_ID_SPIM_BY2 = 0x9, + DBG_BLOCK_ID_SPIS_BY2 = 0xa, + DBG_BLOCK_ID_PA0_BY2 = 0xb, + DBG_BLOCK_ID_CP0_BY2 = 0xc, + DBG_BLOCK_ID_CP2_BY2 = 0xd, + DBG_BLOCK_ID_UVDU_BY2 = 0xe, + DBG_BLOCK_ID_VCE_BY2 = 0xf, + DBG_BLOCK_ID_VGT0_BY2 = 0x10, + DBG_BLOCK_ID_IA_BY2 = 0x11, + DBG_BLOCK_ID_SCT0_BY2 = 0x12, + DBG_BLOCK_ID_SPM0_BY2 = 0x13, + DBG_BLOCK_ID_TCAA_BY2 = 0x14, + DBG_BLOCK_ID_TCCA_BY2 = 0x15, + DBG_BLOCK_ID_MCC0_BY2 = 0x16, + DBG_BLOCK_ID_MCC2_BY2 = 0x17, + DBG_BLOCK_ID_SX0_BY2 = 0x18, + DBG_BLOCK_ID_SX2_BY2 = 0x19, + DBG_BLOCK_ID_UNUSED4_BY2 = 0x1a, + DBG_BLOCK_ID_UNUSED6_BY2 = 0x1b, + DBG_BLOCK_ID_PC0_BY2 = 0x1c, + DBG_BLOCK_ID_UNUSED8_BY2 = 0x1d, + DBG_BLOCK_ID_UNUSED10_BY2 = 0x1e, + DBG_BLOCK_ID_MCB_BY2 = 0x1f, + DBG_BLOCK_ID_SCB0_BY2 = 0x20, + DBG_BLOCK_ID_UNUSED13_BY2 = 0x21, + DBG_BLOCK_ID_SCF0_BY2 = 0x22, + DBG_BLOCK_ID_UNUSED15_BY2 = 0x23, + DBG_BLOCK_ID_BCI0_BY2 = 0x24, + DBG_BLOCK_ID_BCI2_BY2 = 0x25, + DBG_BLOCK_ID_UNUSED17_BY2 = 0x26, + DBG_BLOCK_ID_UNUSED19_BY2 = 0x27, + DBG_BLOCK_ID_CB00_BY2 = 0x28, + DBG_BLOCK_ID_CB02_BY2 = 0x29, + DBG_BLOCK_ID_CB04_BY2 = 0x2a, + DBG_BLOCK_ID_UNUSED22_BY2 = 0x2b, + DBG_BLOCK_ID_CB10_BY2 = 0x2c, + DBG_BLOCK_ID_CB12_BY2 = 0x2d, + DBG_BLOCK_ID_CB14_BY2 = 0x2e, + DBG_BLOCK_ID_UNUSED25_BY2 = 0x2f, + DBG_BLOCK_ID_TCP0_BY2 = 0x30, + DBG_BLOCK_ID_TCP2_BY2 = 0x31, + DBG_BLOCK_ID_TCP4_BY2 = 0x32, + DBG_BLOCK_ID_TCP6_BY2 = 0x33, + DBG_BLOCK_ID_TCP8_BY2 = 0x34, + DBG_BLOCK_ID_TCP10_BY2 = 0x35, + DBG_BLOCK_ID_TCP12_BY2 = 0x36, + DBG_BLOCK_ID_TCP14_BY2 = 0x37, + DBG_BLOCK_ID_TCP16_BY2 = 0x38, + DBG_BLOCK_ID_TCP18_BY2 = 0x39, + DBG_BLOCK_ID_TCP20_BY2 = 0x3a, + DBG_BLOCK_ID_TCP22_BY2 = 0x3b, + DBG_BLOCK_ID_TCP_RESERVED0_BY2 = 0x3c, + DBG_BLOCK_ID_TCP_RESERVED2_BY2 = 0x3d, + DBG_BLOCK_ID_TCP_RESERVED4_BY2 = 0x3e, + DBG_BLOCK_ID_TCP_RESERVED6_BY2 = 0x3f, + DBG_BLOCK_ID_DB00_BY2 = 0x40, + DBG_BLOCK_ID_DB02_BY2 = 0x41, + DBG_BLOCK_ID_DB04_BY2 = 0x42, + DBG_BLOCK_ID_UNUSED28_BY2 = 0x43, + DBG_BLOCK_ID_DB10_BY2 = 0x44, + DBG_BLOCK_ID_DB12_BY2 = 0x45, + DBG_BLOCK_ID_DB14_BY2 = 0x46, + DBG_BLOCK_ID_UNUSED31_BY2 = 0x47, + DBG_BLOCK_ID_TCC0_BY2 = 0x48, + DBG_BLOCK_ID_TCC2_BY2 = 0x49, + DBG_BLOCK_ID_TCC4_BY2 = 0x4a, + DBG_BLOCK_ID_TCC6_BY2 = 0x4b, + DBG_BLOCK_ID_SPS00_BY2 = 0x4c, + DBG_BLOCK_ID_SPS02_BY2 = 0x4d, + DBG_BLOCK_ID_SPS11_BY2 = 0x4e, + DBG_BLOCK_ID_UNUSED33_BY2 = 0x4f, + DBG_BLOCK_ID_TA00_BY2 = 0x50, + DBG_BLOCK_ID_TA02_BY2 = 0x51, + DBG_BLOCK_ID_TA04_BY2 = 0x52, + DBG_BLOCK_ID_TA06_BY2 = 0x53, + DBG_BLOCK_ID_TA08_BY2 = 0x54, + DBG_BLOCK_ID_TA0A_BY2 = 0x55, + DBG_BLOCK_ID_UNUSED35_BY2 = 0x56, + DBG_BLOCK_ID_UNUSED37_BY2 = 0x57, + DBG_BLOCK_ID_TA10_BY2 = 0x58, + DBG_BLOCK_ID_TA12_BY2 = 0x59, + DBG_BLOCK_ID_TA14_BY2 = 0x5a, + DBG_BLOCK_ID_TA16_BY2 = 0x5b, + DBG_BLOCK_ID_TA18_BY2 = 0x5c, + DBG_BLOCK_ID_TA1A_BY2 = 0x5d, + DBG_BLOCK_ID_UNUSED39_BY2 = 0x5e, + DBG_BLOCK_ID_UNUSED41_BY2 = 0x5f, + DBG_BLOCK_ID_TD00_BY2 = 0x60, + DBG_BLOCK_ID_TD02_BY2 = 0x61, + DBG_BLOCK_ID_TD04_BY2 = 0x62, + DBG_BLOCK_ID_TD06_BY2 = 0x63, + 
DBG_BLOCK_ID_TD08_BY2 = 0x64, + DBG_BLOCK_ID_TD0A_BY2 = 0x65, + DBG_BLOCK_ID_UNUSED43_BY2 = 0x66, + DBG_BLOCK_ID_UNUSED45_BY2 = 0x67, + DBG_BLOCK_ID_TD10_BY2 = 0x68, + DBG_BLOCK_ID_TD12_BY2 = 0x69, + DBG_BLOCK_ID_TD14_BY2 = 0x6a, + DBG_BLOCK_ID_TD16_BY2 = 0x6b, + DBG_BLOCK_ID_TD18_BY2 = 0x6c, + DBG_BLOCK_ID_TD1A_BY2 = 0x6d, + DBG_BLOCK_ID_UNUSED47_BY2 = 0x6e, + DBG_BLOCK_ID_UNUSED49_BY2 = 0x6f, + DBG_BLOCK_ID_MCD0_BY2 = 0x70, + DBG_BLOCK_ID_MCD2_BY2 = 0x71, + DBG_BLOCK_ID_MCD4_BY2 = 0x72, + DBG_BLOCK_ID_UNUSED51_BY2 = 0x73, +} DebugBlockId_BY2; +typedef enum DebugBlockId_BY4 { + DBG_BLOCK_ID_RESERVED_BY4 = 0x0, + DBG_BLOCK_ID_CG_BY4 = 0x1, + DBG_BLOCK_ID_CSC_BY4 = 0x2, + DBG_BLOCK_ID_SQ_BY4 = 0x3, + DBG_BLOCK_ID_DMA0_BY4 = 0x4, + DBG_BLOCK_ID_SPIS_BY4 = 0x5, + DBG_BLOCK_ID_CP0_BY4 = 0x6, + DBG_BLOCK_ID_UVDU_BY4 = 0x7, + DBG_BLOCK_ID_VGT0_BY4 = 0x8, + DBG_BLOCK_ID_SCT0_BY4 = 0x9, + DBG_BLOCK_ID_TCAA_BY4 = 0xa, + DBG_BLOCK_ID_MCC0_BY4 = 0xb, + DBG_BLOCK_ID_SX0_BY4 = 0xc, + DBG_BLOCK_ID_UNUSED4_BY4 = 0xd, + DBG_BLOCK_ID_PC0_BY4 = 0xe, + DBG_BLOCK_ID_UNUSED10_BY4 = 0xf, + DBG_BLOCK_ID_SCB0_BY4 = 0x10, + DBG_BLOCK_ID_SCF0_BY4 = 0x11, + DBG_BLOCK_ID_BCI0_BY4 = 0x12, + DBG_BLOCK_ID_UNUSED17_BY4 = 0x13, + DBG_BLOCK_ID_CB00_BY4 = 0x14, + DBG_BLOCK_ID_CB04_BY4 = 0x15, + DBG_BLOCK_ID_CB10_BY4 = 0x16, + DBG_BLOCK_ID_CB14_BY4 = 0x17, + DBG_BLOCK_ID_TCP0_BY4 = 0x18, + DBG_BLOCK_ID_TCP4_BY4 = 0x19, + DBG_BLOCK_ID_TCP8_BY4 = 0x1a, + DBG_BLOCK_ID_TCP12_BY4 = 0x1b, + DBG_BLOCK_ID_TCP16_BY4 = 0x1c, + DBG_BLOCK_ID_TCP20_BY4 = 0x1d, + DBG_BLOCK_ID_TCP_RESERVED0_BY4 = 0x1e, + DBG_BLOCK_ID_TCP_RESERVED4_BY4 = 0x1f, + DBG_BLOCK_ID_DB_BY4 = 0x20, + DBG_BLOCK_ID_DB04_BY4 = 0x21, + DBG_BLOCK_ID_DB10_BY4 = 0x22, + DBG_BLOCK_ID_DB14_BY4 = 0x23, + DBG_BLOCK_ID_TCC0_BY4 = 0x24, + DBG_BLOCK_ID_TCC4_BY4 = 0x25, + DBG_BLOCK_ID_SPS00_BY4 = 0x26, + DBG_BLOCK_ID_SPS11_BY4 = 0x27, + DBG_BLOCK_ID_TA00_BY4 = 0x28, + DBG_BLOCK_ID_TA04_BY4 = 0x29, + DBG_BLOCK_ID_TA08_BY4 = 0x2a, + DBG_BLOCK_ID_UNUSED35_BY4 = 0x2b, + DBG_BLOCK_ID_TA10_BY4 = 0x2c, + DBG_BLOCK_ID_TA14_BY4 = 0x2d, + DBG_BLOCK_ID_TA18_BY4 = 0x2e, + DBG_BLOCK_ID_UNUSED39_BY4 = 0x2f, + DBG_BLOCK_ID_TD00_BY4 = 0x30, + DBG_BLOCK_ID_TD04_BY4 = 0x31, + DBG_BLOCK_ID_TD08_BY4 = 0x32, + DBG_BLOCK_ID_UNUSED43_BY4 = 0x33, + DBG_BLOCK_ID_TD10_BY4 = 0x34, + DBG_BLOCK_ID_TD14_BY4 = 0x35, + DBG_BLOCK_ID_TD18_BY4 = 0x36, + DBG_BLOCK_ID_UNUSED47_BY4 = 0x37, + DBG_BLOCK_ID_MCD0_BY4 = 0x38, + DBG_BLOCK_ID_MCD4_BY4 = 0x39, +} DebugBlockId_BY4; +typedef enum DebugBlockId_BY8 { + DBG_BLOCK_ID_RESERVED_BY8 = 0x0, + DBG_BLOCK_ID_CSC_BY8 = 0x1, + DBG_BLOCK_ID_DMA0_BY8 = 0x2, + DBG_BLOCK_ID_CP0_BY8 = 0x3, + DBG_BLOCK_ID_VGT0_BY8 = 0x4, + DBG_BLOCK_ID_TCAA_BY8 = 0x5, + DBG_BLOCK_ID_SX0_BY8 = 0x6, + DBG_BLOCK_ID_PC0_BY8 = 0x7, + DBG_BLOCK_ID_SCB0_BY8 = 0x8, + DBG_BLOCK_ID_BCI0_BY8 = 0x9, + DBG_BLOCK_ID_CB00_BY8 = 0xa, + DBG_BLOCK_ID_CB10_BY8 = 0xb, + DBG_BLOCK_ID_TCP0_BY8 = 0xc, + DBG_BLOCK_ID_TCP8_BY8 = 0xd, + DBG_BLOCK_ID_TCP16_BY8 = 0xe, + DBG_BLOCK_ID_TCP_RESERVED0_BY8 = 0xf, + DBG_BLOCK_ID_DB00_BY8 = 0x10, + DBG_BLOCK_ID_DB10_BY8 = 0x11, + DBG_BLOCK_ID_TCC0_BY8 = 0x12, + DBG_BLOCK_ID_SPS00_BY8 = 0x13, + DBG_BLOCK_ID_TA00_BY8 = 0x14, + DBG_BLOCK_ID_TA08_BY8 = 0x15, + DBG_BLOCK_ID_TA10_BY8 = 0x16, + DBG_BLOCK_ID_TA18_BY8 = 0x17, + DBG_BLOCK_ID_TD00_BY8 = 0x18, + DBG_BLOCK_ID_TD08_BY8 = 0x19, + DBG_BLOCK_ID_TD10_BY8 = 0x1a, + DBG_BLOCK_ID_TD18_BY8 = 0x1b, + DBG_BLOCK_ID_MCD0_BY8 = 0x1c, +} DebugBlockId_BY8; +typedef enum DebugBlockId_BY16 { + DBG_BLOCK_ID_RESERVED_BY16 = 0x0, + 
DBG_BLOCK_ID_DMA0_BY16 = 0x1, + DBG_BLOCK_ID_VGT0_BY16 = 0x2, + DBG_BLOCK_ID_SX0_BY16 = 0x3, + DBG_BLOCK_ID_SCB0_BY16 = 0x4, + DBG_BLOCK_ID_CB00_BY16 = 0x5, + DBG_BLOCK_ID_TCP0_BY16 = 0x6, + DBG_BLOCK_ID_TCP16_BY16 = 0x7, + DBG_BLOCK_ID_DB00_BY16 = 0x8, + DBG_BLOCK_ID_TCC0_BY16 = 0x9, + DBG_BLOCK_ID_TA00_BY16 = 0xa, + DBG_BLOCK_ID_TA10_BY16 = 0xb, + DBG_BLOCK_ID_TD00_BY16 = 0xc, + DBG_BLOCK_ID_TD10_BY16 = 0xd, + DBG_BLOCK_ID_MCD0_BY16 = 0xe, +} DebugBlockId_BY16; +typedef enum ColorTransform { + DCC_CT_AUTO = 0x0, + DCC_CT_NONE = 0x1, + ABGR_TO_A_BG_G_RB = 0x2, + BGRA_TO_BG_G_RB_A = 0x3, +} ColorTransform; +typedef enum CompareRef { + REF_NEVER = 0x0, + REF_LESS = 0x1, + REF_EQUAL = 0x2, + REF_LEQUAL = 0x3, + REF_GREATER = 0x4, + REF_NOTEQUAL = 0x5, + REF_GEQUAL = 0x6, + REF_ALWAYS = 0x7, +} CompareRef; +typedef enum ReadSize { + READ_256_BITS = 0x0, + READ_512_BITS = 0x1, +} ReadSize; +typedef enum DepthFormat { + DEPTH_INVALID = 0x0, + DEPTH_16 = 0x1, + DEPTH_X8_24 = 0x2, + DEPTH_8_24 = 0x3, + DEPTH_X8_24_FLOAT = 0x4, + DEPTH_8_24_FLOAT = 0x5, + DEPTH_32_FLOAT = 0x6, + DEPTH_X24_8_32_FLOAT = 0x7, +} DepthFormat; +typedef enum ZFormat { + Z_INVALID = 0x0, + Z_16 = 0x1, + Z_24 = 0x2, + Z_32_FLOAT = 0x3, +} ZFormat; +typedef enum StencilFormat { + STENCIL_INVALID = 0x0, + STENCIL_8 = 0x1, +} StencilFormat; +typedef enum CmaskMode { + CMASK_CLEAR_NONE = 0x0, + CMASK_CLEAR_ONE = 0x1, + CMASK_CLEAR_ALL = 0x2, + CMASK_ANY_EXPANDED = 0x3, + CMASK_ALPHA0_FRAG1 = 0x4, + CMASK_ALPHA0_FRAG2 = 0x5, + CMASK_ALPHA0_FRAG4 = 0x6, + CMASK_ALPHA0_FRAGS = 0x7, + CMASK_ALPHA1_FRAG1 = 0x8, + CMASK_ALPHA1_FRAG2 = 0x9, + CMASK_ALPHA1_FRAG4 = 0xa, + CMASK_ALPHA1_FRAGS = 0xb, + CMASK_ALPHAX_FRAG1 = 0xc, + CMASK_ALPHAX_FRAG2 = 0xd, + CMASK_ALPHAX_FRAG4 = 0xe, + CMASK_ALPHAX_FRAGS = 0xf, +} CmaskMode; +typedef enum QuadExportFormat { + EXPORT_UNUSED = 0x0, + EXPORT_32_R = 0x1, + EXPORT_32_GR = 0x2, + EXPORT_32_AR = 0x3, + EXPORT_FP16_ABGR = 0x4, + EXPORT_UNSIGNED16_ABGR = 0x5, + EXPORT_SIGNED16_ABGR = 0x6, + EXPORT_32_ABGR = 0x7, +} QuadExportFormat; +typedef enum QuadExportFormatOld { + EXPORT_4P_32BPC_ABGR = 0x0, + EXPORT_4P_16BPC_ABGR = 0x1, + EXPORT_4P_32BPC_GR = 0x2, + EXPORT_4P_32BPC_AR = 0x3, + EXPORT_2P_32BPC_ABGR = 0x4, + EXPORT_8P_32BPC_R = 0x5, +} QuadExportFormatOld; +typedef enum ColorFormat { + COLOR_INVALID = 0x0, + COLOR_8 = 0x1, + COLOR_16 = 0x2, + COLOR_8_8 = 0x3, + COLOR_32 = 0x4, + COLOR_16_16 = 0x5, + COLOR_10_11_11 = 0x6, + COLOR_11_11_10 = 0x7, + COLOR_10_10_10_2 = 0x8, + COLOR_2_10_10_10 = 0x9, + COLOR_8_8_8_8 = 0xa, + COLOR_32_32 = 0xb, + COLOR_16_16_16_16 = 0xc, + COLOR_RESERVED_13 = 0xd, + COLOR_32_32_32_32 = 0xe, + COLOR_RESERVED_15 = 0xf, + COLOR_5_6_5 = 0x10, + COLOR_1_5_5_5 = 0x11, + COLOR_5_5_5_1 = 0x12, + COLOR_4_4_4_4 = 0x13, + COLOR_8_24 = 0x14, + COLOR_24_8 = 0x15, + COLOR_X24_8_32_FLOAT = 0x16, + COLOR_RESERVED_23 = 0x17, +} ColorFormat; +typedef enum SurfaceFormat { + FMT_INVALID = 0x0, + FMT_8 = 0x1, + FMT_16 = 0x2, + FMT_8_8 = 0x3, + FMT_32 = 0x4, + FMT_16_16 = 0x5, + FMT_10_11_11 = 0x6, + FMT_11_11_10 = 0x7, + FMT_10_10_10_2 = 0x8, + FMT_2_10_10_10 = 0x9, + FMT_8_8_8_8 = 0xa, + FMT_32_32 = 0xb, + FMT_16_16_16_16 = 0xc, + FMT_32_32_32 = 0xd, + FMT_32_32_32_32 = 0xe, + FMT_RESERVED_4 = 0xf, + FMT_5_6_5 = 0x10, + FMT_1_5_5_5 = 0x11, + FMT_5_5_5_1 = 0x12, + FMT_4_4_4_4 = 0x13, + FMT_8_24 = 0x14, + FMT_24_8 = 0x15, + FMT_X24_8_32_FLOAT = 0x16, + FMT_RESERVED_33 = 0x17, + FMT_11_11_10_FLOAT = 0x18, + FMT_16_FLOAT = 0x19, + FMT_32_FLOAT = 0x1a, + FMT_16_16_FLOAT = 0x1b, + 
FMT_8_24_FLOAT = 0x1c, + FMT_24_8_FLOAT = 0x1d, + FMT_32_32_FLOAT = 0x1e, + FMT_10_11_11_FLOAT = 0x1f, + FMT_16_16_16_16_FLOAT = 0x20, + FMT_3_3_2 = 0x21, + FMT_6_5_5 = 0x22, + FMT_32_32_32_32_FLOAT = 0x23, + FMT_RESERVED_36 = 0x24, + FMT_1 = 0x25, + FMT_1_REVERSED = 0x26, + FMT_GB_GR = 0x27, + FMT_BG_RG = 0x28, + FMT_32_AS_8 = 0x29, + FMT_32_AS_8_8 = 0x2a, + FMT_5_9_9_9_SHAREDEXP = 0x2b, + FMT_8_8_8 = 0x2c, + FMT_16_16_16 = 0x2d, + FMT_16_16_16_FLOAT = 0x2e, + FMT_4_4 = 0x2f, + FMT_32_32_32_FLOAT = 0x30, + FMT_BC1 = 0x31, + FMT_BC2 = 0x32, + FMT_BC3 = 0x33, + FMT_BC4 = 0x34, + FMT_BC5 = 0x35, + FMT_BC6 = 0x36, + FMT_BC7 = 0x37, + FMT_32_AS_32_32_32_32 = 0x38, + FMT_APC3 = 0x39, + FMT_APC4 = 0x3a, + FMT_APC5 = 0x3b, + FMT_APC6 = 0x3c, + FMT_APC7 = 0x3d, + FMT_CTX1 = 0x3e, + FMT_RESERVED_63 = 0x3f, +} SurfaceFormat; +typedef enum BUF_DATA_FORMAT { + BUF_DATA_FORMAT_INVALID = 0x0, + BUF_DATA_FORMAT_8 = 0x1, + BUF_DATA_FORMAT_16 = 0x2, + BUF_DATA_FORMAT_8_8 = 0x3, + BUF_DATA_FORMAT_32 = 0x4, + BUF_DATA_FORMAT_16_16 = 0x5, + BUF_DATA_FORMAT_10_11_11 = 0x6, + BUF_DATA_FORMAT_11_11_10 = 0x7, + BUF_DATA_FORMAT_10_10_10_2 = 0x8, + BUF_DATA_FORMAT_2_10_10_10 = 0x9, + BUF_DATA_FORMAT_8_8_8_8 = 0xa, + BUF_DATA_FORMAT_32_32 = 0xb, + BUF_DATA_FORMAT_16_16_16_16 = 0xc, + BUF_DATA_FORMAT_32_32_32 = 0xd, + BUF_DATA_FORMAT_32_32_32_32 = 0xe, + BUF_DATA_FORMAT_RESERVED_15 = 0xf, +} BUF_DATA_FORMAT; +typedef enum IMG_DATA_FORMAT { + IMG_DATA_FORMAT_INVALID = 0x0, + IMG_DATA_FORMAT_8 = 0x1, + IMG_DATA_FORMAT_16 = 0x2, + IMG_DATA_FORMAT_8_8 = 0x3, + IMG_DATA_FORMAT_32 = 0x4, + IMG_DATA_FORMAT_16_16 = 0x5, + IMG_DATA_FORMAT_10_11_11 = 0x6, + IMG_DATA_FORMAT_11_11_10 = 0x7, + IMG_DATA_FORMAT_10_10_10_2 = 0x8, + IMG_DATA_FORMAT_2_10_10_10 = 0x9, + IMG_DATA_FORMAT_8_8_8_8 = 0xa, + IMG_DATA_FORMAT_32_32 = 0xb, + IMG_DATA_FORMAT_16_16_16_16 = 0xc, + IMG_DATA_FORMAT_32_32_32 = 0xd, + IMG_DATA_FORMAT_32_32_32_32 = 0xe, + IMG_DATA_FORMAT_RESERVED_15 = 0xf, + IMG_DATA_FORMAT_5_6_5 = 0x10, + IMG_DATA_FORMAT_1_5_5_5 = 0x11, + IMG_DATA_FORMAT_5_5_5_1 = 0x12, + IMG_DATA_FORMAT_4_4_4_4 = 0x13, + IMG_DATA_FORMAT_8_24 = 0x14, + IMG_DATA_FORMAT_24_8 = 0x15, + IMG_DATA_FORMAT_X24_8_32 = 0x16, + IMG_DATA_FORMAT_RESERVED_23 = 0x17, + IMG_DATA_FORMAT_RESERVED_24 = 0x18, + IMG_DATA_FORMAT_RESERVED_25 = 0x19, + IMG_DATA_FORMAT_RESERVED_26 = 0x1a, + IMG_DATA_FORMAT_RESERVED_27 = 0x1b, + IMG_DATA_FORMAT_RESERVED_28 = 0x1c, + IMG_DATA_FORMAT_RESERVED_29 = 0x1d, + IMG_DATA_FORMAT_RESERVED_30 = 0x1e, + IMG_DATA_FORMAT_RESERVED_31 = 0x1f, + IMG_DATA_FORMAT_GB_GR = 0x20, + IMG_DATA_FORMAT_BG_RG = 0x21, + IMG_DATA_FORMAT_5_9_9_9 = 0x22, + IMG_DATA_FORMAT_BC1 = 0x23, + IMG_DATA_FORMAT_BC2 = 0x24, + IMG_DATA_FORMAT_BC3 = 0x25, + IMG_DATA_FORMAT_BC4 = 0x26, + IMG_DATA_FORMAT_BC5 = 0x27, + IMG_DATA_FORMAT_BC6 = 0x28, + IMG_DATA_FORMAT_BC7 = 0x29, + IMG_DATA_FORMAT_RESERVED_42 = 0x2a, + IMG_DATA_FORMAT_RESERVED_43 = 0x2b, + IMG_DATA_FORMAT_FMASK8_S2_F1 = 0x2c, + IMG_DATA_FORMAT_FMASK8_S4_F1 = 0x2d, + IMG_DATA_FORMAT_FMASK8_S8_F1 = 0x2e, + IMG_DATA_FORMAT_FMASK8_S2_F2 = 0x2f, + IMG_DATA_FORMAT_FMASK8_S4_F2 = 0x30, + IMG_DATA_FORMAT_FMASK8_S4_F4 = 0x31, + IMG_DATA_FORMAT_FMASK16_S16_F1 = 0x32, + IMG_DATA_FORMAT_FMASK16_S8_F2 = 0x33, + IMG_DATA_FORMAT_FMASK32_S16_F2 = 0x34, + IMG_DATA_FORMAT_FMASK32_S8_F4 = 0x35, + IMG_DATA_FORMAT_FMASK32_S8_F8 = 0x36, + IMG_DATA_FORMAT_FMASK64_S16_F4 = 0x37, + IMG_DATA_FORMAT_FMASK64_S16_F8 = 0x38, + IMG_DATA_FORMAT_4_4 = 0x39, + IMG_DATA_FORMAT_6_5_5 = 0x3a, + IMG_DATA_FORMAT_1 = 0x3b, + IMG_DATA_FORMAT_1_REVERSED 
= 0x3c, + IMG_DATA_FORMAT_32_AS_8 = 0x3d, + IMG_DATA_FORMAT_32_AS_8_8 = 0x3e, + IMG_DATA_FORMAT_32_AS_32_32_32_32 = 0x3f, +} IMG_DATA_FORMAT; +typedef enum BUF_NUM_FORMAT { + BUF_NUM_FORMAT_UNORM = 0x0, + BUF_NUM_FORMAT_SNORM = 0x1, + BUF_NUM_FORMAT_USCALED = 0x2, + BUF_NUM_FORMAT_SSCALED = 0x3, + BUF_NUM_FORMAT_UINT = 0x4, + BUF_NUM_FORMAT_SINT = 0x5, + BUF_NUM_FORMAT_RESERVED_6 = 0x6, + BUF_NUM_FORMAT_FLOAT = 0x7, +} BUF_NUM_FORMAT; +typedef enum IMG_NUM_FORMAT { + IMG_NUM_FORMAT_UNORM = 0x0, + IMG_NUM_FORMAT_SNORM = 0x1, + IMG_NUM_FORMAT_USCALED = 0x2, + IMG_NUM_FORMAT_SSCALED = 0x3, + IMG_NUM_FORMAT_UINT = 0x4, + IMG_NUM_FORMAT_SINT = 0x5, + IMG_NUM_FORMAT_RESERVED_6 = 0x6, + IMG_NUM_FORMAT_FLOAT = 0x7, + IMG_NUM_FORMAT_RESERVED_8 = 0x8, + IMG_NUM_FORMAT_SRGB = 0x9, + IMG_NUM_FORMAT_RESERVED_10 = 0xa, + IMG_NUM_FORMAT_RESERVED_11 = 0xb, + IMG_NUM_FORMAT_RESERVED_12 = 0xc, + IMG_NUM_FORMAT_RESERVED_13 = 0xd, + IMG_NUM_FORMAT_RESERVED_14 = 0xe, + IMG_NUM_FORMAT_RESERVED_15 = 0xf, +} IMG_NUM_FORMAT; +typedef enum TileType { + ARRAY_COLOR_TILE = 0x0, + ARRAY_DEPTH_TILE = 0x1, +} TileType; +typedef enum NonDispTilingOrder { + ADDR_SURF_MICRO_TILING_DISPLAY = 0x0, + ADDR_SURF_MICRO_TILING_NON_DISPLAY = 0x1, +} NonDispTilingOrder; +typedef enum MicroTileMode { + ADDR_SURF_DISPLAY_MICRO_TILING = 0x0, + ADDR_SURF_THIN_MICRO_TILING = 0x1, + ADDR_SURF_DEPTH_MICRO_TILING = 0x2, + ADDR_SURF_ROTATED_MICRO_TILING = 0x3, + ADDR_SURF_THICK_MICRO_TILING = 0x4, +} MicroTileMode; +typedef enum TileSplit { + ADDR_SURF_TILE_SPLIT_64B = 0x0, + ADDR_SURF_TILE_SPLIT_128B = 0x1, + ADDR_SURF_TILE_SPLIT_256B = 0x2, + ADDR_SURF_TILE_SPLIT_512B = 0x3, + ADDR_SURF_TILE_SPLIT_1KB = 0x4, + ADDR_SURF_TILE_SPLIT_2KB = 0x5, + ADDR_SURF_TILE_SPLIT_4KB = 0x6, +} TileSplit; +typedef enum SampleSplit { + ADDR_SURF_SAMPLE_SPLIT_1 = 0x0, + ADDR_SURF_SAMPLE_SPLIT_2 = 0x1, + ADDR_SURF_SAMPLE_SPLIT_4 = 0x2, + ADDR_SURF_SAMPLE_SPLIT_8 = 0x3, +} SampleSplit; +typedef enum PipeConfig { + ADDR_SURF_P2 = 0x0, + ADDR_SURF_P2_RESERVED0 = 0x1, + ADDR_SURF_P2_RESERVED1 = 0x2, + ADDR_SURF_P2_RESERVED2 = 0x3, + ADDR_SURF_P4_8x16 = 0x4, + ADDR_SURF_P4_16x16 = 0x5, + ADDR_SURF_P4_16x32 = 0x6, + ADDR_SURF_P4_32x32 = 0x7, + ADDR_SURF_P8_16x16_8x16 = 0x8, + ADDR_SURF_P8_16x32_8x16 = 0x9, + ADDR_SURF_P8_32x32_8x16 = 0xa, + ADDR_SURF_P8_16x32_16x16 = 0xb, + ADDR_SURF_P8_32x32_16x16 = 0xc, + ADDR_SURF_P8_32x32_16x32 = 0xd, + ADDR_SURF_P8_32x64_32x32 = 0xe, + ADDR_SURF_P8_RESERVED0 = 0xf, + ADDR_SURF_P16_32x32_8x16 = 0x10, + ADDR_SURF_P16_32x32_16x16 = 0x11, +} PipeConfig; +typedef enum NumBanks { + ADDR_SURF_2_BANK = 0x0, + ADDR_SURF_4_BANK = 0x1, + ADDR_SURF_8_BANK = 0x2, + ADDR_SURF_16_BANK = 0x3, +} NumBanks; +typedef enum BankWidth { + ADDR_SURF_BANK_WIDTH_1 = 0x0, + ADDR_SURF_BANK_WIDTH_2 = 0x1, + ADDR_SURF_BANK_WIDTH_4 = 0x2, + ADDR_SURF_BANK_WIDTH_8 = 0x3, +} BankWidth; +typedef enum BankHeight { + ADDR_SURF_BANK_HEIGHT_1 = 0x0, + ADDR_SURF_BANK_HEIGHT_2 = 0x1, + ADDR_SURF_BANK_HEIGHT_4 = 0x2, + ADDR_SURF_BANK_HEIGHT_8 = 0x3, +} BankHeight; +typedef enum BankWidthHeight { + ADDR_SURF_BANK_WH_1 = 0x0, + ADDR_SURF_BANK_WH_2 = 0x1, + ADDR_SURF_BANK_WH_4 = 0x2, + ADDR_SURF_BANK_WH_8 = 0x3, +} BankWidthHeight; +typedef enum MacroTileAspect { + ADDR_SURF_MACRO_ASPECT_1 = 0x0, + ADDR_SURF_MACRO_ASPECT_2 = 0x1, + ADDR_SURF_MACRO_ASPECT_4 = 0x2, + ADDR_SURF_MACRO_ASPECT_8 = 0x3, +} MacroTileAspect; +typedef enum GATCL1RequestType { + GATCL1_TYPE_NORMAL = 0x0, + GATCL1_TYPE_SHOOTDOWN = 0x1, + GATCL1_TYPE_BYPASS = 0x2, +} GATCL1RequestType; +typedef 
enum TCC_CACHE_POLICIES { + TCC_CACHE_POLICY_LRU = 0x0, + TCC_CACHE_POLICY_STREAM = 0x1, +} TCC_CACHE_POLICIES; +typedef enum MTYPE { + MTYPE_NC_NV = 0x0, + MTYPE_NC = 0x1, + MTYPE_CC = 0x2, + MTYPE_UC = 0x3, +} MTYPE; +typedef enum PERFMON_COUNTER_MODE { + PERFMON_COUNTER_MODE_ACCUM = 0x0, + PERFMON_COUNTER_MODE_ACTIVE_CYCLES = 0x1, + PERFMON_COUNTER_MODE_MAX = 0x2, + PERFMON_COUNTER_MODE_DIRTY = 0x3, + PERFMON_COUNTER_MODE_SAMPLE = 0x4, + PERFMON_COUNTER_MODE_CYCLES_SINCE_FIRST_EVENT = 0x5, + PERFMON_COUNTER_MODE_CYCLES_SINCE_LAST_EVENT = 0x6, + PERFMON_COUNTER_MODE_CYCLES_GE_HI = 0x7, + PERFMON_COUNTER_MODE_CYCLES_EQ_HI = 0x8, + PERFMON_COUNTER_MODE_INACTIVE_CYCLES = 0x9, + PERFMON_COUNTER_MODE_RESERVED = 0xf, +} PERFMON_COUNTER_MODE; +typedef enum PERFMON_SPM_MODE { + PERFMON_SPM_MODE_OFF = 0x0, + PERFMON_SPM_MODE_16BIT_CLAMP = 0x1, + PERFMON_SPM_MODE_16BIT_NO_CLAMP = 0x2, + PERFMON_SPM_MODE_32BIT_CLAMP = 0x3, + PERFMON_SPM_MODE_32BIT_NO_CLAMP = 0x4, + PERFMON_SPM_MODE_RESERVED_5 = 0x5, + PERFMON_SPM_MODE_RESERVED_6 = 0x6, + PERFMON_SPM_MODE_RESERVED_7 = 0x7, + PERFMON_SPM_MODE_TEST_MODE_0 = 0x8, + PERFMON_SPM_MODE_TEST_MODE_1 = 0x9, + PERFMON_SPM_MODE_TEST_MODE_2 = 0xa, +} PERFMON_SPM_MODE; +typedef enum SurfaceTiling { + ARRAY_LINEAR = 0x0, + ARRAY_TILED = 0x1, +} SurfaceTiling; +typedef enum SurfaceArray { + ARRAY_1D = 0x0, + ARRAY_2D = 0x1, + ARRAY_3D = 0x2, + ARRAY_3D_SLICE = 0x3, +} SurfaceArray; +typedef enum ColorArray { + ARRAY_2D_ALT_COLOR = 0x0, + ARRAY_2D_COLOR = 0x1, + ARRAY_3D_SLICE_COLOR = 0x3, +} ColorArray; +typedef enum DepthArray { + ARRAY_2D_ALT_DEPTH = 0x0, + ARRAY_2D_DEPTH = 0x1, +} DepthArray; +typedef enum ENUM_NUM_SIMD_PER_CU { + NUM_SIMD_PER_CU = 0x4, +} ENUM_NUM_SIMD_PER_CU; +typedef enum MEM_PWR_FORCE_CTRL { + NO_FORCE_REQUEST = 0x0, + FORCE_LIGHT_SLEEP_REQUEST = 0x1, + FORCE_DEEP_SLEEP_REQUEST = 0x2, + FORCE_SHUT_DOWN_REQUEST = 0x3, +} MEM_PWR_FORCE_CTRL; +typedef enum MEM_PWR_FORCE_CTRL2 { + NO_FORCE_REQ = 0x0, + FORCE_LIGHT_SLEEP_REQ = 0x1, +} MEM_PWR_FORCE_CTRL2; +typedef enum MEM_PWR_DIS_CTRL { + ENABLE_MEM_PWR_CTRL = 0x0, + DISABLE_MEM_PWR_CTRL = 0x1, +} MEM_PWR_DIS_CTRL; +typedef enum MEM_PWR_SEL_CTRL { + DYNAMIC_SHUT_DOWN_ENABLE = 0x0, + DYNAMIC_DEEP_SLEEP_ENABLE = 0x1, + DYNAMIC_LIGHT_SLEEP_ENABLE = 0x2, +} MEM_PWR_SEL_CTRL; +typedef enum MEM_PWR_SEL_CTRL2 { + DYNAMIC_DEEP_SLEEP_EN = 0x0, + DYNAMIC_LIGHT_SLEEP_EN = 0x1, +} MEM_PWR_SEL_CTRL2; + +#endif /* SMU_7_1_3_ENUM_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h new file mode 100644 index 000000000000..1ede9e274714 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h @@ -0,0 +1,6080 @@ +/* + * SMU_7_1_3 Register documentation + * + * Copyright (C) 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef SMU_7_1_3_SH_MASK_H +#define SMU_7_1_3_SH_MASK_H + +#define GCK_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff +#define GCK_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0 +#define GCK_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff +#define GCK_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0 +#define GCK_MCLK_FUSES__StartupMClkDid_MASK 0x7f +#define GCK_MCLK_FUSES__StartupMClkDid__SHIFT 0x0 +#define GCK_MCLK_FUSES__MClkADCA_MASK 0x780 +#define GCK_MCLK_FUSES__MClkADCA__SHIFT 0x7 +#define GCK_MCLK_FUSES__MClkDDCA_MASK 0x1800 +#define GCK_MCLK_FUSES__MClkDDCA__SHIFT 0xb +#define GCK_MCLK_FUSES__MClkDiDtWait_MASK 0xe000 +#define GCK_MCLK_FUSES__MClkDiDtWait__SHIFT 0xd +#define GCK_MCLK_FUSES__MClkDiDtFloor_MASK 0x30000 +#define GCK_MCLK_FUSES__MClkDiDtFloor__SHIFT 0x10 +#define CG_DCLK_CNTL__DCLK_DIVIDER_MASK 0x7f +#define CG_DCLK_CNTL__DCLK_DIVIDER__SHIFT 0x0 +#define CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK 0x100 +#define CG_DCLK_CNTL__DCLK_DIR_CNTL_EN__SHIFT 0x8 +#define CG_DCLK_CNTL__DCLK_DIR_CNTL_TOG_MASK 0x200 +#define CG_DCLK_CNTL__DCLK_DIR_CNTL_TOG__SHIFT 0x9 +#define CG_DCLK_CNTL__DCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00 +#define CG_DCLK_CNTL__DCLK_DIR_CNTL_DIVIDER__SHIFT 0xa +#define CG_DCLK_STATUS__DCLK_STATUS_MASK 0x1 +#define CG_DCLK_STATUS__DCLK_STATUS__SHIFT 0x0 +#define CG_DCLK_STATUS__DCLK_DIR_CNTL_DONETOG_MASK 0x2 +#define CG_DCLK_STATUS__DCLK_DIR_CNTL_DONETOG__SHIFT 0x1 +#define CG_VCLK_CNTL__VCLK_DIVIDER_MASK 0x7f +#define CG_VCLK_CNTL__VCLK_DIVIDER__SHIFT 0x0 +#define CG_VCLK_CNTL__VCLK_DIR_CNTL_EN_MASK 0x100 +#define CG_VCLK_CNTL__VCLK_DIR_CNTL_EN__SHIFT 0x8 +#define CG_VCLK_CNTL__VCLK_DIR_CNTL_TOG_MASK 0x200 +#define CG_VCLK_CNTL__VCLK_DIR_CNTL_TOG__SHIFT 0x9 +#define CG_VCLK_CNTL__VCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00 +#define CG_VCLK_CNTL__VCLK_DIR_CNTL_DIVIDER__SHIFT 0xa +#define CG_VCLK_STATUS__VCLK_STATUS_MASK 0x1 +#define CG_VCLK_STATUS__VCLK_STATUS__SHIFT 0x0 +#define CG_VCLK_STATUS__VCLK_DIR_CNTL_DONETOG_MASK 0x2 +#define CG_VCLK_STATUS__VCLK_DIR_CNTL_DONETOG__SHIFT 0x1 +#define CG_ECLK_CNTL__ECLK_DIVIDER_MASK 0x7f +#define CG_ECLK_CNTL__ECLK_DIVIDER__SHIFT 0x0 +#define CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK 0x100 +#define CG_ECLK_CNTL__ECLK_DIR_CNTL_EN__SHIFT 0x8 +#define CG_ECLK_CNTL__ECLK_DIR_CNTL_TOG_MASK 0x200 +#define CG_ECLK_CNTL__ECLK_DIR_CNTL_TOG__SHIFT 0x9 +#define CG_ECLK_CNTL__ECLK_DIR_CNTL_DIVIDER_MASK 0x1fc00 +#define CG_ECLK_CNTL__ECLK_DIR_CNTL_DIVIDER__SHIFT 0xa +#define CG_ECLK_STATUS__ECLK_STATUS_MASK 0x1 +#define CG_ECLK_STATUS__ECLK_STATUS__SHIFT 0x0 +#define CG_ECLK_STATUS__ECLK_DIR_CNTL_DONETOG_MASK 0x2 +#define CG_ECLK_STATUS__ECLK_DIR_CNTL_DONETOG__SHIFT 0x1 +#define CG_ACLK_CNTL__ACLK_DIVIDER_MASK 0x7f +#define CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT 0x0 +#define CG_ACLK_CNTL__ACLK_DIR_CNTL_EN_MASK 0x100 +#define CG_ACLK_CNTL__ACLK_DIR_CNTL_EN__SHIFT 0x8 +#define CG_ACLK_CNTL__ACLK_DIR_CNTL_TOG_MASK 0x200 +#define CG_ACLK_CNTL__ACLK_DIR_CNTL_TOG__SHIFT 0x9 +#define CG_ACLK_CNTL__ACLK_DIR_CNTL_DIVIDER_MASK 0x1fc00 +#define CG_ACLK_CNTL__ACLK_DIR_CNTL_DIVIDER__SHIFT 0xa +#define 
CG_MCLK_CNTL__MCLK_DIVIDER_MASK 0x7f +#define CG_MCLK_CNTL__MCLK_DIVIDER__SHIFT 0x0 +#define CG_MCLK_CNTL__MCLK_DIR_CNTL_EN_MASK 0x100 +#define CG_MCLK_CNTL__MCLK_DIR_CNTL_EN__SHIFT 0x8 +#define CG_MCLK_CNTL__MCLK_DIR_CNTL_TOG_MASK 0x200 +#define CG_MCLK_CNTL__MCLK_DIR_CNTL_TOG__SHIFT 0x9 +#define CG_MCLK_CNTL__MCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00 +#define CG_MCLK_CNTL__MCLK_DIR_CNTL_DIVIDER__SHIFT 0xa +#define CG_MCLK_STATUS__MCLK_STATUS_MASK 0x1 +#define CG_MCLK_STATUS__MCLK_STATUS__SHIFT 0x0 +#define CG_MCLK_STATUS__MCLK_DIR_CNTL_DONETOG_MASK 0x2 +#define CG_MCLK_STATUS__MCLK_DIR_CNTL_DONETOG__SHIFT 0x1 +#define GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK 0x1 +#define GCK_DFS_BYPASS_CNTL__BYPASSECLK__SHIFT 0x0 +#define GCK_DFS_BYPASS_CNTL__BYPASSLCLK_MASK 0x2 +#define GCK_DFS_BYPASS_CNTL__BYPASSLCLK__SHIFT 0x1 +#define GCK_DFS_BYPASS_CNTL__BYPASSEVCLK_MASK 0x4 +#define GCK_DFS_BYPASS_CNTL__BYPASSEVCLK__SHIFT 0x2 +#define GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK 0x8 +#define GCK_DFS_BYPASS_CNTL__BYPASSDCLK__SHIFT 0x3 +#define GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK 0x10 +#define GCK_DFS_BYPASS_CNTL__BYPASSVCLK__SHIFT 0x4 +#define GCK_DFS_BYPASS_CNTL__BYPASSDISPCLK_MASK 0x20 +#define GCK_DFS_BYPASS_CNTL__BYPASSDISPCLK__SHIFT 0x5 +#define GCK_DFS_BYPASS_CNTL__BYPASSDPREFCLK_MASK 0x40 +#define GCK_DFS_BYPASS_CNTL__BYPASSDPREFCLK__SHIFT 0x6 +#define GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK 0x80 +#define GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT 0x7 +#define GCK_DFS_BYPASS_CNTL__BYPASSADIVCLK_MASK 0x100 +#define GCK_DFS_BYPASS_CNTL__BYPASSADIVCLK__SHIFT 0x8 +#define GCK_DFS_BYPASS_CNTL__BYPASSPSPCLK_MASK 0x200 +#define GCK_DFS_BYPASS_CNTL__BYPASSPSPCLK__SHIFT 0x9 +#define GCK_DFS_BYPASS_CNTL__BYPASSSAMCLK_MASK 0x400 +#define GCK_DFS_BYPASS_CNTL__BYPASSSAMCLK__SHIFT 0xa +#define GCK_DFS_BYPASS_CNTL__BYPASSSCLK_MASK 0x800 +#define GCK_DFS_BYPASS_CNTL__BYPASSSCLK__SHIFT 0xb +#define GCK_DFS_BYPASS_CNTL__USE_SPLL_BYPASS_EN_MASK 0x1000 +#define GCK_DFS_BYPASS_CNTL__USE_SPLL_BYPASS_EN__SHIFT 0xc +#define GCK_DFS_BYPASS_CNTL__BYPASSMCLK_MASK 0x2000 +#define GCK_DFS_BYPASS_CNTL__BYPASSMCLK__SHIFT 0xd +#define CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK 0x1 +#define CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT 0x0 +#define CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK 0x2 +#define CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT 0x1 +#define CG_SPLL_FUNC_CNTL__SPLL_DIVEN_MASK 0x4 +#define CG_SPLL_FUNC_CNTL__SPLL_DIVEN__SHIFT 0x2 +#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK 0x8 +#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT 0x3 +#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_THRU_DFS_MASK 0x10 +#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_THRU_DFS__SHIFT 0x4 +#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x7e0 +#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT 0x5 +#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_UPDATE_MASK 0x800 +#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_UPDATE__SHIFT 0xb +#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_EN_MASK 0x1000 +#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_EN__SHIFT 0xc +#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK 0x7f00000 +#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT 0x14 +#define CG_SPLL_FUNC_CNTL__SPLL_DIVA_ACK_MASK 0x8000000 +#define CG_SPLL_FUNC_CNTL__SPLL_DIVA_ACK__SHIFT 0x1b +#define CG_SPLL_FUNC_CNTL__SPLL_OTEST_LOCK_EN_MASK 0x10000000 +#define CG_SPLL_FUNC_CNTL__SPLL_OTEST_LOCK_EN__SHIFT 0x1c +#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK 0x1ff +#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT 0x0 +#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_MASK 0x800 +#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ__SHIFT 0xb +#define 
CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK 0x400000 +#define CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT 0x16 +#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK 0x800000 +#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT 0x17 +#define CG_SPLL_FUNC_CNTL_2__SPLL_RESET_CHG_MASK 0x1000000 +#define CG_SPLL_FUNC_CNTL_2__SPLL_RESET_CHG__SHIFT 0x18 +#define CG_SPLL_FUNC_CNTL_2__SPLL_BABY_STEP_CHG_MASK 0x2000000 +#define CG_SPLL_FUNC_CNTL_2__SPLL_BABY_STEP_CHG__SHIFT 0x19 +#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK 0x4000000 +#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE__SHIFT 0x1a +#define CG_SPLL_FUNC_CNTL_2__SPLL_UNLOCK_CLEAR_MASK 0x8000000 +#define CG_SPLL_FUNC_CNTL_2__SPLL_UNLOCK_CLEAR__SHIFT 0x1b +#define CG_SPLL_FUNC_CNTL_2__SPLL_CLKF_UPDATE_MASK 0x10000000 +#define CG_SPLL_FUNC_CNTL_2__SPLL_CLKF_UPDATE__SHIFT 0x1c +#define CG_SPLL_FUNC_CNTL_2__SPLL_TEST_UNLOCK_CLR_MASK 0x40000000 +#define CG_SPLL_FUNC_CNTL_2__SPLL_TEST_UNLOCK_CLR__SHIFT 0x1e +#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x3ffffff +#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x0 +#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK 0x10000000 +#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN__SHIFT 0x1c +#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_TEST_SEL_MASK 0xf +#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_TEST_SEL__SHIFT 0x0 +#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_SEL_MASK 0x60 +#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_SEL__SHIFT 0x5 +#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EN_MASK 0x180 +#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EN__SHIFT 0x7 +#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_MASK 0xe00 +#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE__SHIFT 0x9 +#define CG_SPLL_FUNC_CNTL_4__PCC_INC_DIV_MASK 0x7f000 +#define CG_SPLL_FUNC_CNTL_4__PCC_INC_DIV__SHIFT 0xc +#define CG_SPLL_FUNC_CNTL_4__TEST_FRAC_BYPASS_MASK 0x200000 +#define CG_SPLL_FUNC_CNTL_4__TEST_FRAC_BYPASS__SHIFT 0x15 +#define CG_SPLL_FUNC_CNTL_4__SPLL_ILOCK_MASK 0x800000 +#define CG_SPLL_FUNC_CNTL_4__SPLL_ILOCK__SHIFT 0x17 +#define CG_SPLL_FUNC_CNTL_4__SPLL_FBCLK_SEL_MASK 0x1000000 +#define CG_SPLL_FUNC_CNTL_4__SPLL_FBCLK_SEL__SHIFT 0x18 +#define CG_SPLL_FUNC_CNTL_4__SPLL_VCTRLADC_EN_MASK 0x2000000 +#define CG_SPLL_FUNC_CNTL_4__SPLL_VCTRLADC_EN__SHIFT 0x19 +#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_MASK 0xc000000 +#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT__SHIFT 0x1a +#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_EXT_MASK 0x70000000 +#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_EXT__SHIFT 0x1c +#define CG_SPLL_FUNC_CNTL_4__SPLL_VTOI_BIAS_CNTL_MASK 0x80000000 +#define CG_SPLL_FUNC_CNTL_4__SPLL_VTOI_BIAS_CNTL__SHIFT 0x1f +#define CG_SPLL_FUNC_CNTL_5__FBDIV_SSC_BYPASS_MASK 0x1 +#define CG_SPLL_FUNC_CNTL_5__FBDIV_SSC_BYPASS__SHIFT 0x0 +#define CG_SPLL_FUNC_CNTL_5__RISEFBVCO_EN_MASK 0x2 +#define CG_SPLL_FUNC_CNTL_5__RISEFBVCO_EN__SHIFT 0x1 +#define CG_SPLL_FUNC_CNTL_5__PFD_RESET_CNTRL_MASK 0xc +#define CG_SPLL_FUNC_CNTL_5__PFD_RESET_CNTRL__SHIFT 0x2 +#define CG_SPLL_FUNC_CNTL_5__RESET_TIMER_MASK 0x30 +#define CG_SPLL_FUNC_CNTL_5__RESET_TIMER__SHIFT 0x4 +#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_CNTRL_MASK 0xc0 +#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_CNTRL__SHIFT 0x6 +#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_EN_MASK 0x100 +#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_EN__SHIFT 0x8 +#define CG_SPLL_FUNC_CNTL_5__RESET_ANTI_MUX_MASK 0x200 +#define CG_SPLL_FUNC_CNTL_5__RESET_ANTI_MUX__SHIFT 0x9 +#define CG_SPLL_FUNC_CNTL_6__SCLKMUX0_CLKOFF_CNT_MASK 0xff +#define CG_SPLL_FUNC_CNTL_6__SCLKMUX0_CLKOFF_CNT__SHIFT 0x0 +#define CG_SPLL_FUNC_CNTL_6__SCLKMUX1_CLKOFF_CNT_MASK 0xff00 +#define 
CG_SPLL_FUNC_CNTL_6__SCLKMUX1_CLKOFF_CNT__SHIFT 0x8
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_EN_MASK 0x10000
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_EN__SHIFT 0x10
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_IN_MASK 0x1e0000
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_IN__SHIFT 0x11
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_OUT_MASK 0x1e00000
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_OUT__SHIFT 0x15
+#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR_MASK 0xfe000000
+#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19
+#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff
+#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0
+#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1
+#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0
+#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2
+#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV__SHIFT 0x1
+#define SPLL_CNTL_MODE__SPLL_TEST_MASK 0x4
+#define SPLL_CNTL_MODE__SPLL_TEST__SHIFT 0x2
+#define SPLL_CNTL_MODE__SPLL_FASTEN_MASK 0x8
+#define SPLL_CNTL_MODE__SPLL_FASTEN__SHIFT 0x3
+#define SPLL_CNTL_MODE__SPLL_ENSAT_MASK 0x10
+#define SPLL_CNTL_MODE__SPLL_ENSAT__SHIFT 0x4
+#define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV_MASK 0xc00
+#define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV__SHIFT 0xa
+#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT_MASK 0xff000
+#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT__SHIFT 0xc
+#define SPLL_CNTL_MODE__SPLL_RESET_EN_MASK 0x10000000
+#define SPLL_CNTL_MODE__SPLL_RESET_EN__SHIFT 0x1c
+#define SPLL_CNTL_MODE__SPLL_VCO_MODE_MASK 0x60000000
+#define SPLL_CNTL_MODE__SPLL_VCO_MODE__SHIFT 0x1d
+#define CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK 0x1
+#define CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT 0x0
+#define CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK 0xfff0
+#define CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT 0x4
+#define CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK 0x3ffffff
+#define CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT 0x0
+#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK 0xff00
+#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT 0x8
+#define CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK 0x2
+#define CG_CLKPIN_CNTL__XTALIN_DIVIDE__SHIFT 0x1
+#define CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK 0x4
+#define CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT 0x2
+#define CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK 0x1
+#define CG_CLKPIN_CNTL_2__ENABLE_XCLK__SHIFT 0x0
+#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK 0x8
+#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT 0x3
+#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK_MASK 0x100
+#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK__SHIFT 0x8
+#define CG_CLKPIN_CNTL_2__XO_IN_OSCIN_EN_MASK 0x4000
+#define CG_CLKPIN_CNTL_2__XO_IN_OSCIN_EN__SHIFT 0xe
+#define CG_CLKPIN_CNTL_2__XO_IN_ICORE_CLK_OE_MASK 0x8000
+#define CG_CLKPIN_CNTL_2__XO_IN_ICORE_CLK_OE__SHIFT 0xf
+#define CG_CLKPIN_CNTL_2__XO_IN_CML_RXEN_MASK 0x10000
+#define CG_CLKPIN_CNTL_2__XO_IN_CML_RXEN__SHIFT 0x10
+#define CG_CLKPIN_CNTL_2__XO_IN_BIDIR_CML_OE_MASK 0x20000
+#define CG_CLKPIN_CNTL_2__XO_IN_BIDIR_CML_OE__SHIFT 0x11
+#define CG_CLKPIN_CNTL_2__XO_IN2_OSCIN_EN_MASK 0x40000
+#define CG_CLKPIN_CNTL_2__XO_IN2_OSCIN_EN__SHIFT 0x12
+#define CG_CLKPIN_CNTL_2__XO_IN2_ICORE_CLK_OE_MASK 0x80000
+#define CG_CLKPIN_CNTL_2__XO_IN2_ICORE_CLK_OE__SHIFT 0x13
+#define CG_CLKPIN_CNTL_2__XO_IN2_CML_RXEN_MASK 0x100000
+#define CG_CLKPIN_CNTL_2__XO_IN2_CML_RXEN__SHIFT 0x14
+#define CG_CLKPIN_CNTL_2__XO_IN2_BIDIR_CML_OE_MASK 0x200000
+#define CG_CLKPIN_CNTL_2__XO_IN2_BIDIR_CML_OE__SHIFT 0x15
+#define CG_CLKPIN_CNTL_2__CML_CTRL_MASK 0xc00000
+#define CG_CLKPIN_CNTL_2__CML_CTRL__SHIFT 0x16
+#define CG_CLKPIN_CNTL_2__CLK_SPARE_MASK 0xff000000
+#define CG_CLKPIN_CNTL_2__CLK_SPARE__SHIFT 0x18
+#define CG_CLKPIN_CNTL_DC__OSC_EN_MASK 0x1
+#define CG_CLKPIN_CNTL_DC__OSC_EN__SHIFT 0x0
+#define CG_CLKPIN_CNTL_DC__XTL_LOW_GAIN_MASK 0x6
+#define CG_CLKPIN_CNTL_DC__XTL_LOW_GAIN__SHIFT 0x1
+#define CG_CLKPIN_CNTL_DC__XTL_XOCLK_DRV_R_EN_MASK 0x200
+#define CG_CLKPIN_CNTL_DC__XTL_XOCLK_DRV_R_EN__SHIFT 0x9
+#define CG_CLKPIN_CNTL_DC__XTALIN_SEL_MASK 0x1c00
+#define CG_CLKPIN_CNTL_DC__XTALIN_SEL__SHIFT 0xa
+#define THM_CLK_CNTL__CMON_CLK_SEL_MASK 0xff
+#define THM_CLK_CNTL__CMON_CLK_SEL__SHIFT 0x0
+#define THM_CLK_CNTL__TMON_CLK_SEL_MASK 0xff00
+#define THM_CLK_CNTL__TMON_CLK_SEL__SHIFT 0x8
+#define THM_CLK_CNTL__CTF_CLK_SHUTOFF_EN_MASK 0x10000
+#define THM_CLK_CNTL__CTF_CLK_SHUTOFF_EN__SHIFT 0x10
+#define MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK 0xff
+#define MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT 0x0
+#define MISC_CLK_CTRL__ZCLK_SEL_MASK 0xff00
+#define MISC_CLK_CTRL__ZCLK_SEL__SHIFT 0x8
+#define MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK 0xff0000
+#define MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT 0x10
+#define GCK_PLL_TEST_CNTL__TST_SRC_SEL_MASK 0x1f
+#define GCK_PLL_TEST_CNTL__TST_SRC_SEL__SHIFT 0x0
+#define GCK_PLL_TEST_CNTL__TST_REF_SEL_MASK 0x3e0
+#define GCK_PLL_TEST_CNTL__TST_REF_SEL__SHIFT 0x5
+#define GCK_PLL_TEST_CNTL__REF_TEST_COUNT_MASK 0x1fc00
+#define GCK_PLL_TEST_CNTL__REF_TEST_COUNT__SHIFT 0xa
+#define GCK_PLL_TEST_CNTL__TST_RESET_MASK 0x20000
+#define GCK_PLL_TEST_CNTL__TST_RESET__SHIFT 0x11
+#define GCK_PLL_TEST_CNTL__TST_CLK_SEL_MODE_MASK 0x40000
+#define GCK_PLL_TEST_CNTL__TST_CLK_SEL_MODE__SHIFT 0x12
+#define GCK_PLL_TEST_CNTL_2__TEST_COUNT_MASK 0xfffe0000
+#define GCK_PLL_TEST_CNTL_2__TEST_COUNT__SHIFT 0x11
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ECLK_BYPASS_CNTL_MASK 0x7
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ECLK_BYPASS_CNTL__SHIFT 0x0
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SCLK_BYPASS_CNTL_MASK 0x38
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SCLK_BYPASS_CNTL__SHIFT 0x3
+#define GCK_ADFS_CLK_BYPASS_CNTL1__LCLK_BYPASS_CNTL_MASK 0x1c0
+#define GCK_ADFS_CLK_BYPASS_CNTL1__LCLK_BYPASS_CNTL__SHIFT 0x6
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DCLK_BYPASS_CNTL_MASK 0xe00
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DCLK_BYPASS_CNTL__SHIFT 0x9
+#define GCK_ADFS_CLK_BYPASS_CNTL1__VCLK_BYPASS_CNTL_MASK 0x7000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__VCLK_BYPASS_CNTL__SHIFT 0xc
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DISPCLK_BYPASS_CNTL_MASK 0x38000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DISPCLK_BYPASS_CNTL__SHIFT 0xf
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DRREFCLK_BYPASS_CNTL_MASK 0x1c0000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DRREFCLK_BYPASS_CNTL__SHIFT 0x12
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_BYPASS_CNTL_MASK 0xe00000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_BYPASS_CNTL__SHIFT 0x15
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SAMCLK_BYPASS_CNTL_MASK 0x7000000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SAMCLK_BYPASS_CNTL__SHIFT 0x18
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_DIV_BYPASS_CNTL_MASK 0x38000000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_DIV_BYPASS_CNTL__SHIFT 0x1b
+#define SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_0__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_0__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_0__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_0__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_1__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_1__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_1__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_1__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_2__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_2__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_2__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_2__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_3__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_3__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_3__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_3__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_4__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_4__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_4__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_4__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_5__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_5__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_5__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_5__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_6__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_6__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_6__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_6__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_7__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_7__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_7__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_7__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK 0x1
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0__SHIFT 0x0
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1_MASK 0x2
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1__SHIFT 0x1
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2_MASK 0x4
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2__SHIFT 0x2
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3_MASK 0x8
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3__SHIFT 0x3
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_4_MASK 0x10
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_4__SHIFT 0x4
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_5_MASK 0x20
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_5__SHIFT 0x5
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_6_MASK 0x40
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_6__SHIFT 0x6
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_7_MASK 0x80
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_7__SHIFT 0x7
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_8_MASK 0x100
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_8__SHIFT 0x8
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_9_MASK 0x200
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_9__SHIFT 0x9
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_10_MASK 0x400
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_10__SHIFT 0xa
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_11_MASK 0x800
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_11__SHIFT 0xb
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_12_MASK 0x1000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_12__SHIFT 0xc
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_13_MASK 0x2000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_13__SHIFT 0xd
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_14_MASK 0x4000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_14__SHIFT 0xe
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_15_MASK 0x8000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_15__SHIFT 0xf
+#define SMC_MESSAGE_0__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_0__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_0__SMC_RESP_MASK 0xffff
+#define SMC_RESP_0__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_1__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_1__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_1__SMC_RESP_MASK 0xffff
+#define SMC_RESP_1__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_2__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_2__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_2__SMC_RESP_MASK 0xffff
+#define SMC_RESP_2__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_3__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_3__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_3__SMC_RESP_MASK 0xffff
+#define SMC_RESP_3__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_4__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_4__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_4__SMC_RESP_MASK 0xffff
+#define SMC_RESP_4__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_5__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_5__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_5__SMC_RESP_MASK 0xffff
+#define SMC_RESP_5__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_6__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_6__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_6__SMC_RESP_MASK 0xffff
+#define SMC_RESP_6__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_7__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_7__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_7__SMC_RESP_MASK 0xffff
+#define SMC_RESP_7__SMC_RESP__SHIFT 0x0
+#define SMC_MSG_ARG_0__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_0__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_1__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_1__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_2__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_2__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_3__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_3__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_4__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_4__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_5__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_5__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_6__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_6__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_7__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_7__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MESSAGE_8__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_8__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_8__SMC_RESP_MASK 0xffff
+#define SMC_RESP_8__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_9__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_9__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_9__SMC_RESP_MASK 0xffff
+#define SMC_RESP_9__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_10__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_10__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_10__SMC_RESP_MASK 0xffff
+#define SMC_RESP_10__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_11__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_11__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_11__SMC_RESP_MASK 0xffff
+#define SMC_RESP_11__SMC_RESP__SHIFT 0x0
+#define SMC_MSG_ARG_8__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_8__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_9__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_9__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_10__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_10__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_11__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_11__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_SYSCON_RESET_CNTL__rst_reg_MASK 0x1
+#define SMC_SYSCON_RESET_CNTL__rst_reg__SHIFT 0x0
+#define SMC_SYSCON_RESET_CNTL__srbm_soft_rst_override_MASK 0x2
+#define SMC_SYSCON_RESET_CNTL__srbm_soft_rst_override__SHIFT 0x1
+#define SMC_SYSCON_RESET_CNTL__RegReset_MASK 0x40000000
+#define SMC_SYSCON_RESET_CNTL__RegReset__SHIFT 0x1e
+#define SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK 0x1
+#define SMC_SYSCON_CLOCK_CNTL_0__ck_disable__SHIFT 0x0
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_en_MASK 0x2
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_en__SHIFT 0x1
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_timeout_MASK 0xffff00
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_timeout__SHIFT 0x8
+#define SMC_SYSCON_CLOCK_CNTL_0__cken_MASK 0x1000000
+#define SMC_SYSCON_CLOCK_CNTL_0__cken__SHIFT 0x18
+#define SMC_SYSCON_CLOCK_CNTL_1__auto_ck_disable_MASK 0x1
+#define SMC_SYSCON_CLOCK_CNTL_1__auto_ck_disable__SHIFT 0x0
+#define SMC_SYSCON_CLOCK_CNTL_2__wake_on_irq_MASK 0xffffffff
+#define SMC_SYSCON_CLOCK_CNTL_2__wake_on_irq__SHIFT 0x0
+#define SMC_SYSCON_MISC_CNTL__dma_no_outstanding_MASK 0x2
+#define SMC_SYSCON_MISC_CNTL__dma_no_outstanding__SHIFT 0x1
+#define SMC_SYSCON_MSG_ARG_0__smc_msg_arg_MASK 0xffffffff
+#define SMC_SYSCON_MSG_ARG_0__smc_msg_arg__SHIFT 0x0
+#define SMC_PC_C__smc_pc_c_MASK 0xffffffff
+#define SMC_PC_C__smc_pc_c__SHIFT 0x0
+#define SMC_SCRATCH9__SCRATCH_VALUE_MASK 0xffffffff
+#define SMC_SCRATCH9__SCRATCH_VALUE__SHIFT 0x0
+#define GPIOPAD_SW_INT_STAT__SW_INT_STAT_MASK 0x1
+#define GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x0
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SN_MASK 0xf
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SN__SHIFT 0x0
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SP_MASK 0xf0
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SP__SHIFT 0x4
+#define GPIOPAD_MASK__GPIO_MASK_MASK 0x7fffffff
+#define GPIOPAD_MASK__GPIO_MASK__SHIFT 0x0
+#define GPIOPAD_A__GPIO_A_MASK 0x7fffffff
+#define GPIOPAD_A__GPIO_A__SHIFT 0x0
+#define GPIOPAD_EN__GPIO_EN_MASK 0x7fffffff
+#define GPIOPAD_EN__GPIO_EN__SHIFT 0x0
+#define GPIOPAD_Y__GPIO_Y_MASK 0x7fffffff
+#define GPIOPAD_Y__GPIO_Y__SHIFT 0x0
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0_MASK 0x1
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0__SHIFT 0x0
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1_MASK 0x2
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1__SHIFT 0x1
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2_MASK 0x4
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2__SHIFT 0x2
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3_MASK 0x8
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3__SHIFT 0x3
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4_MASK 0x10
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4__SHIFT 0x4
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5_MASK 0x20
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5__SHIFT 0x5
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6_MASK 0x40
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6__SHIFT 0x6
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7_MASK 0x80
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7__SHIFT 0x7
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8_MASK 0x100
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8__SHIFT 0x8
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9_MASK 0x200
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9__SHIFT 0x9
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10_MASK 0x400
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10__SHIFT 0xa
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11_MASK 0x800
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11__SHIFT 0xb
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12_MASK 0x1000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12__SHIFT 0xc
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13_MASK 0x2000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13__SHIFT 0xd
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14_MASK 0x4000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14__SHIFT 0xe
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15_MASK 0x8000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15__SHIFT 0xf
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16_MASK 0x10000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16__SHIFT 0x10
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17_MASK 0x20000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17__SHIFT 0x11
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18_MASK 0x40000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18__SHIFT 0x12
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19_MASK 0x80000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19__SHIFT 0x13
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20_MASK 0x100000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20__SHIFT 0x14
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21_MASK 0x200000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21__SHIFT 0x15
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22_MASK 0x400000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22__SHIFT 0x16
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23_MASK 0x800000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23__SHIFT 0x17
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24_MASK 0x1000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24__SHIFT 0x18
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25_MASK 0x2000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25__SHIFT 0x19
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26_MASK 0x4000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26__SHIFT 0x1a
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27_MASK 0x8000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27__SHIFT 0x1b
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28_MASK 0x10000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28__SHIFT 0x1c
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29_MASK 0x20000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29__SHIFT 0x1d
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30_MASK 0x40000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30__SHIFT 0x1e
+#define GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN_MASK 0x1fffffff
+#define GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN__SHIFT 0x0
+#define GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN_MASK 0x80000000
+#define GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN__SHIFT 0x1f
+#define GPIOPAD_INT_STAT__GPIO_INT_STAT_MASK 0x1fffffff
+#define GPIOPAD_INT_STAT__GPIO_INT_STAT__SHIFT 0x0
+#define GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT_MASK 0x80000000
+#define GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT__SHIFT 0x1f
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0_MASK 0x1
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0__SHIFT 0x0
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1_MASK 0x2
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1__SHIFT 0x1
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2_MASK 0x4
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2__SHIFT 0x2
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3_MASK 0x8
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3__SHIFT 0x3
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4_MASK 0x10
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4__SHIFT 0x4
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5_MASK 0x20
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5__SHIFT 0x5
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6_MASK 0x40
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6__SHIFT 0x6
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7_MASK 0x80
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7__SHIFT 0x7
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8_MASK 0x100
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8__SHIFT 0x8
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9_MASK 0x200
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9__SHIFT 0x9
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10_MASK 0x400
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10__SHIFT 0xa
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11_MASK 0x800
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11__SHIFT 0xb
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12_MASK 0x1000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12__SHIFT 0xc
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13_MASK 0x2000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13__SHIFT 0xd
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14_MASK 0x4000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14__SHIFT 0xe
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15_MASK 0x8000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15__SHIFT 0xf
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16_MASK 0x10000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16__SHIFT 0x10
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17_MASK 0x20000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17__SHIFT 0x11
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18_MASK 0x40000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18__SHIFT 0x12
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19_MASK 0x80000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19__SHIFT 0x13
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20_MASK 0x100000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20__SHIFT 0x14
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21_MASK 0x200000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21__SHIFT 0x15
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22_MASK 0x400000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22__SHIFT 0x16
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23_MASK 0x800000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23__SHIFT 0x17
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24_MASK 0x1000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24__SHIFT 0x18
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25_MASK 0x2000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25__SHIFT 0x19
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26_MASK 0x4000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26__SHIFT 0x1a
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27_MASK 0x8000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27__SHIFT 0x1b
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28_MASK 0x10000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28__SHIFT 0x1c
+#define GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK_MASK 0x80000000
+#define GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK__SHIFT 0x1f
+#define GPIOPAD_INT_EN__GPIO_INT_EN_MASK 0x1fffffff
+#define GPIOPAD_INT_EN__GPIO_INT_EN__SHIFT 0x0
+#define GPIOPAD_INT_EN__SW_INITIATED_INT_EN_MASK 0x80000000
+#define GPIOPAD_INT_EN__SW_INITIATED_INT_EN__SHIFT 0x1f
+#define GPIOPAD_INT_TYPE__GPIO_INT_TYPE_MASK 0x1fffffff
+#define GPIOPAD_INT_TYPE__GPIO_INT_TYPE__SHIFT 0x0
+#define GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE_MASK 0x80000000
+#define GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE__SHIFT 0x1f
+#define GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY_MASK 0x1fffffff
+#define GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY__SHIFT 0x0
+#define GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY_MASK 0x80000000
+#define GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY__SHIFT 0x1f
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_SEL_MASK 0x1f
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_SEL__SHIFT 0x0
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_CLR_MASK 0x20
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_CLR__SHIFT 0x5
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_READ_MASK 0x40
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_READ__SHIFT 0x6
+#define GPIOPAD_RCVR_SEL__GPIO_RCVR_SEL_MASK 0x7fffffff
+#define GPIOPAD_RCVR_SEL__GPIO_RCVR_SEL__SHIFT 0x0
+#define GPIOPAD_PU_EN__GPIO_PU_EN_MASK 0x7fffffff
+#define GPIOPAD_PU_EN__GPIO_PU_EN__SHIFT 0x0
+#define GPIOPAD_PD_EN__GPIO_PD_EN_MASK 0x7fffffff
+#define GPIOPAD_PD_EN__GPIO_PD_EN__SHIFT 0x0
+#define CG_FPS_CNT__FPS_CNT_MASK 0xffffffff
+#define CG_FPS_CNT__FPS_CNT__SHIFT 0x0
+#define SMU_IND_INDEX_0__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_0__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_0__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_0__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_1__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_1__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_1__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_1__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_2__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_2__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_2__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_2__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_3__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_3__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_3__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_3__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_4__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_4__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_4__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_4__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_5__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_5__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_5__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_5__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_6__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_6__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_6__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_6__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_7__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_7__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_7__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_7__SMC_IND_DATA__SHIFT 0x0
+#define SMU_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
+#define RCU_UC_EVENTS__RCU_TST_jpc_rep_req_MASK 0x1
+#define RCU_UC_EVENTS__RCU_TST_jpc_rep_req__SHIFT 0x0
+#define RCU_UC_EVENTS__TST_RCU_jpc_rep_done_MASK 0x2
+#define RCU_UC_EVENTS__TST_RCU_jpc_rep_done__SHIFT 0x1
+#define RCU_UC_EVENTS__drv_rst_mode_MASK 0x4
+#define RCU_UC_EVENTS__drv_rst_mode__SHIFT 0x2
+#define RCU_UC_EVENTS__SMU_DC_efuse_status_invalid_MASK 0x8
+#define RCU_UC_EVENTS__SMU_DC_efuse_status_invalid__SHIFT 0x3
+#define RCU_UC_EVENTS__TP_Tester_MASK 0x40
+#define RCU_UC_EVENTS__TP_Tester__SHIFT 0x6
+#define RCU_UC_EVENTS__boot_seq_done_MASK 0x80
+#define RCU_UC_EVENTS__boot_seq_done__SHIFT 0x7
+#define RCU_UC_EVENTS__sclk_deep_sleep_exit_MASK 0x100
+#define RCU_UC_EVENTS__sclk_deep_sleep_exit__SHIFT 0x8
+#define RCU_UC_EVENTS__BREAK_PT1_ACTIVE_MASK 0x200
+#define RCU_UC_EVENTS__BREAK_PT1_ACTIVE__SHIFT 0x9
+#define RCU_UC_EVENTS__BREAK_PT2_ACTIVE_MASK 0x400
+#define RCU_UC_EVENTS__BREAK_PT2_ACTIVE__SHIFT 0xa
+#define RCU_UC_EVENTS__FCH_HALT_MASK 0x800
+#define RCU_UC_EVENTS__FCH_HALT__SHIFT 0xb
+#define RCU_UC_EVENTS__RCU_GIO_fch_lockdown_MASK 0x2000
+#define RCU_UC_EVENTS__RCU_GIO_fch_lockdown__SHIFT 0xd
+#define RCU_UC_EVENTS__INTERRUPTS_ENABLED_MASK 0x10000
+#define RCU_UC_EVENTS__INTERRUPTS_ENABLED__SHIFT 0x10
+#define RCU_UC_EVENTS__RCU_DtmCnt0_Done_MASK 0x20000
+#define RCU_UC_EVENTS__RCU_DtmCnt0_Done__SHIFT 0x11
+#define RCU_UC_EVENTS__RCU_DtmCnt1_Done_MASK 0x40000
+#define RCU_UC_EVENTS__RCU_DtmCnt1_Done__SHIFT 0x12
+#define RCU_UC_EVENTS__RCU_DtmCnt2_Done_MASK 0x80000
+#define RCU_UC_EVENTS__RCU_DtmCnt2_Done__SHIFT 0x13
+#define RCU_UC_EVENTS__irq31_sel_MASK 0x3000000
+#define RCU_UC_EVENTS__irq31_sel__SHIFT 0x18
+#define RCU_MISC_CTRL__REG_DRV_RST_MODE_MASK 0x2
+#define RCU_MISC_CTRL__REG_DRV_RST_MODE__SHIFT 0x1
+#define RCU_MISC_CTRL__REG_RCU_MEMREP_DIS_MASK 0x8
+#define RCU_MISC_CTRL__REG_RCU_MEMREP_DIS__SHIFT 0x3
+#define RCU_MISC_CTRL__REG_CC_FUSE_DISABLE_MASK 0x10
+#define RCU_MISC_CTRL__REG_CC_FUSE_DISABLE__SHIFT 0x4
+#define RCU_MISC_CTRL__REG_SAMU_FUSE_DISABLE_MASK 0x20
+#define RCU_MISC_CTRL__REG_SAMU_FUSE_DISABLE__SHIFT 0x5
+#define RCU_MISC_CTRL__REG_CC_SRBM_RD_DISABLE_MASK 0x100
+#define RCU_MISC_CTRL__REG_CC_SRBM_RD_DISABLE__SHIFT 0x8
+#define RCU_MISC_CTRL__BREAK_PT1_DONE_MASK 0x10000
+#define RCU_MISC_CTRL__BREAK_PT1_DONE__SHIFT 0x10
+#define RCU_MISC_CTRL__BREAK_PT2_DONE_MASK 0x20000
+#define RCU_MISC_CTRL__BREAK_PT2_DONE__SHIFT 0x11
+#define RCU_MISC_CTRL__SAMU_START_MASK 0x400000
+#define RCU_MISC_CTRL__SAMU_START__SHIFT 0x16
+#define RCU_MISC_CTRL__RST_PULSE_WIDTH_MASK 0xff800000
+#define RCU_MISC_CTRL__RST_PULSE_WIDTH__SHIFT 0x17
+#define RCU_VIRT_RESET_REQ__VF_MASK 0xffff
+#define RCU_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define RCU_VIRT_RESET_REQ__PF_MASK 0x80000000
+#define RCU_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define CC_RCU_FUSES__GPU_DIS_MASK 0x2
+#define CC_RCU_FUSES__GPU_DIS__SHIFT 0x1
+#define CC_RCU_FUSES__DEBUG_DISABLE_MASK 0x4
+#define CC_RCU_FUSES__DEBUG_DISABLE__SHIFT 0x2
+#define CC_RCU_FUSES__EFUSE_RD_DISABLE_MASK 0x10
+#define CC_RCU_FUSES__EFUSE_RD_DISABLE__SHIFT 0x4
+#define CC_RCU_FUSES__CG_RST_GLB_REQ_DIS_MASK 0x20
+#define CC_RCU_FUSES__CG_RST_GLB_REQ_DIS__SHIFT 0x5
+#define CC_RCU_FUSES__DRV_RST_MODE_MASK 0x40
+#define CC_RCU_FUSES__DRV_RST_MODE__SHIFT 0x6
+#define CC_RCU_FUSES__ROM_DIS_MASK 0x80
+#define CC_RCU_FUSES__ROM_DIS__SHIFT 0x7
+#define CC_RCU_FUSES__JPC_REP_DISABLE_MASK 0x100
+#define CC_RCU_FUSES__JPC_REP_DISABLE__SHIFT 0x8
+#define CC_RCU_FUSES__RCU_BREAK_POINT1_MASK 0x200
+#define CC_RCU_FUSES__RCU_BREAK_POINT1__SHIFT 0x9
+#define CC_RCU_FUSES__RCU_BREAK_POINT2_MASK 0x400
+#define CC_RCU_FUSES__RCU_BREAK_POINT2__SHIFT 0xa
+#define CC_RCU_FUSES__SMU_IOC_MST_DISABLE_MASK 0x4000
+#define CC_RCU_FUSES__SMU_IOC_MST_DISABLE__SHIFT 0xe
+#define CC_RCU_FUSES__FCH_LOCKOUT_ENABLE_MASK 0x8000
+#define CC_RCU_FUSES__FCH_LOCKOUT_ENABLE__SHIFT 0xf
+#define CC_RCU_FUSES__FCH_XFIRE_FILTER_ENABLE_MASK 0x10000
+#define CC_RCU_FUSES__FCH_XFIRE_FILTER_ENABLE__SHIFT 0x10
+#define CC_RCU_FUSES__XFIRE_DISABLE_MASK 0x20000
+#define CC_RCU_FUSES__XFIRE_DISABLE__SHIFT 0x11
+#define CC_RCU_FUSES__SAMU_FUSE_DISABLE_MASK 0x40000
+#define CC_RCU_FUSES__SAMU_FUSE_DISABLE__SHIFT 0x12
+#define CC_RCU_FUSES__BIF_RST_POLLING_DISABLE_MASK 0x80000
+#define CC_RCU_FUSES__BIF_RST_POLLING_DISABLE__SHIFT 0x13
+#define CC_RCU_FUSES__MEM_HARDREP_EN_MASK 0x200000
+#define CC_RCU_FUSES__MEM_HARDREP_EN__SHIFT 0x15
+#define CC_RCU_FUSES__PCIE_INIT_DISABLE_MASK 0x400000
+#define CC_RCU_FUSES__PCIE_INIT_DISABLE__SHIFT 0x16
+#define CC_RCU_FUSES__DSMU_DISABLE_MASK 0x800000
+#define CC_RCU_FUSES__DSMU_DISABLE__SHIFT 0x17
+#define CC_RCU_FUSES__WRP_FUSE_VALID_MASK 0x1000000
+#define CC_RCU_FUSES__WRP_FUSE_VALID__SHIFT 0x18
+#define CC_RCU_FUSES__PHY_FUSE_VALID_MASK 0x2000000
+#define CC_RCU_FUSES__PHY_FUSE_VALID__SHIFT 0x19
+#define CC_RCU_FUSES__RCU_SPARE_MASK 0xfc000000
+#define CC_RCU_FUSES__RCU_SPARE__SHIFT 0x1a
+#define CC_SMU_MISC_FUSES__IOMMU_V2_DISABLE_MASK 0x2
+#define CC_SMU_MISC_FUSES__IOMMU_V2_DISABLE__SHIFT 0x1
+#define CC_SMU_MISC_FUSES__MinSClkDid_MASK 0x1fc
+#define CC_SMU_MISC_FUSES__MinSClkDid__SHIFT 0x2
+#define CC_SMU_MISC_FUSES__MISC_SPARE_MASK 0x600
+#define CC_SMU_MISC_FUSES__MISC_SPARE__SHIFT 0x9
+#define CC_SMU_MISC_FUSES__PostResetGnbClkDid_MASK 0x3f800
+#define CC_SMU_MISC_FUSES__PostResetGnbClkDid__SHIFT 0xb
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_dtc_half_MASK 0x40000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_dtc_half__SHIFT 0x12
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_half_MASK 0x80000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_half__SHIFT 0x13
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_half_MASK 0x100000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_half__SHIFT 0x14
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_pdc_half_MASK 0x200000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_pdc_half__SHIFT 0x15
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_dis_MASK 0x400000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_dis__SHIFT 0x16
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_dis_MASK 0x800000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_dis__SHIFT 0x17
+#define CC_SMU_MISC_FUSES__VCE_DISABLE_MASK 0x8000000
+#define CC_SMU_MISC_FUSES__VCE_DISABLE__SHIFT 0x1b
+#define CC_SMU_MISC_FUSES__IOC_IOMMU_DISABLE_MASK 0x10000000
+#define CC_SMU_MISC_FUSES__IOC_IOMMU_DISABLE__SHIFT 0x1c
+#define CC_SMU_MISC_FUSES__GNB_SPARE_MASK 0x60000000
+#define CC_SMU_MISC_FUSES__GNB_SPARE__SHIFT 0x1d
+#define CC_SCLK_VID_FUSES__SClkVid0_MASK 0xff
+#define CC_SCLK_VID_FUSES__SClkVid0__SHIFT 0x0
+#define CC_SCLK_VID_FUSES__SClkVid1_MASK 0xff00
+#define CC_SCLK_VID_FUSES__SClkVid1__SHIFT 0x8
+#define CC_SCLK_VID_FUSES__SClkVid2_MASK 0xff0000
+#define CC_SCLK_VID_FUSES__SClkVid2__SHIFT 0x10
+#define CC_SCLK_VID_FUSES__SClkVid3_MASK 0xff000000
+#define CC_SCLK_VID_FUSES__SClkVid3__SHIFT 0x18
+#define CC_GIO_IOCCFG_FUSES__NB_REV_ID_MASK 0x7fe
+#define CC_GIO_IOCCFG_FUSES__NB_REV_ID__SHIFT 0x1
+#define CC_GIO_IOC_FUSES__IOC_FUSES_MASK 0x3e
+#define CC_GIO_IOC_FUSES__IOC_FUSES__SHIFT 0x1
+#define CC_SMU_TST_EFUSE1_MISC__RF_RM_6_2_MASK 0x3e
+#define CC_SMU_TST_EFUSE1_MISC__RF_RM_6_2__SHIFT 0x1
+#define CC_SMU_TST_EFUSE1_MISC__RME_MASK 0x40
+#define CC_SMU_TST_EFUSE1_MISC__RME__SHIFT 0x6
+#define CC_SMU_TST_EFUSE1_MISC__MBIST_DISABLE_MASK 0x80
+#define CC_SMU_TST_EFUSE1_MISC__MBIST_DISABLE__SHIFT 0x7
+#define CC_SMU_TST_EFUSE1_MISC__HARD_REPAIR_DISABLE_MASK 0x100
+#define CC_SMU_TST_EFUSE1_MISC__HARD_REPAIR_DISABLE__SHIFT 0x8
+#define CC_SMU_TST_EFUSE1_MISC__SOFT_REPAIR_DISABLE_MASK 0x200
+#define CC_SMU_TST_EFUSE1_MISC__SOFT_REPAIR_DISABLE__SHIFT 0x9
+#define CC_SMU_TST_EFUSE1_MISC__GPU_DIS_MASK 0x400
+#define CC_SMU_TST_EFUSE1_MISC__GPU_DIS__SHIFT 0xa
+#define CC_SMU_TST_EFUSE1_MISC__SMS_PWRDWN_DISABLE_MASK 0x800
+#define CC_SMU_TST_EFUSE1_MISC__SMS_PWRDWN_DISABLE__SHIFT 0xb
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISA_MASK 0x1000
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISA__SHIFT 0xc
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISB_MASK 0x2000
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISB__SHIFT 0xd
+#define CC_SMU_TST_EFUSE1_MISC__RM_RF8_MASK 0x4000
+#define CC_SMU_TST_EFUSE1_MISC__RM_RF8__SHIFT 0xe
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE1_MASK 0x400000
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE1__SHIFT 0x16
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE2_MASK 0x800000
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE2__SHIFT 0x17
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE3_MASK 0x1000000
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE3__SHIFT 0x18
+#define CC_SMU_TST_EFUSE1_MISC__VCE_DISABLE_MASK 0x2000000
+#define CC_SMU_TST_EFUSE1_MISC__VCE_DISABLE__SHIFT 0x19
+#define CC_SMU_TST_EFUSE1_MISC__DCE_SCAN_DISABLE_MASK 0x4000000
+#define CC_SMU_TST_EFUSE1_MISC__DCE_SCAN_DISABLE__SHIFT 0x1a
+#define CC_TST_ID_STRAPS__DEVICE_ID_MASK 0xffff0
+#define CC_TST_ID_STRAPS__DEVICE_ID__SHIFT 0x4
+#define CC_TST_ID_STRAPS__MAJOR_REV_ID_MASK 0xf00000
+#define CC_TST_ID_STRAPS__MAJOR_REV_ID__SHIFT 0x14
+#define CC_TST_ID_STRAPS__MINOR_REV_ID_MASK 0xf000000
+#define CC_TST_ID_STRAPS__MINOR_REV_ID__SHIFT 0x18
+#define CC_TST_ID_STRAPS__ATI_REV_ID_MASK 0xf0000000
+#define CC_TST_ID_STRAPS__ATI_REV_ID__SHIFT 0x1c
+#define CC_FCTRL_FUSES__EXT_EFUSE_MACRO_PRESENT_MASK 0x2
+#define CC_FCTRL_FUSES__EXT_EFUSE_MACRO_PRESENT__SHIFT 0x1
+#define CC_HARVEST_FUSES__VCE_DISABLE_MASK 0x6
+#define CC_HARVEST_FUSES__VCE_DISABLE__SHIFT 0x1
+#define CC_HARVEST_FUSES__UVD_DISABLE_MASK 0x10
+#define CC_HARVEST_FUSES__UVD_DISABLE__SHIFT 0x4
+#define CC_HARVEST_FUSES__ACP_DISABLE_MASK 0x40
+#define CC_HARVEST_FUSES__ACP_DISABLE__SHIFT 0x6
+#define CC_HARVEST_FUSES__DC_DISABLE_MASK 0x3f00
+#define CC_HARVEST_FUSES__DC_DISABLE__SHIFT 0x8
+#define SMU_MAIN_PLL_OP_FREQ__PLL_OP_FREQ_MASK 0xffffffff
+#define SMU_MAIN_PLL_OP_FREQ__PLL_OP_FREQ__SHIFT 0x0
+#define SMU_STATUS__SMU_DONE_MASK 0x1
+#define SMU_STATUS__SMU_DONE__SHIFT 0x0
+#define SMU_STATUS__SMU_PASS_MASK 0x2
+#define SMU_STATUS__SMU_PASS__SHIFT 0x1
+#define SMU_FIRMWARE__SMU_IN_PROG_MASK 0x1
+#define SMU_FIRMWARE__SMU_IN_PROG__SHIFT 0x0
+#define SMU_FIRMWARE__SMU_RD_DONE_MASK 0x6
+#define SMU_FIRMWARE__SMU_RD_DONE__SHIFT 0x1
+#define SMU_FIRMWARE__SMU_SRAM_RD_BLOCK_EN_MASK 0x8
+#define SMU_FIRMWARE__SMU_SRAM_RD_BLOCK_EN__SHIFT 0x3
+#define SMU_FIRMWARE__SMU_SRAM_WR_BLOCK_EN_MASK 0x10
+#define SMU_FIRMWARE__SMU_SRAM_WR_BLOCK_EN__SHIFT 0x4
+#define SMU_FIRMWARE__SMU_counter_MASK 0xf00
+#define SMU_FIRMWARE__SMU_counter__SHIFT 0x8
+#define SMU_FIRMWARE__SMU_MODE_MASK 0x10000
+#define SMU_FIRMWARE__SMU_MODE__SHIFT 0x10
+#define SMU_FIRMWARE__SMU_SEL_MASK 0x20000
+#define SMU_FIRMWARE__SMU_SEL__SHIFT 0x11
+#define SMU_INPUT_DATA__START_ADDR_MASK 0x7fffffff
+#define SMU_INPUT_DATA__START_ADDR__SHIFT 0x0
+#define SMU_INPUT_DATA__AUTO_START_MASK 0x80000000
+#define SMU_INPUT_DATA__AUTO_START__SHIFT 0x1f
+#define SMU_EFUSE_0__EFUSE_DATA_MASK 0xffffffff
+#define SMU_EFUSE_0__EFUSE_DATA__SHIFT 0x0
+#define FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x1
+#define FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
+#define FIRMWARE_FLAGS__RESERVED_MASK 0xfffffe
+#define FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
+#define FIRMWARE_FLAGS__TEST_COUNT_MASK 0xff000000
+#define FIRMWARE_FLAGS__TEST_COUNT__SHIFT 0x18
+#define TDC_STATUS__VDD_Boost_MASK 0xff
+#define TDC_STATUS__VDD_Boost__SHIFT 0x0
+#define TDC_STATUS__VDD_Throttle_MASK 0xff00
+#define TDC_STATUS__VDD_Throttle__SHIFT 0x8
+#define TDC_STATUS__VDDC_Boost_MASK 0xff0000
+#define TDC_STATUS__VDDC_Boost__SHIFT 0x10
+#define TDC_STATUS__VDDC_Throttle_MASK 0xff000000
+#define TDC_STATUS__VDDC_Throttle__SHIFT 0x18
+#define TDC_MV_AVERAGE__IDD_MASK 0xffff
+#define TDC_MV_AVERAGE__IDD__SHIFT 0x0
+#define TDC_MV_AVERAGE__IDDC_MASK 0xffff0000
+#define TDC_MV_AVERAGE__IDDC__SHIFT 0x10
+#define TDC_VRM_LIMIT__IDD_MASK 0xffff
+#define TDC_VRM_LIMIT__IDD__SHIFT 0x0
+#define TDC_VRM_LIMIT__IDDC_MASK 0xffff0000
+#define TDC_VRM_LIMIT__IDDC__SHIFT 0x10
+#define FEATURE_STATUS__SCLK_DPM_ON_MASK 0x1
+#define FEATURE_STATUS__SCLK_DPM_ON__SHIFT 0x0
+#define FEATURE_STATUS__MCLK_DPM_ON_MASK 0x2
+#define FEATURE_STATUS__MCLK_DPM_ON__SHIFT 0x1
+#define FEATURE_STATUS__LCLK_DPM_ON_MASK 0x4
+#define FEATURE_STATUS__LCLK_DPM_ON__SHIFT 0x2
+#define FEATURE_STATUS__UVD_DPM_ON_MASK 0x8
+#define FEATURE_STATUS__UVD_DPM_ON__SHIFT 0x3
+#define FEATURE_STATUS__VCE_DPM_ON_MASK 0x10
+#define FEATURE_STATUS__VCE_DPM_ON__SHIFT 0x4
+#define FEATURE_STATUS__SAMU_DPM_ON_MASK 0x20
+#define FEATURE_STATUS__SAMU_DPM_ON__SHIFT 0x5
+#define FEATURE_STATUS__ACP_DPM_ON_MASK 0x40
+#define FEATURE_STATUS__ACP_DPM_ON__SHIFT 0x6
+#define FEATURE_STATUS__PCIE_DPM_ON_MASK 0x80
+#define FEATURE_STATUS__PCIE_DPM_ON__SHIFT 0x7
+#define FEATURE_STATUS__BAPM_ON_MASK 0x100
+#define FEATURE_STATUS__BAPM_ON__SHIFT 0x8
+#define FEATURE_STATUS__LPMX_ON_MASK 0x200
+#define FEATURE_STATUS__LPMX_ON__SHIFT 0x9
+#define FEATURE_STATUS__NBDPM_ON_MASK 0x400
+#define FEATURE_STATUS__NBDPM_ON__SHIFT 0xa
+#define FEATURE_STATUS__LHTC_ON_MASK 0x800
+#define FEATURE_STATUS__LHTC_ON__SHIFT 0xb
+#define FEATURE_STATUS__VPC_ON_MASK 0x1000
+#define FEATURE_STATUS__VPC_ON__SHIFT 0xc
+#define FEATURE_STATUS__VOLTAGE_CONTROLLER_ON_MASK 0x2000
+#define FEATURE_STATUS__VOLTAGE_CONTROLLER_ON__SHIFT 0xd
+#define FEATURE_STATUS__TDC_LIMIT_ON_MASK 0x4000
+#define FEATURE_STATUS__TDC_LIMIT_ON__SHIFT 0xe
+#define FEATURE_STATUS__GPU_CAC_ON_MASK 0x8000
+#define FEATURE_STATUS__GPU_CAC_ON__SHIFT 0xf
+#define FEATURE_STATUS__AVS_ON_MASK 0x10000
+#define FEATURE_STATUS__AVS_ON__SHIFT 0x10
+#define FEATURE_STATUS__SPMI_ON_MASK 0x20000
+#define FEATURE_STATUS__SPMI_ON__SHIFT 0x11
+#define FEATURE_STATUS__SCLK_DPM_FORCED_MASK 0x40000
+#define FEATURE_STATUS__SCLK_DPM_FORCED__SHIFT 0x12
+#define FEATURE_STATUS__MCLK_DPM_FORCED_MASK 0x80000
+#define FEATURE_STATUS__MCLK_DPM_FORCED__SHIFT 0x13
+#define FEATURE_STATUS__LCLK_DPM_FORCED_MASK 0x100000
+#define FEATURE_STATUS__LCLK_DPM_FORCED__SHIFT 0x14
+#define FEATURE_STATUS__PCIE_DPM_FORCED_MASK 0x200000
+#define FEATURE_STATUS__PCIE_DPM_FORCED__SHIFT 0x15
+#define FEATURE_STATUS__RESERVED_MASK 0xffc00000
+#define FEATURE_STATUS__RESERVED__SHIFT 0x16
+#define ENTITY_TEMPERATURES_1__GPU_MASK 0xffffffff
+#define ENTITY_TEMPERATURES_1__GPU__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_1__entries_0_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_1__entries_0_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_2__entries_0_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_2__entries_0_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_4__entries_0_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_4__entries_0_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_5__entries_0_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_5__entries_0_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_7__entries_0_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_7__entries_0_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_8__entries_0_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_8__entries_0_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_10__entries_0_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_10__entries_0_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_11__entries_0_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_11__entries_0_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_13__entries_1_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_13__entries_1_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_14__entries_1_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_14__entries_1_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_16__entries_1_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_16__entries_1_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_17__entries_1_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_17__entries_1_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_19__entries_1_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_19__entries_1_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_20__entries_1_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_20__entries_1_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_22__entries_1_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_22__entries_1_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_23__entries_1_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_23__entries_1_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_25__entries_2_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_25__entries_2_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_26__entries_2_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_26__entries_2_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_28__entries_2_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_28__entries_2_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_29__entries_2_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_29__entries_2_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_31__entries_2_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_31__entries_2_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_32__entries_2_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_32__entries_2_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_34__entries_2_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_34__entries_2_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_35__entries_2_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_35__entries_2_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_37__entries_3_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_37__entries_3_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_38__entries_3_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_38__entries_3_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_40__entries_3_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_40__entries_3_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_41__entries_3_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_41__entries_3_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_43__entries_3_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_43__entries_3_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_44__entries_3_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_44__entries_3_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_46__entries_3_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_46__entries_3_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_47__entries_3_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_47__entries_3_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_49__entries_4_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_49__entries_4_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_50__entries_4_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_50__entries_4_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_52__entries_4_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_52__entries_4_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_53__entries_4_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_53__entries_4_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_55__entries_4_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_55__entries_4_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_56__entries_4_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_56__entries_4_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_58__entries_4_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_58__entries_4_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_59__entries_4_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_59__entries_4_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_61__entries_5_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_61__entries_5_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_62__entries_5_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_62__entries_5_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_64__entries_5_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_64__entries_5_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_65__entries_5_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_65__entries_5_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_67__entries_5_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_67__entries_5_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_68__entries_5_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_68__entries_5_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_70__entries_5_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_70__entries_5_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_71__entries_5_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_71__entries_5_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_73__entries_6_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_73__entries_6_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_74__entries_6_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_74__entries_6_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_76__entries_6_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_76__entries_6_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_77__entries_6_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_77__entries_6_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_79__entries_6_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_79__entries_6_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_80__entries_6_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_80__entries_6_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_82__entries_6_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_82__entries_6_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_83__entries_6_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_83__entries_6_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_85__entries_7_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_85__entries_7_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_86__entries_7_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_86__entries_7_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_88__entries_7_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_88__entries_7_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_89__entries_7_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_89__entries_7_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_91__entries_7_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_91__entries_7_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_92__entries_7_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_92__entries_7_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_94__entries_7_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_94__entries_7_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_95__entries_7_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_95__entries_7_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_McArbBurstTime__SHIFT 0x18
+#define DPM_TABLE_1__GraphicsPIDController_Ki_MASK 0xffffffff
+#define DPM_TABLE_1__GraphicsPIDController_Ki__SHIFT 0x0
+#define DPM_TABLE_2__GraphicsPIDController_LFWindupUpperLim_MASK 0xffffffff
+#define DPM_TABLE_2__GraphicsPIDController_LFWindupUpperLim__SHIFT 0x0
+#define DPM_TABLE_3__GraphicsPIDController_LFWindupLowerLim_MASK 0xffffffff
+#define DPM_TABLE_3__GraphicsPIDController_LFWindupLowerLim__SHIFT 0x0
+#define DPM_TABLE_4__GraphicsPIDController_StatePrecision_MASK 0xffffffff
+#define DPM_TABLE_4__GraphicsPIDController_StatePrecision__SHIFT 0x0
+#define DPM_TABLE_5__GraphicsPIDController_LfPrecision_MASK 0xffffffff
+#define DPM_TABLE_5__GraphicsPIDController_LfPrecision__SHIFT 0x0
+#define DPM_TABLE_6__GraphicsPIDController_LfOffset_MASK 0xffffffff
+#define DPM_TABLE_6__GraphicsPIDController_LfOffset__SHIFT 0x0
+#define DPM_TABLE_7__GraphicsPIDController_MaxState_MASK 0xffffffff
+#define DPM_TABLE_7__GraphicsPIDController_MaxState__SHIFT 0x0
+#define DPM_TABLE_8__GraphicsPIDController_MaxLfFraction_MASK 0xffffffff
+#define DPM_TABLE_8__GraphicsPIDController_MaxLfFraction__SHIFT 0x0
+#define DPM_TABLE_9__GraphicsPIDController_StateShift_MASK 0xffffffff
+#define DPM_TABLE_9__GraphicsPIDController_StateShift__SHIFT 0x0
+#define DPM_TABLE_10__MemoryPIDController_Ki_MASK 0xffffffff
+#define DPM_TABLE_10__MemoryPIDController_Ki__SHIFT 0x0
+#define DPM_TABLE_11__MemoryPIDController_LFWindupUpperLim_MASK 0xffffffff
+#define DPM_TABLE_11__MemoryPIDController_LFWindupUpperLim__SHIFT 0x0
+#define DPM_TABLE_12__MemoryPIDController_LFWindupLowerLim_MASK 0xffffffff
+#define DPM_TABLE_12__MemoryPIDController_LFWindupLowerLim__SHIFT 0x0
+#define DPM_TABLE_13__MemoryPIDController_StatePrecision_MASK 0xffffffff
+#define DPM_TABLE_13__MemoryPIDController_StatePrecision__SHIFT 0x0
+#define DPM_TABLE_14__MemoryPIDController_LfPrecision_MASK 0xffffffff
+#define DPM_TABLE_14__MemoryPIDController_LfPrecision__SHIFT 0x0
+#define DPM_TABLE_15__MemoryPIDController_LfOffset_MASK 0xffffffff
+#define DPM_TABLE_15__MemoryPIDController_LfOffset__SHIFT 0x0
+#define DPM_TABLE_16__MemoryPIDController_MaxState_MASK 0xffffffff
+#define DPM_TABLE_16__MemoryPIDController_MaxState__SHIFT 0x0
+#define DPM_TABLE_17__MemoryPIDController_MaxLfFraction_MASK 0xffffffff
+#define DPM_TABLE_17__MemoryPIDController_MaxLfFraction__SHIFT 0x0
+#define DPM_TABLE_18__MemoryPIDController_StateShift_MASK 0xffffffff
+#define DPM_TABLE_18__MemoryPIDController_StateShift__SHIFT 0x0
+#define DPM_TABLE_19__LinkPIDController_Ki_MASK 0xffffffff
+#define DPM_TABLE_19__LinkPIDController_Ki__SHIFT 0x0
+#define DPM_TABLE_20__LinkPIDController_LFWindupUpperLim_MASK 0xffffffff
+#define DPM_TABLE_20__LinkPIDController_LFWindupUpperLim__SHIFT 0x0
+#define DPM_TABLE_21__LinkPIDController_LFWindupLowerLim_MASK 0xffffffff
+#define DPM_TABLE_21__LinkPIDController_LFWindupLowerLim__SHIFT 0x0
+#define DPM_TABLE_22__LinkPIDController_StatePrecision_MASK 0xffffffff
+#define DPM_TABLE_22__LinkPIDController_StatePrecision__SHIFT 0x0
+#define DPM_TABLE_23__LinkPIDController_LfPrecision_MASK 0xffffffff
+#define DPM_TABLE_23__LinkPIDController_LfPrecision__SHIFT 0x0
+#define DPM_TABLE_24__LinkPIDController_LfOffset_MASK 0xffffffff
+#define DPM_TABLE_24__LinkPIDController_LfOffset__SHIFT 0x0
+#define DPM_TABLE_25__LinkPIDController_MaxState_MASK 0xffffffff
+#define DPM_TABLE_25__LinkPIDController_MaxState__SHIFT 0x0
+#define DPM_TABLE_26__LinkPIDController_MaxLfFraction_MASK 0xffffffff
+#define DPM_TABLE_26__LinkPIDController_MaxLfFraction__SHIFT 0x0
+#define DPM_TABLE_27__LinkPIDController_StateShift_MASK 0xffffffff
+#define DPM_TABLE_27__LinkPIDController_StateShift__SHIFT 0x0
+#define DPM_TABLE_28__SystemFlags_MASK 0xffffffff
+#define DPM_TABLE_28__SystemFlags__SHIFT 0x0
+#define DPM_TABLE_29__VRConfig_MASK 0xffffffff
+#define DPM_TABLE_29__VRConfig__SHIFT 0x0
+#define DPM_TABLE_30__SmioMask1_MASK 0xffffffff
+#define DPM_TABLE_30__SmioMask1__SHIFT 0x0
+#define DPM_TABLE_31__SmioMask2_MASK 0xffffffff
+#define DPM_TABLE_31__SmioMask2__SHIFT 0x0
+#define DPM_TABLE_32__SmioTable1_Pattern_0_padding_MASK 0xff
+#define DPM_TABLE_32__SmioTable1_Pattern_0_padding__SHIFT 0x0
+#define DPM_TABLE_32__SmioTable1_Pattern_0_Smio_MASK 0xff00
+#define DPM_TABLE_32__SmioTable1_Pattern_0_Smio__SHIFT 0x8
+#define DPM_TABLE_32__SmioTable1_Pattern_0_Voltage_MASK 0xffff0000
+#define
DPM_TABLE_32__SmioTable1_Pattern_0_Voltage__SHIFT 0x10 +#define DPM_TABLE_33__SmioTable1_Pattern_1_padding_MASK 0xff +#define DPM_TABLE_33__SmioTable1_Pattern_1_padding__SHIFT 0x0 +#define DPM_TABLE_33__SmioTable1_Pattern_1_Smio_MASK 0xff00 +#define DPM_TABLE_33__SmioTable1_Pattern_1_Smio__SHIFT 0x8 +#define DPM_TABLE_33__SmioTable1_Pattern_1_Voltage_MASK 0xffff0000 +#define DPM_TABLE_33__SmioTable1_Pattern_1_Voltage__SHIFT 0x10 +#define DPM_TABLE_34__SmioTable1_Pattern_2_padding_MASK 0xff +#define DPM_TABLE_34__SmioTable1_Pattern_2_padding__SHIFT 0x0 +#define DPM_TABLE_34__SmioTable1_Pattern_2_Smio_MASK 0xff00 +#define DPM_TABLE_34__SmioTable1_Pattern_2_Smio__SHIFT 0x8 +#define DPM_TABLE_34__SmioTable1_Pattern_2_Voltage_MASK 0xffff0000 +#define DPM_TABLE_34__SmioTable1_Pattern_2_Voltage__SHIFT 0x10 +#define DPM_TABLE_35__SmioTable1_Pattern_3_padding_MASK 0xff +#define DPM_TABLE_35__SmioTable1_Pattern_3_padding__SHIFT 0x0 +#define DPM_TABLE_35__SmioTable1_Pattern_3_Smio_MASK 0xff00 +#define DPM_TABLE_35__SmioTable1_Pattern_3_Smio__SHIFT 0x8 +#define DPM_TABLE_35__SmioTable1_Pattern_3_Voltage_MASK 0xffff0000 +#define DPM_TABLE_35__SmioTable1_Pattern_3_Voltage__SHIFT 0x10 +#define DPM_TABLE_36__SmioTable2_Pattern_0_padding_MASK 0xff +#define DPM_TABLE_36__SmioTable2_Pattern_0_padding__SHIFT 0x0 +#define DPM_TABLE_36__SmioTable2_Pattern_0_Smio_MASK 0xff00 +#define DPM_TABLE_36__SmioTable2_Pattern_0_Smio__SHIFT 0x8 +#define DPM_TABLE_36__SmioTable2_Pattern_0_Voltage_MASK 0xffff0000 +#define DPM_TABLE_36__SmioTable2_Pattern_0_Voltage__SHIFT 0x10 +#define DPM_TABLE_37__SmioTable2_Pattern_1_padding_MASK 0xff +#define DPM_TABLE_37__SmioTable2_Pattern_1_padding__SHIFT 0x0 +#define DPM_TABLE_37__SmioTable2_Pattern_1_Smio_MASK 0xff00 +#define DPM_TABLE_37__SmioTable2_Pattern_1_Smio__SHIFT 0x8 +#define DPM_TABLE_37__SmioTable2_Pattern_1_Voltage_MASK 0xffff0000 +#define DPM_TABLE_37__SmioTable2_Pattern_1_Voltage__SHIFT 0x10 +#define DPM_TABLE_38__SmioTable2_Pattern_2_padding_MASK 0xff +#define DPM_TABLE_38__SmioTable2_Pattern_2_padding__SHIFT 0x0 +#define DPM_TABLE_38__SmioTable2_Pattern_2_Smio_MASK 0xff00 +#define DPM_TABLE_38__SmioTable2_Pattern_2_Smio__SHIFT 0x8 +#define DPM_TABLE_38__SmioTable2_Pattern_2_Voltage_MASK 0xffff0000 +#define DPM_TABLE_38__SmioTable2_Pattern_2_Voltage__SHIFT 0x10 +#define DPM_TABLE_39__SmioTable2_Pattern_3_padding_MASK 0xff +#define DPM_TABLE_39__SmioTable2_Pattern_3_padding__SHIFT 0x0 +#define DPM_TABLE_39__SmioTable2_Pattern_3_Smio_MASK 0xff00 +#define DPM_TABLE_39__SmioTable2_Pattern_3_Smio__SHIFT 0x8 +#define DPM_TABLE_39__SmioTable2_Pattern_3_Voltage_MASK 0xffff0000 +#define DPM_TABLE_39__SmioTable2_Pattern_3_Voltage__SHIFT 0x10 +#define DPM_TABLE_40__VddcLevelCount_MASK 0xffffffff +#define DPM_TABLE_40__VddcLevelCount__SHIFT 0x0 +#define DPM_TABLE_41__VddciLevelCount_MASK 0xffffffff +#define DPM_TABLE_41__VddciLevelCount__SHIFT 0x0 +#define DPM_TABLE_42__VddGfxLevelCount_MASK 0xffffffff +#define DPM_TABLE_42__VddGfxLevelCount__SHIFT 0x0 +#define DPM_TABLE_43__MvddLevelCount_MASK 0xffffffff +#define DPM_TABLE_43__MvddLevelCount__SHIFT 0x0 +#define DPM_TABLE_44__VddcTable_1_MASK 0xffff +#define DPM_TABLE_44__VddcTable_1__SHIFT 0x0 +#define DPM_TABLE_44__VddcTable_0_MASK 0xffff0000 +#define DPM_TABLE_44__VddcTable_0__SHIFT 0x10 +#define DPM_TABLE_45__VddcTable_3_MASK 0xffff +#define DPM_TABLE_45__VddcTable_3__SHIFT 0x0 +#define DPM_TABLE_45__VddcTable_2_MASK 0xffff0000 +#define DPM_TABLE_45__VddcTable_2__SHIFT 0x10 +#define DPM_TABLE_46__VddcTable_5_MASK 0xffff 
+#define DPM_TABLE_46__VddcTable_5__SHIFT 0x0 +#define DPM_TABLE_46__VddcTable_4_MASK 0xffff0000 +#define DPM_TABLE_46__VddcTable_4__SHIFT 0x10 +#define DPM_TABLE_47__VddcTable_7_MASK 0xffff +#define DPM_TABLE_47__VddcTable_7__SHIFT 0x0 +#define DPM_TABLE_47__VddcTable_6_MASK 0xffff0000 +#define DPM_TABLE_47__VddcTable_6__SHIFT 0x10 +#define DPM_TABLE_48__VddcTable_9_MASK 0xffff +#define DPM_TABLE_48__VddcTable_9__SHIFT 0x0 +#define DPM_TABLE_48__VddcTable_8_MASK 0xffff0000 +#define DPM_TABLE_48__VddcTable_8__SHIFT 0x10 +#define DPM_TABLE_49__VddcTable_11_MASK 0xffff +#define DPM_TABLE_49__VddcTable_11__SHIFT 0x0 +#define DPM_TABLE_49__VddcTable_10_MASK 0xffff0000 +#define DPM_TABLE_49__VddcTable_10__SHIFT 0x10 +#define DPM_TABLE_50__VddcTable_13_MASK 0xffff +#define DPM_TABLE_50__VddcTable_13__SHIFT 0x0 +#define DPM_TABLE_50__VddcTable_12_MASK 0xffff0000 +#define DPM_TABLE_50__VddcTable_12__SHIFT 0x10 +#define DPM_TABLE_51__VddcTable_15_MASK 0xffff +#define DPM_TABLE_51__VddcTable_15__SHIFT 0x0 +#define DPM_TABLE_51__VddcTable_14_MASK 0xffff0000 +#define DPM_TABLE_51__VddcTable_14__SHIFT 0x10 +#define DPM_TABLE_52__VddGfxTable_1_MASK 0xffff +#define DPM_TABLE_52__VddGfxTable_1__SHIFT 0x0 +#define DPM_TABLE_52__VddGfxTable_0_MASK 0xffff0000 +#define DPM_TABLE_52__VddGfxTable_0__SHIFT 0x10 +#define DPM_TABLE_53__VddGfxTable_3_MASK 0xffff +#define DPM_TABLE_53__VddGfxTable_3__SHIFT 0x0 +#define DPM_TABLE_53__VddGfxTable_2_MASK 0xffff0000 +#define DPM_TABLE_53__VddGfxTable_2__SHIFT 0x10 +#define DPM_TABLE_54__VddGfxTable_5_MASK 0xffff +#define DPM_TABLE_54__VddGfxTable_5__SHIFT 0x0 +#define DPM_TABLE_54__VddGfxTable_4_MASK 0xffff0000 +#define DPM_TABLE_54__VddGfxTable_4__SHIFT 0x10 +#define DPM_TABLE_55__VddGfxTable_7_MASK 0xffff +#define DPM_TABLE_55__VddGfxTable_7__SHIFT 0x0 +#define DPM_TABLE_55__VddGfxTable_6_MASK 0xffff0000 +#define DPM_TABLE_55__VddGfxTable_6__SHIFT 0x10 +#define DPM_TABLE_56__VddGfxTable_9_MASK 0xffff +#define DPM_TABLE_56__VddGfxTable_9__SHIFT 0x0 +#define DPM_TABLE_56__VddGfxTable_8_MASK 0xffff0000 +#define DPM_TABLE_56__VddGfxTable_8__SHIFT 0x10 +#define DPM_TABLE_57__VddGfxTable_11_MASK 0xffff +#define DPM_TABLE_57__VddGfxTable_11__SHIFT 0x0 +#define DPM_TABLE_57__VddGfxTable_10_MASK 0xffff0000 +#define DPM_TABLE_57__VddGfxTable_10__SHIFT 0x10 +#define DPM_TABLE_58__VddGfxTable_13_MASK 0xffff +#define DPM_TABLE_58__VddGfxTable_13__SHIFT 0x0 +#define DPM_TABLE_58__VddGfxTable_12_MASK 0xffff0000 +#define DPM_TABLE_58__VddGfxTable_12__SHIFT 0x10 +#define DPM_TABLE_59__VddGfxTable_15_MASK 0xffff +#define DPM_TABLE_59__VddGfxTable_15__SHIFT 0x0 +#define DPM_TABLE_59__VddGfxTable_14_MASK 0xffff0000 +#define DPM_TABLE_59__VddGfxTable_14__SHIFT 0x10 +#define DPM_TABLE_60__VddciTable_1_MASK 0xffff +#define DPM_TABLE_60__VddciTable_1__SHIFT 0x0 +#define DPM_TABLE_60__VddciTable_0_MASK 0xffff0000 +#define DPM_TABLE_60__VddciTable_0__SHIFT 0x10 +#define DPM_TABLE_61__VddciTable_3_MASK 0xffff +#define DPM_TABLE_61__VddciTable_3__SHIFT 0x0 +#define DPM_TABLE_61__VddciTable_2_MASK 0xffff0000 +#define DPM_TABLE_61__VddciTable_2__SHIFT 0x10 +#define DPM_TABLE_62__VddciTable_5_MASK 0xffff +#define DPM_TABLE_62__VddciTable_5__SHIFT 0x0 +#define DPM_TABLE_62__VddciTable_4_MASK 0xffff0000 +#define DPM_TABLE_62__VddciTable_4__SHIFT 0x10 +#define DPM_TABLE_63__VddciTable_7_MASK 0xffff +#define DPM_TABLE_63__VddciTable_7__SHIFT 0x0 +#define DPM_TABLE_63__VddciTable_6_MASK 0xffff0000 +#define DPM_TABLE_63__VddciTable_6__SHIFT 0x10 +#define DPM_TABLE_64__BapmVddGfxVidHiSidd_3_MASK 0xff 
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_3__SHIFT 0x0 +#define DPM_TABLE_64__BapmVddGfxVidHiSidd_2_MASK 0xff00 +#define DPM_TABLE_64__BapmVddGfxVidHiSidd_2__SHIFT 0x8 +#define DPM_TABLE_64__BapmVddGfxVidHiSidd_1_MASK 0xff0000 +#define DPM_TABLE_64__BapmVddGfxVidHiSidd_1__SHIFT 0x10 +#define DPM_TABLE_64__BapmVddGfxVidHiSidd_0_MASK 0xff000000 +#define DPM_TABLE_64__BapmVddGfxVidHiSidd_0__SHIFT 0x18 +#define DPM_TABLE_65__BapmVddGfxVidHiSidd_7_MASK 0xff +#define DPM_TABLE_65__BapmVddGfxVidHiSidd_7__SHIFT 0x0 +#define DPM_TABLE_65__BapmVddGfxVidHiSidd_6_MASK 0xff00 +#define DPM_TABLE_65__BapmVddGfxVidHiSidd_6__SHIFT 0x8 +#define DPM_TABLE_65__BapmVddGfxVidHiSidd_5_MASK 0xff0000 +#define DPM_TABLE_65__BapmVddGfxVidHiSidd_5__SHIFT 0x10 +#define DPM_TABLE_65__BapmVddGfxVidHiSidd_4_MASK 0xff000000 +#define DPM_TABLE_65__BapmVddGfxVidHiSidd_4__SHIFT 0x18 +#define DPM_TABLE_66__BapmVddGfxVidHiSidd_11_MASK 0xff +#define DPM_TABLE_66__BapmVddGfxVidHiSidd_11__SHIFT 0x0 +#define DPM_TABLE_66__BapmVddGfxVidHiSidd_10_MASK 0xff00 +#define DPM_TABLE_66__BapmVddGfxVidHiSidd_10__SHIFT 0x8 +#define DPM_TABLE_66__BapmVddGfxVidHiSidd_9_MASK 0xff0000 +#define DPM_TABLE_66__BapmVddGfxVidHiSidd_9__SHIFT 0x10 +#define DPM_TABLE_66__BapmVddGfxVidHiSidd_8_MASK 0xff000000 +#define DPM_TABLE_66__BapmVddGfxVidHiSidd_8__SHIFT 0x18 +#define DPM_TABLE_67__BapmVddGfxVidHiSidd_15_MASK 0xff +#define DPM_TABLE_67__BapmVddGfxVidHiSidd_15__SHIFT 0x0 +#define DPM_TABLE_67__BapmVddGfxVidHiSidd_14_MASK 0xff00 +#define DPM_TABLE_67__BapmVddGfxVidHiSidd_14__SHIFT 0x8 +#define DPM_TABLE_67__BapmVddGfxVidHiSidd_13_MASK 0xff0000 +#define DPM_TABLE_67__BapmVddGfxVidHiSidd_13__SHIFT 0x10 +#define DPM_TABLE_67__BapmVddGfxVidHiSidd_12_MASK 0xff000000 +#define DPM_TABLE_67__BapmVddGfxVidHiSidd_12__SHIFT 0x18 +#define DPM_TABLE_68__BapmVddGfxVidLoSidd_3_MASK 0xff +#define DPM_TABLE_68__BapmVddGfxVidLoSidd_3__SHIFT 0x0 +#define DPM_TABLE_68__BapmVddGfxVidLoSidd_2_MASK 0xff00 +#define DPM_TABLE_68__BapmVddGfxVidLoSidd_2__SHIFT 0x8 +#define DPM_TABLE_68__BapmVddGfxVidLoSidd_1_MASK 0xff0000 +#define DPM_TABLE_68__BapmVddGfxVidLoSidd_1__SHIFT 0x10 +#define DPM_TABLE_68__BapmVddGfxVidLoSidd_0_MASK 0xff000000 +#define DPM_TABLE_68__BapmVddGfxVidLoSidd_0__SHIFT 0x18 +#define DPM_TABLE_69__BapmVddGfxVidLoSidd_7_MASK 0xff +#define DPM_TABLE_69__BapmVddGfxVidLoSidd_7__SHIFT 0x0 +#define DPM_TABLE_69__BapmVddGfxVidLoSidd_6_MASK 0xff00 +#define DPM_TABLE_69__BapmVddGfxVidLoSidd_6__SHIFT 0x8 +#define DPM_TABLE_69__BapmVddGfxVidLoSidd_5_MASK 0xff0000 +#define DPM_TABLE_69__BapmVddGfxVidLoSidd_5__SHIFT 0x10 +#define DPM_TABLE_69__BapmVddGfxVidLoSidd_4_MASK 0xff000000 +#define DPM_TABLE_69__BapmVddGfxVidLoSidd_4__SHIFT 0x18 +#define DPM_TABLE_70__BapmVddGfxVidLoSidd_11_MASK 0xff +#define DPM_TABLE_70__BapmVddGfxVidLoSidd_11__SHIFT 0x0 +#define DPM_TABLE_70__BapmVddGfxVidLoSidd_10_MASK 0xff00 +#define DPM_TABLE_70__BapmVddGfxVidLoSidd_10__SHIFT 0x8 +#define DPM_TABLE_70__BapmVddGfxVidLoSidd_9_MASK 0xff0000 +#define DPM_TABLE_70__BapmVddGfxVidLoSidd_9__SHIFT 0x10 +#define DPM_TABLE_70__BapmVddGfxVidLoSidd_8_MASK 0xff000000 +#define DPM_TABLE_70__BapmVddGfxVidLoSidd_8__SHIFT 0x18 +#define DPM_TABLE_71__BapmVddGfxVidLoSidd_15_MASK 0xff +#define DPM_TABLE_71__BapmVddGfxVidLoSidd_15__SHIFT 0x0 +#define DPM_TABLE_71__BapmVddGfxVidLoSidd_14_MASK 0xff00 +#define DPM_TABLE_71__BapmVddGfxVidLoSidd_14__SHIFT 0x8 +#define DPM_TABLE_71__BapmVddGfxVidLoSidd_13_MASK 0xff0000 +#define DPM_TABLE_71__BapmVddGfxVidLoSidd_13__SHIFT 0x10 +#define 
DPM_TABLE_71__BapmVddGfxVidLoSidd_12_MASK 0xff000000 +#define DPM_TABLE_71__BapmVddGfxVidLoSidd_12__SHIFT 0x18 +#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_3_MASK 0xff +#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_3__SHIFT 0x0 +#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_2_MASK 0xff00 +#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_2__SHIFT 0x8 +#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_1_MASK 0xff0000 +#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_1__SHIFT 0x10 +#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_0_MASK 0xff000000 +#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_0__SHIFT 0x18 +#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_7_MASK 0xff +#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_7__SHIFT 0x0 +#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_6_MASK 0xff00 +#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_6__SHIFT 0x8 +#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_5_MASK 0xff0000 +#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_5__SHIFT 0x10 +#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_4_MASK 0xff000000 +#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_4__SHIFT 0x18 +#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_11_MASK 0xff +#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_11__SHIFT 0x0 +#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_10_MASK 0xff00 +#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_10__SHIFT 0x8 +#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_9_MASK 0xff0000 +#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_9__SHIFT 0x10 +#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_8_MASK 0xff000000 +#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_8__SHIFT 0x18 +#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_15_MASK 0xff +#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_15__SHIFT 0x0 +#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_14_MASK 0xff00 +#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_14__SHIFT 0x8 +#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_13_MASK 0xff0000 +#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_13__SHIFT 0x10 +#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_12_MASK 0xff000000 +#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_12__SHIFT 0x18 +#define DPM_TABLE_76__BapmVddcVidHiSidd_3_MASK 0xff +#define DPM_TABLE_76__BapmVddcVidHiSidd_3__SHIFT 0x0 +#define DPM_TABLE_76__BapmVddcVidHiSidd_2_MASK 0xff00 +#define DPM_TABLE_76__BapmVddcVidHiSidd_2__SHIFT 0x8 +#define DPM_TABLE_76__BapmVddcVidHiSidd_1_MASK 0xff0000 +#define DPM_TABLE_76__BapmVddcVidHiSidd_1__SHIFT 0x10 +#define DPM_TABLE_76__BapmVddcVidHiSidd_0_MASK 0xff000000 +#define DPM_TABLE_76__BapmVddcVidHiSidd_0__SHIFT 0x18 +#define DPM_TABLE_77__BapmVddcVidHiSidd_7_MASK 0xff +#define DPM_TABLE_77__BapmVddcVidHiSidd_7__SHIFT 0x0 +#define DPM_TABLE_77__BapmVddcVidHiSidd_6_MASK 0xff00 +#define DPM_TABLE_77__BapmVddcVidHiSidd_6__SHIFT 0x8 +#define DPM_TABLE_77__BapmVddcVidHiSidd_5_MASK 0xff0000 +#define DPM_TABLE_77__BapmVddcVidHiSidd_5__SHIFT 0x10 +#define DPM_TABLE_77__BapmVddcVidHiSidd_4_MASK 0xff000000 +#define DPM_TABLE_77__BapmVddcVidHiSidd_4__SHIFT 0x18 +#define DPM_TABLE_78__BapmVddcVidHiSidd_11_MASK 0xff +#define DPM_TABLE_78__BapmVddcVidHiSidd_11__SHIFT 0x0 +#define DPM_TABLE_78__BapmVddcVidHiSidd_10_MASK 0xff00 +#define DPM_TABLE_78__BapmVddcVidHiSidd_10__SHIFT 0x8 +#define DPM_TABLE_78__BapmVddcVidHiSidd_9_MASK 0xff0000 +#define DPM_TABLE_78__BapmVddcVidHiSidd_9__SHIFT 0x10 +#define DPM_TABLE_78__BapmVddcVidHiSidd_8_MASK 0xff000000 +#define DPM_TABLE_78__BapmVddcVidHiSidd_8__SHIFT 0x18 +#define DPM_TABLE_79__BapmVddcVidHiSidd_15_MASK 0xff +#define DPM_TABLE_79__BapmVddcVidHiSidd_15__SHIFT 0x0 +#define DPM_TABLE_79__BapmVddcVidHiSidd_14_MASK 0xff00 +#define DPM_TABLE_79__BapmVddcVidHiSidd_14__SHIFT 0x8 +#define 
DPM_TABLE_79__BapmVddcVidHiSidd_13_MASK 0xff0000 +#define DPM_TABLE_79__BapmVddcVidHiSidd_13__SHIFT 0x10 +#define DPM_TABLE_79__BapmVddcVidHiSidd_12_MASK 0xff000000 +#define DPM_TABLE_79__BapmVddcVidHiSidd_12__SHIFT 0x18 +#define DPM_TABLE_80__BapmVddcVidLoSidd_3_MASK 0xff +#define DPM_TABLE_80__BapmVddcVidLoSidd_3__SHIFT 0x0 +#define DPM_TABLE_80__BapmVddcVidLoSidd_2_MASK 0xff00 +#define DPM_TABLE_80__BapmVddcVidLoSidd_2__SHIFT 0x8 +#define DPM_TABLE_80__BapmVddcVidLoSidd_1_MASK 0xff0000 +#define DPM_TABLE_80__BapmVddcVidLoSidd_1__SHIFT 0x10 +#define DPM_TABLE_80__BapmVddcVidLoSidd_0_MASK 0xff000000 +#define DPM_TABLE_80__BapmVddcVidLoSidd_0__SHIFT 0x18 +#define DPM_TABLE_81__BapmVddcVidLoSidd_7_MASK 0xff +#define DPM_TABLE_81__BapmVddcVidLoSidd_7__SHIFT 0x0 +#define DPM_TABLE_81__BapmVddcVidLoSidd_6_MASK 0xff00 +#define DPM_TABLE_81__BapmVddcVidLoSidd_6__SHIFT 0x8 +#define DPM_TABLE_81__BapmVddcVidLoSidd_5_MASK 0xff0000 +#define DPM_TABLE_81__BapmVddcVidLoSidd_5__SHIFT 0x10 +#define DPM_TABLE_81__BapmVddcVidLoSidd_4_MASK 0xff000000 +#define DPM_TABLE_81__BapmVddcVidLoSidd_4__SHIFT 0x18 +#define DPM_TABLE_82__BapmVddcVidLoSidd_11_MASK 0xff +#define DPM_TABLE_82__BapmVddcVidLoSidd_11__SHIFT 0x0 +#define DPM_TABLE_82__BapmVddcVidLoSidd_10_MASK 0xff00 +#define DPM_TABLE_82__BapmVddcVidLoSidd_10__SHIFT 0x8 +#define DPM_TABLE_82__BapmVddcVidLoSidd_9_MASK 0xff0000 +#define DPM_TABLE_82__BapmVddcVidLoSidd_9__SHIFT 0x10 +#define DPM_TABLE_82__BapmVddcVidLoSidd_8_MASK 0xff000000 +#define DPM_TABLE_82__BapmVddcVidLoSidd_8__SHIFT 0x18 +#define DPM_TABLE_83__BapmVddcVidLoSidd_15_MASK 0xff +#define DPM_TABLE_83__BapmVddcVidLoSidd_15__SHIFT 0x0 +#define DPM_TABLE_83__BapmVddcVidLoSidd_14_MASK 0xff00 +#define DPM_TABLE_83__BapmVddcVidLoSidd_14__SHIFT 0x8 +#define DPM_TABLE_83__BapmVddcVidLoSidd_13_MASK 0xff0000 +#define DPM_TABLE_83__BapmVddcVidLoSidd_13__SHIFT 0x10 +#define DPM_TABLE_83__BapmVddcVidLoSidd_12_MASK 0xff000000 +#define DPM_TABLE_83__BapmVddcVidLoSidd_12__SHIFT 0x18 +#define DPM_TABLE_84__BapmVddcVidHiSidd2_3_MASK 0xff +#define DPM_TABLE_84__BapmVddcVidHiSidd2_3__SHIFT 0x0 +#define DPM_TABLE_84__BapmVddcVidHiSidd2_2_MASK 0xff00 +#define DPM_TABLE_84__BapmVddcVidHiSidd2_2__SHIFT 0x8 +#define DPM_TABLE_84__BapmVddcVidHiSidd2_1_MASK 0xff0000 +#define DPM_TABLE_84__BapmVddcVidHiSidd2_1__SHIFT 0x10 +#define DPM_TABLE_84__BapmVddcVidHiSidd2_0_MASK 0xff000000 +#define DPM_TABLE_84__BapmVddcVidHiSidd2_0__SHIFT 0x18 +#define DPM_TABLE_85__BapmVddcVidHiSidd2_7_MASK 0xff +#define DPM_TABLE_85__BapmVddcVidHiSidd2_7__SHIFT 0x0 +#define DPM_TABLE_85__BapmVddcVidHiSidd2_6_MASK 0xff00 +#define DPM_TABLE_85__BapmVddcVidHiSidd2_6__SHIFT 0x8 +#define DPM_TABLE_85__BapmVddcVidHiSidd2_5_MASK 0xff0000 +#define DPM_TABLE_85__BapmVddcVidHiSidd2_5__SHIFT 0x10 +#define DPM_TABLE_85__BapmVddcVidHiSidd2_4_MASK 0xff000000 +#define DPM_TABLE_85__BapmVddcVidHiSidd2_4__SHIFT 0x18 +#define DPM_TABLE_86__BapmVddcVidHiSidd2_11_MASK 0xff +#define DPM_TABLE_86__BapmVddcVidHiSidd2_11__SHIFT 0x0 +#define DPM_TABLE_86__BapmVddcVidHiSidd2_10_MASK 0xff00 +#define DPM_TABLE_86__BapmVddcVidHiSidd2_10__SHIFT 0x8 +#define DPM_TABLE_86__BapmVddcVidHiSidd2_9_MASK 0xff0000 +#define DPM_TABLE_86__BapmVddcVidHiSidd2_9__SHIFT 0x10 +#define DPM_TABLE_86__BapmVddcVidHiSidd2_8_MASK 0xff000000 +#define DPM_TABLE_86__BapmVddcVidHiSidd2_8__SHIFT 0x18 +#define DPM_TABLE_87__BapmVddcVidHiSidd2_15_MASK 0xff +#define DPM_TABLE_87__BapmVddcVidHiSidd2_15__SHIFT 0x0 +#define DPM_TABLE_87__BapmVddcVidHiSidd2_14_MASK 0xff00 +#define 
DPM_TABLE_87__BapmVddcVidHiSidd2_14__SHIFT 0x8 +#define DPM_TABLE_87__BapmVddcVidHiSidd2_13_MASK 0xff0000 +#define DPM_TABLE_87__BapmVddcVidHiSidd2_13__SHIFT 0x10 +#define DPM_TABLE_87__BapmVddcVidHiSidd2_12_MASK 0xff000000 +#define DPM_TABLE_87__BapmVddcVidHiSidd2_12__SHIFT 0x18 +#define DPM_TABLE_88__MasterDeepSleepControl_MASK 0xff +#define DPM_TABLE_88__MasterDeepSleepControl__SHIFT 0x0 +#define DPM_TABLE_88__LinkLevelCount_MASK 0xff00 +#define DPM_TABLE_88__LinkLevelCount__SHIFT 0x8 +#define DPM_TABLE_88__MemoryDpmLevelCount_MASK 0xff0000 +#define DPM_TABLE_88__MemoryDpmLevelCount__SHIFT 0x10 +#define DPM_TABLE_88__GraphicsDpmLevelCount_MASK 0xff000000 +#define DPM_TABLE_88__GraphicsDpmLevelCount__SHIFT 0x18 +#define DPM_TABLE_89__SamuLevelCount_MASK 0xff +#define DPM_TABLE_89__SamuLevelCount__SHIFT 0x0 +#define DPM_TABLE_89__AcpLevelCount_MASK 0xff00 +#define DPM_TABLE_89__AcpLevelCount__SHIFT 0x8 +#define DPM_TABLE_89__VceLevelCount_MASK 0xff0000 +#define DPM_TABLE_89__VceLevelCount__SHIFT 0x10 +#define DPM_TABLE_89__UvdLevelCount_MASK 0xff000000 +#define DPM_TABLE_89__UvdLevelCount__SHIFT 0x18 +#define DPM_TABLE_90__Reserved_0_MASK 0xff +#define DPM_TABLE_90__Reserved_0__SHIFT 0x0 +#define DPM_TABLE_90__ThermOutMode_MASK 0xff00 +#define DPM_TABLE_90__ThermOutMode__SHIFT 0x8 +#define DPM_TABLE_90__ThermOutPolarity_MASK 0xff0000 +#define DPM_TABLE_90__ThermOutPolarity__SHIFT 0x10 +#define DPM_TABLE_90__ThermOutGpio_MASK 0xff000000 +#define DPM_TABLE_90__ThermOutGpio__SHIFT 0x18 +#define DPM_TABLE_91__Reserved_0_MASK 0xffffffff +#define DPM_TABLE_91__Reserved_0__SHIFT 0x0 +#define DPM_TABLE_92__Reserved_1_MASK 0xffffffff +#define DPM_TABLE_92__Reserved_1__SHIFT 0x0 +#define DPM_TABLE_93__Reserved_2_MASK 0xffffffff +#define DPM_TABLE_93__Reserved_2__SHIFT 0x0 +#define DPM_TABLE_94__Reserved_3_MASK 0xffffffff +#define DPM_TABLE_94__Reserved_3__SHIFT 0x0 +#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_96__GraphicsLevel_0_SclkFrequency_MASK 0xffffffff +#define DPM_TABLE_96__GraphicsLevel_0_SclkFrequency__SHIFT 0x0 +#define DPM_TABLE_97__GraphicsLevel_0_ActivityLevel_MASK 0xffff +#define DPM_TABLE_97__GraphicsLevel_0_ActivityLevel__SHIFT 0x0 +#define DPM_TABLE_97__GraphicsLevel_0_DeepSleepDivId_MASK 0xff0000 +#define DPM_TABLE_97__GraphicsLevel_0_DeepSleepDivId__SHIFT 0x10 +#define DPM_TABLE_97__GraphicsLevel_0_pcieDpmLevel_MASK 0xff000000 +#define DPM_TABLE_97__GraphicsLevel_0_pcieDpmLevel__SHIFT 0x18 +#define DPM_TABLE_98__GraphicsLevel_0_CgSpllFuncCntl3_MASK 0xffffffff +#define DPM_TABLE_98__GraphicsLevel_0_CgSpllFuncCntl3__SHIFT 0x0 +#define DPM_TABLE_99__GraphicsLevel_0_CgSpllFuncCntl4_MASK 0xffffffff +#define DPM_TABLE_99__GraphicsLevel_0_CgSpllFuncCntl4__SHIFT 0x0 +#define DPM_TABLE_100__GraphicsLevel_0_SpllSpreadSpectrum_MASK 0xffffffff +#define DPM_TABLE_100__GraphicsLevel_0_SpllSpreadSpectrum__SHIFT 0x0 +#define DPM_TABLE_101__GraphicsLevel_0_SpllSpreadSpectrum2_MASK 0xffffffff +#define DPM_TABLE_101__GraphicsLevel_0_SpllSpreadSpectrum2__SHIFT 0x0 +#define 
DPM_TABLE_102__GraphicsLevel_0_CcPwrDynRm_MASK 0xffffffff +#define DPM_TABLE_102__GraphicsLevel_0_CcPwrDynRm__SHIFT 0x0 +#define DPM_TABLE_103__GraphicsLevel_0_CcPwrDynRm1_MASK 0xffffffff +#define DPM_TABLE_103__GraphicsLevel_0_CcPwrDynRm1__SHIFT 0x0 +#define DPM_TABLE_104__GraphicsLevel_0_EnabledForThrottle_MASK 0xff +#define DPM_TABLE_104__GraphicsLevel_0_EnabledForThrottle__SHIFT 0x0 +#define DPM_TABLE_104__GraphicsLevel_0_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_104__GraphicsLevel_0_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_104__GraphicsLevel_0_DisplayWatermark_MASK 0xff0000 +#define DPM_TABLE_104__GraphicsLevel_0_DisplayWatermark__SHIFT 0x10 +#define DPM_TABLE_104__GraphicsLevel_0_SclkDid_MASK 0xff000000 +#define DPM_TABLE_104__GraphicsLevel_0_SclkDid__SHIFT 0x18 +#define DPM_TABLE_105__GraphicsLevel_0_PowerThrottle_MASK 0xff +#define DPM_TABLE_105__GraphicsLevel_0_PowerThrottle__SHIFT 0x0 +#define DPM_TABLE_105__GraphicsLevel_0_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_105__GraphicsLevel_0_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_105__GraphicsLevel_0_DownHyst_MASK 0xff0000 +#define DPM_TABLE_105__GraphicsLevel_0_DownHyst__SHIFT 0x10 +#define DPM_TABLE_105__GraphicsLevel_0_UpHyst_MASK 0xff000000 +#define DPM_TABLE_105__GraphicsLevel_0_UpHyst__SHIFT 0x18 +#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_107__GraphicsLevel_1_SclkFrequency_MASK 0xffffffff +#define DPM_TABLE_107__GraphicsLevel_1_SclkFrequency__SHIFT 0x0 +#define DPM_TABLE_108__GraphicsLevel_1_ActivityLevel_MASK 0xffff +#define DPM_TABLE_108__GraphicsLevel_1_ActivityLevel__SHIFT 0x0 +#define DPM_TABLE_108__GraphicsLevel_1_DeepSleepDivId_MASK 0xff0000 +#define DPM_TABLE_108__GraphicsLevel_1_DeepSleepDivId__SHIFT 0x10 +#define DPM_TABLE_108__GraphicsLevel_1_pcieDpmLevel_MASK 0xff000000 +#define DPM_TABLE_108__GraphicsLevel_1_pcieDpmLevel__SHIFT 0x18 +#define DPM_TABLE_109__GraphicsLevel_1_CgSpllFuncCntl3_MASK 0xffffffff +#define DPM_TABLE_109__GraphicsLevel_1_CgSpllFuncCntl3__SHIFT 0x0 +#define DPM_TABLE_110__GraphicsLevel_1_CgSpllFuncCntl4_MASK 0xffffffff +#define DPM_TABLE_110__GraphicsLevel_1_CgSpllFuncCntl4__SHIFT 0x0 +#define DPM_TABLE_111__GraphicsLevel_1_SpllSpreadSpectrum_MASK 0xffffffff +#define DPM_TABLE_111__GraphicsLevel_1_SpllSpreadSpectrum__SHIFT 0x0 +#define DPM_TABLE_112__GraphicsLevel_1_SpllSpreadSpectrum2_MASK 0xffffffff +#define DPM_TABLE_112__GraphicsLevel_1_SpllSpreadSpectrum2__SHIFT 0x0 +#define DPM_TABLE_113__GraphicsLevel_1_CcPwrDynRm_MASK 0xffffffff +#define DPM_TABLE_113__GraphicsLevel_1_CcPwrDynRm__SHIFT 0x0 +#define DPM_TABLE_114__GraphicsLevel_1_CcPwrDynRm1_MASK 0xffffffff +#define DPM_TABLE_114__GraphicsLevel_1_CcPwrDynRm1__SHIFT 0x0 +#define DPM_TABLE_115__GraphicsLevel_1_EnabledForThrottle_MASK 0xff +#define DPM_TABLE_115__GraphicsLevel_1_EnabledForThrottle__SHIFT 0x0 +#define DPM_TABLE_115__GraphicsLevel_1_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_115__GraphicsLevel_1_EnabledForActivity__SHIFT 0x8 +#define 
DPM_TABLE_115__GraphicsLevel_1_DisplayWatermark_MASK 0xff0000 +#define DPM_TABLE_115__GraphicsLevel_1_DisplayWatermark__SHIFT 0x10 +#define DPM_TABLE_115__GraphicsLevel_1_SclkDid_MASK 0xff000000 +#define DPM_TABLE_115__GraphicsLevel_1_SclkDid__SHIFT 0x18 +#define DPM_TABLE_116__GraphicsLevel_1_PowerThrottle_MASK 0xff +#define DPM_TABLE_116__GraphicsLevel_1_PowerThrottle__SHIFT 0x0 +#define DPM_TABLE_116__GraphicsLevel_1_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_116__GraphicsLevel_1_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_116__GraphicsLevel_1_DownHyst_MASK 0xff0000 +#define DPM_TABLE_116__GraphicsLevel_1_DownHyst__SHIFT 0x10 +#define DPM_TABLE_116__GraphicsLevel_1_UpHyst_MASK 0xff000000 +#define DPM_TABLE_116__GraphicsLevel_1_UpHyst__SHIFT 0x18 +#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_118__GraphicsLevel_2_SclkFrequency_MASK 0xffffffff +#define DPM_TABLE_118__GraphicsLevel_2_SclkFrequency__SHIFT 0x0 +#define DPM_TABLE_119__GraphicsLevel_2_ActivityLevel_MASK 0xffff +#define DPM_TABLE_119__GraphicsLevel_2_ActivityLevel__SHIFT 0x0 +#define DPM_TABLE_119__GraphicsLevel_2_DeepSleepDivId_MASK 0xff0000 +#define DPM_TABLE_119__GraphicsLevel_2_DeepSleepDivId__SHIFT 0x10 +#define DPM_TABLE_119__GraphicsLevel_2_pcieDpmLevel_MASK 0xff000000 +#define DPM_TABLE_119__GraphicsLevel_2_pcieDpmLevel__SHIFT 0x18 +#define DPM_TABLE_120__GraphicsLevel_2_CgSpllFuncCntl3_MASK 0xffffffff +#define DPM_TABLE_120__GraphicsLevel_2_CgSpllFuncCntl3__SHIFT 0x0 +#define DPM_TABLE_121__GraphicsLevel_2_CgSpllFuncCntl4_MASK 0xffffffff +#define DPM_TABLE_121__GraphicsLevel_2_CgSpllFuncCntl4__SHIFT 0x0 +#define DPM_TABLE_122__GraphicsLevel_2_SpllSpreadSpectrum_MASK 0xffffffff +#define DPM_TABLE_122__GraphicsLevel_2_SpllSpreadSpectrum__SHIFT 0x0 +#define DPM_TABLE_123__GraphicsLevel_2_SpllSpreadSpectrum2_MASK 0xffffffff +#define DPM_TABLE_123__GraphicsLevel_2_SpllSpreadSpectrum2__SHIFT 0x0 +#define DPM_TABLE_124__GraphicsLevel_2_CcPwrDynRm_MASK 0xffffffff +#define DPM_TABLE_124__GraphicsLevel_2_CcPwrDynRm__SHIFT 0x0 +#define DPM_TABLE_125__GraphicsLevel_2_CcPwrDynRm1_MASK 0xffffffff +#define DPM_TABLE_125__GraphicsLevel_2_CcPwrDynRm1__SHIFT 0x0 +#define DPM_TABLE_126__GraphicsLevel_2_EnabledForThrottle_MASK 0xff +#define DPM_TABLE_126__GraphicsLevel_2_EnabledForThrottle__SHIFT 0x0 +#define DPM_TABLE_126__GraphicsLevel_2_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_126__GraphicsLevel_2_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_126__GraphicsLevel_2_DisplayWatermark_MASK 0xff0000 +#define DPM_TABLE_126__GraphicsLevel_2_DisplayWatermark__SHIFT 0x10 +#define DPM_TABLE_126__GraphicsLevel_2_SclkDid_MASK 0xff000000 +#define DPM_TABLE_126__GraphicsLevel_2_SclkDid__SHIFT 0x18 +#define DPM_TABLE_127__GraphicsLevel_2_PowerThrottle_MASK 0xff +#define DPM_TABLE_127__GraphicsLevel_2_PowerThrottle__SHIFT 0x0 +#define DPM_TABLE_127__GraphicsLevel_2_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_127__GraphicsLevel_2_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_127__GraphicsLevel_2_DownHyst_MASK 
0xff0000 +#define DPM_TABLE_127__GraphicsLevel_2_DownHyst__SHIFT 0x10 +#define DPM_TABLE_127__GraphicsLevel_2_UpHyst_MASK 0xff000000 +#define DPM_TABLE_127__GraphicsLevel_2_UpHyst__SHIFT 0x18 +#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_129__GraphicsLevel_3_SclkFrequency_MASK 0xffffffff +#define DPM_TABLE_129__GraphicsLevel_3_SclkFrequency__SHIFT 0x0 +#define DPM_TABLE_130__GraphicsLevel_3_ActivityLevel_MASK 0xffff +#define DPM_TABLE_130__GraphicsLevel_3_ActivityLevel__SHIFT 0x0 +#define DPM_TABLE_130__GraphicsLevel_3_DeepSleepDivId_MASK 0xff0000 +#define DPM_TABLE_130__GraphicsLevel_3_DeepSleepDivId__SHIFT 0x10 +#define DPM_TABLE_130__GraphicsLevel_3_pcieDpmLevel_MASK 0xff000000 +#define DPM_TABLE_130__GraphicsLevel_3_pcieDpmLevel__SHIFT 0x18 +#define DPM_TABLE_131__GraphicsLevel_3_CgSpllFuncCntl3_MASK 0xffffffff +#define DPM_TABLE_131__GraphicsLevel_3_CgSpllFuncCntl3__SHIFT 0x0 +#define DPM_TABLE_132__GraphicsLevel_3_CgSpllFuncCntl4_MASK 0xffffffff +#define DPM_TABLE_132__GraphicsLevel_3_CgSpllFuncCntl4__SHIFT 0x0 +#define DPM_TABLE_133__GraphicsLevel_3_SpllSpreadSpectrum_MASK 0xffffffff +#define DPM_TABLE_133__GraphicsLevel_3_SpllSpreadSpectrum__SHIFT 0x0 +#define DPM_TABLE_134__GraphicsLevel_3_SpllSpreadSpectrum2_MASK 0xffffffff +#define DPM_TABLE_134__GraphicsLevel_3_SpllSpreadSpectrum2__SHIFT 0x0 +#define DPM_TABLE_135__GraphicsLevel_3_CcPwrDynRm_MASK 0xffffffff +#define DPM_TABLE_135__GraphicsLevel_3_CcPwrDynRm__SHIFT 0x0 +#define DPM_TABLE_136__GraphicsLevel_3_CcPwrDynRm1_MASK 0xffffffff +#define DPM_TABLE_136__GraphicsLevel_3_CcPwrDynRm1__SHIFT 0x0 +#define DPM_TABLE_137__GraphicsLevel_3_EnabledForThrottle_MASK 0xff +#define DPM_TABLE_137__GraphicsLevel_3_EnabledForThrottle__SHIFT 0x0 +#define DPM_TABLE_137__GraphicsLevel_3_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_137__GraphicsLevel_3_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_137__GraphicsLevel_3_DisplayWatermark_MASK 0xff0000 +#define DPM_TABLE_137__GraphicsLevel_3_DisplayWatermark__SHIFT 0x10 +#define DPM_TABLE_137__GraphicsLevel_3_SclkDid_MASK 0xff000000 +#define DPM_TABLE_137__GraphicsLevel_3_SclkDid__SHIFT 0x18 +#define DPM_TABLE_138__GraphicsLevel_3_PowerThrottle_MASK 0xff +#define DPM_TABLE_138__GraphicsLevel_3_PowerThrottle__SHIFT 0x0 +#define DPM_TABLE_138__GraphicsLevel_3_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_138__GraphicsLevel_3_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_138__GraphicsLevel_3_DownHyst_MASK 0xff0000 +#define DPM_TABLE_138__GraphicsLevel_3_DownHyst__SHIFT 0x10 +#define DPM_TABLE_138__GraphicsLevel_3_UpHyst_MASK 0xff000000 +#define DPM_TABLE_138__GraphicsLevel_3_UpHyst__SHIFT 0x18 +#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddci_MASK 0xff0000 +#define 
DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_140__GraphicsLevel_4_SclkFrequency_MASK 0xffffffff +#define DPM_TABLE_140__GraphicsLevel_4_SclkFrequency__SHIFT 0x0 +#define DPM_TABLE_141__GraphicsLevel_4_ActivityLevel_MASK 0xffff +#define DPM_TABLE_141__GraphicsLevel_4_ActivityLevel__SHIFT 0x0 +#define DPM_TABLE_141__GraphicsLevel_4_DeepSleepDivId_MASK 0xff0000 +#define DPM_TABLE_141__GraphicsLevel_4_DeepSleepDivId__SHIFT 0x10 +#define DPM_TABLE_141__GraphicsLevel_4_pcieDpmLevel_MASK 0xff000000 +#define DPM_TABLE_141__GraphicsLevel_4_pcieDpmLevel__SHIFT 0x18 +#define DPM_TABLE_142__GraphicsLevel_4_CgSpllFuncCntl3_MASK 0xffffffff +#define DPM_TABLE_142__GraphicsLevel_4_CgSpllFuncCntl3__SHIFT 0x0 +#define DPM_TABLE_143__GraphicsLevel_4_CgSpllFuncCntl4_MASK 0xffffffff +#define DPM_TABLE_143__GraphicsLevel_4_CgSpllFuncCntl4__SHIFT 0x0 +#define DPM_TABLE_144__GraphicsLevel_4_SpllSpreadSpectrum_MASK 0xffffffff +#define DPM_TABLE_144__GraphicsLevel_4_SpllSpreadSpectrum__SHIFT 0x0 +#define DPM_TABLE_145__GraphicsLevel_4_SpllSpreadSpectrum2_MASK 0xffffffff +#define DPM_TABLE_145__GraphicsLevel_4_SpllSpreadSpectrum2__SHIFT 0x0 +#define DPM_TABLE_146__GraphicsLevel_4_CcPwrDynRm_MASK 0xffffffff +#define DPM_TABLE_146__GraphicsLevel_4_CcPwrDynRm__SHIFT 0x0 +#define DPM_TABLE_147__GraphicsLevel_4_CcPwrDynRm1_MASK 0xffffffff +#define DPM_TABLE_147__GraphicsLevel_4_CcPwrDynRm1__SHIFT 0x0 +#define DPM_TABLE_148__GraphicsLevel_4_EnabledForThrottle_MASK 0xff +#define DPM_TABLE_148__GraphicsLevel_4_EnabledForThrottle__SHIFT 0x0 +#define DPM_TABLE_148__GraphicsLevel_4_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_148__GraphicsLevel_4_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_148__GraphicsLevel_4_DisplayWatermark_MASK 0xff0000 +#define DPM_TABLE_148__GraphicsLevel_4_DisplayWatermark__SHIFT 0x10 +#define DPM_TABLE_148__GraphicsLevel_4_SclkDid_MASK 0xff000000 +#define DPM_TABLE_148__GraphicsLevel_4_SclkDid__SHIFT 0x18 +#define DPM_TABLE_149__GraphicsLevel_4_PowerThrottle_MASK 0xff +#define DPM_TABLE_149__GraphicsLevel_4_PowerThrottle__SHIFT 0x0 +#define DPM_TABLE_149__GraphicsLevel_4_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_149__GraphicsLevel_4_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_149__GraphicsLevel_4_DownHyst_MASK 0xff0000 +#define DPM_TABLE_149__GraphicsLevel_4_DownHyst__SHIFT 0x10 +#define DPM_TABLE_149__GraphicsLevel_4_UpHyst_MASK 0xff000000 +#define DPM_TABLE_149__GraphicsLevel_4_UpHyst__SHIFT 0x18 +#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_151__GraphicsLevel_5_SclkFrequency_MASK 0xffffffff +#define DPM_TABLE_151__GraphicsLevel_5_SclkFrequency__SHIFT 0x0 +#define DPM_TABLE_152__GraphicsLevel_5_ActivityLevel_MASK 0xffff +#define DPM_TABLE_152__GraphicsLevel_5_ActivityLevel__SHIFT 0x0 +#define DPM_TABLE_152__GraphicsLevel_5_DeepSleepDivId_MASK 0xff0000 +#define 
DPM_TABLE_152__GraphicsLevel_5_DeepSleepDivId__SHIFT 0x10 +#define DPM_TABLE_152__GraphicsLevel_5_pcieDpmLevel_MASK 0xff000000 +#define DPM_TABLE_152__GraphicsLevel_5_pcieDpmLevel__SHIFT 0x18 +#define DPM_TABLE_153__GraphicsLevel_5_CgSpllFuncCntl3_MASK 0xffffffff +#define DPM_TABLE_153__GraphicsLevel_5_CgSpllFuncCntl3__SHIFT 0x0 +#define DPM_TABLE_154__GraphicsLevel_5_CgSpllFuncCntl4_MASK 0xffffffff +#define DPM_TABLE_154__GraphicsLevel_5_CgSpllFuncCntl4__SHIFT 0x0 +#define DPM_TABLE_155__GraphicsLevel_5_SpllSpreadSpectrum_MASK 0xffffffff +#define DPM_TABLE_155__GraphicsLevel_5_SpllSpreadSpectrum__SHIFT 0x0 +#define DPM_TABLE_156__GraphicsLevel_5_SpllSpreadSpectrum2_MASK 0xffffffff +#define DPM_TABLE_156__GraphicsLevel_5_SpllSpreadSpectrum2__SHIFT 0x0 +#define DPM_TABLE_157__GraphicsLevel_5_CcPwrDynRm_MASK 0xffffffff +#define DPM_TABLE_157__GraphicsLevel_5_CcPwrDynRm__SHIFT 0x0 +#define DPM_TABLE_158__GraphicsLevel_5_CcPwrDynRm1_MASK 0xffffffff +#define DPM_TABLE_158__GraphicsLevel_5_CcPwrDynRm1__SHIFT 0x0 +#define DPM_TABLE_159__GraphicsLevel_5_EnabledForThrottle_MASK 0xff +#define DPM_TABLE_159__GraphicsLevel_5_EnabledForThrottle__SHIFT 0x0 +#define DPM_TABLE_159__GraphicsLevel_5_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_159__GraphicsLevel_5_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_159__GraphicsLevel_5_DisplayWatermark_MASK 0xff0000 +#define DPM_TABLE_159__GraphicsLevel_5_DisplayWatermark__SHIFT 0x10 +#define DPM_TABLE_159__GraphicsLevel_5_SclkDid_MASK 0xff000000 +#define DPM_TABLE_159__GraphicsLevel_5_SclkDid__SHIFT 0x18 +#define DPM_TABLE_160__GraphicsLevel_5_PowerThrottle_MASK 0xff +#define DPM_TABLE_160__GraphicsLevel_5_PowerThrottle__SHIFT 0x0 +#define DPM_TABLE_160__GraphicsLevel_5_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_160__GraphicsLevel_5_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_160__GraphicsLevel_5_DownHyst_MASK 0xff0000 +#define DPM_TABLE_160__GraphicsLevel_5_DownHyst__SHIFT 0x10 +#define DPM_TABLE_160__GraphicsLevel_5_UpHyst_MASK 0xff000000 +#define DPM_TABLE_160__GraphicsLevel_5_UpHyst__SHIFT 0x18 +#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_162__GraphicsLevel_6_SclkFrequency_MASK 0xffffffff +#define DPM_TABLE_162__GraphicsLevel_6_SclkFrequency__SHIFT 0x0 +#define DPM_TABLE_163__GraphicsLevel_6_ActivityLevel_MASK 0xffff +#define DPM_TABLE_163__GraphicsLevel_6_ActivityLevel__SHIFT 0x0 +#define DPM_TABLE_163__GraphicsLevel_6_DeepSleepDivId_MASK 0xff0000 +#define DPM_TABLE_163__GraphicsLevel_6_DeepSleepDivId__SHIFT 0x10 +#define DPM_TABLE_163__GraphicsLevel_6_pcieDpmLevel_MASK 0xff000000 +#define DPM_TABLE_163__GraphicsLevel_6_pcieDpmLevel__SHIFT 0x18 +#define DPM_TABLE_164__GraphicsLevel_6_CgSpllFuncCntl3_MASK 0xffffffff +#define DPM_TABLE_164__GraphicsLevel_6_CgSpllFuncCntl3__SHIFT 0x0 +#define DPM_TABLE_165__GraphicsLevel_6_CgSpllFuncCntl4_MASK 0xffffffff +#define DPM_TABLE_165__GraphicsLevel_6_CgSpllFuncCntl4__SHIFT 0x0 +#define DPM_TABLE_166__GraphicsLevel_6_SpllSpreadSpectrum_MASK 0xffffffff +#define 
DPM_TABLE_166__GraphicsLevel_6_SpllSpreadSpectrum__SHIFT 0x0 +#define DPM_TABLE_167__GraphicsLevel_6_SpllSpreadSpectrum2_MASK 0xffffffff +#define DPM_TABLE_167__GraphicsLevel_6_SpllSpreadSpectrum2__SHIFT 0x0 +#define DPM_TABLE_168__GraphicsLevel_6_CcPwrDynRm_MASK 0xffffffff +#define DPM_TABLE_168__GraphicsLevel_6_CcPwrDynRm__SHIFT 0x0 +#define DPM_TABLE_169__GraphicsLevel_6_CcPwrDynRm1_MASK 0xffffffff +#define DPM_TABLE_169__GraphicsLevel_6_CcPwrDynRm1__SHIFT 0x0 +#define DPM_TABLE_170__GraphicsLevel_6_EnabledForThrottle_MASK 0xff +#define DPM_TABLE_170__GraphicsLevel_6_EnabledForThrottle__SHIFT 0x0 +#define DPM_TABLE_170__GraphicsLevel_6_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_170__GraphicsLevel_6_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_170__GraphicsLevel_6_DisplayWatermark_MASK 0xff0000 +#define DPM_TABLE_170__GraphicsLevel_6_DisplayWatermark__SHIFT 0x10 +#define DPM_TABLE_170__GraphicsLevel_6_SclkDid_MASK 0xff000000 +#define DPM_TABLE_170__GraphicsLevel_6_SclkDid__SHIFT 0x18 +#define DPM_TABLE_171__GraphicsLevel_6_PowerThrottle_MASK 0xff +#define DPM_TABLE_171__GraphicsLevel_6_PowerThrottle__SHIFT 0x0 +#define DPM_TABLE_171__GraphicsLevel_6_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_171__GraphicsLevel_6_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_171__GraphicsLevel_6_DownHyst_MASK 0xff0000 +#define DPM_TABLE_171__GraphicsLevel_6_DownHyst__SHIFT 0x10 +#define DPM_TABLE_171__GraphicsLevel_6_UpHyst_MASK 0xff000000 +#define DPM_TABLE_171__GraphicsLevel_6_UpHyst__SHIFT 0x18 +#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_173__GraphicsLevel_7_SclkFrequency_MASK 0xffffffff +#define DPM_TABLE_173__GraphicsLevel_7_SclkFrequency__SHIFT 0x0 +#define DPM_TABLE_174__GraphicsLevel_7_ActivityLevel_MASK 0xffff +#define DPM_TABLE_174__GraphicsLevel_7_ActivityLevel__SHIFT 0x0 +#define DPM_TABLE_174__GraphicsLevel_7_DeepSleepDivId_MASK 0xff0000 +#define DPM_TABLE_174__GraphicsLevel_7_DeepSleepDivId__SHIFT 0x10 +#define DPM_TABLE_174__GraphicsLevel_7_pcieDpmLevel_MASK 0xff000000 +#define DPM_TABLE_174__GraphicsLevel_7_pcieDpmLevel__SHIFT 0x18 +#define DPM_TABLE_175__GraphicsLevel_7_CgSpllFuncCntl3_MASK 0xffffffff +#define DPM_TABLE_175__GraphicsLevel_7_CgSpllFuncCntl3__SHIFT 0x0 +#define DPM_TABLE_176__GraphicsLevel_7_CgSpllFuncCntl4_MASK 0xffffffff +#define DPM_TABLE_176__GraphicsLevel_7_CgSpllFuncCntl4__SHIFT 0x0 +#define DPM_TABLE_177__GraphicsLevel_7_SpllSpreadSpectrum_MASK 0xffffffff +#define DPM_TABLE_177__GraphicsLevel_7_SpllSpreadSpectrum__SHIFT 0x0 +#define DPM_TABLE_178__GraphicsLevel_7_SpllSpreadSpectrum2_MASK 0xffffffff +#define DPM_TABLE_178__GraphicsLevel_7_SpllSpreadSpectrum2__SHIFT 0x0 +#define DPM_TABLE_179__GraphicsLevel_7_CcPwrDynRm_MASK 0xffffffff +#define DPM_TABLE_179__GraphicsLevel_7_CcPwrDynRm__SHIFT 0x0 +#define DPM_TABLE_180__GraphicsLevel_7_CcPwrDynRm1_MASK 0xffffffff +#define DPM_TABLE_180__GraphicsLevel_7_CcPwrDynRm1__SHIFT 0x0 +#define DPM_TABLE_181__GraphicsLevel_7_EnabledForThrottle_MASK 0xff +#define 
DPM_TABLE_181__GraphicsLevel_7_EnabledForThrottle__SHIFT 0x0 +#define DPM_TABLE_181__GraphicsLevel_7_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_181__GraphicsLevel_7_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_181__GraphicsLevel_7_DisplayWatermark_MASK 0xff0000 +#define DPM_TABLE_181__GraphicsLevel_7_DisplayWatermark__SHIFT 0x10 +#define DPM_TABLE_181__GraphicsLevel_7_SclkDid_MASK 0xff000000 +#define DPM_TABLE_181__GraphicsLevel_7_SclkDid__SHIFT 0x18 +#define DPM_TABLE_182__GraphicsLevel_7_PowerThrottle_MASK 0xff +#define DPM_TABLE_182__GraphicsLevel_7_PowerThrottle__SHIFT 0x0 +#define DPM_TABLE_182__GraphicsLevel_7_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_182__GraphicsLevel_7_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_182__GraphicsLevel_7_DownHyst_MASK 0xff0000 +#define DPM_TABLE_182__GraphicsLevel_7_DownHyst__SHIFT 0x10 +#define DPM_TABLE_182__GraphicsLevel_7_UpHyst_MASK 0xff000000 +#define DPM_TABLE_182__GraphicsLevel_7_UpHyst__SHIFT 0x18 +#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_184__MemoryACPILevel_MinMvdd_MASK 0xffffffff +#define DPM_TABLE_184__MemoryACPILevel_MinMvdd__SHIFT 0x0 +#define DPM_TABLE_185__MemoryACPILevel_MclkFrequency_MASK 0xffffffff +#define DPM_TABLE_185__MemoryACPILevel_MclkFrequency__SHIFT 0x0 +#define DPM_TABLE_186__MemoryACPILevel_EnabledForActivity_MASK 0xff +#define DPM_TABLE_186__MemoryACPILevel_EnabledForActivity__SHIFT 0x0 +#define DPM_TABLE_186__MemoryACPILevel_EnabledForThrottle_MASK 0xff00 +#define DPM_TABLE_186__MemoryACPILevel_EnabledForThrottle__SHIFT 0x8 +#define DPM_TABLE_186__MemoryACPILevel_FreqRange_MASK 0xff0000 +#define DPM_TABLE_186__MemoryACPILevel_FreqRange__SHIFT 0x10 +#define DPM_TABLE_186__MemoryACPILevel_StutterEnable_MASK 0xff000000 +#define DPM_TABLE_186__MemoryACPILevel_StutterEnable__SHIFT 0x18 +#define DPM_TABLE_187__MemoryACPILevel_padding_MASK 0xff +#define DPM_TABLE_187__MemoryACPILevel_padding__SHIFT 0x0 +#define DPM_TABLE_187__MemoryACPILevel_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_187__MemoryACPILevel_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_187__MemoryACPILevel_DownHyst_MASK 0xff0000 +#define DPM_TABLE_187__MemoryACPILevel_DownHyst__SHIFT 0x10 +#define DPM_TABLE_187__MemoryACPILevel_UpHyst_MASK 0xff000000 +#define DPM_TABLE_187__MemoryACPILevel_UpHyst__SHIFT 0x18 +#define DPM_TABLE_188__MemoryACPILevel_MclkDivider_MASK 0xff +#define DPM_TABLE_188__MemoryACPILevel_MclkDivider__SHIFT 0x0 +#define DPM_TABLE_188__MemoryACPILevel_DisplayWatermark_MASK 0xff00 +#define DPM_TABLE_188__MemoryACPILevel_DisplayWatermark__SHIFT 0x8 +#define DPM_TABLE_188__MemoryACPILevel_ActivityLevel_MASK 0xffff0000 +#define DPM_TABLE_188__MemoryACPILevel_ActivityLevel__SHIFT 0x10 +#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_VddGfx__SHIFT 0x8 +#define 
DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_190__MemoryLevel_0_MinMvdd_MASK 0xffffffff +#define DPM_TABLE_190__MemoryLevel_0_MinMvdd__SHIFT 0x0 +#define DPM_TABLE_191__MemoryLevel_0_MclkFrequency_MASK 0xffffffff +#define DPM_TABLE_191__MemoryLevel_0_MclkFrequency__SHIFT 0x0 +#define DPM_TABLE_192__MemoryLevel_0_EnabledForActivity_MASK 0xff +#define DPM_TABLE_192__MemoryLevel_0_EnabledForActivity__SHIFT 0x0 +#define DPM_TABLE_192__MemoryLevel_0_EnabledForThrottle_MASK 0xff00 +#define DPM_TABLE_192__MemoryLevel_0_EnabledForThrottle__SHIFT 0x8 +#define DPM_TABLE_192__MemoryLevel_0_FreqRange_MASK 0xff0000 +#define DPM_TABLE_192__MemoryLevel_0_FreqRange__SHIFT 0x10 +#define DPM_TABLE_192__MemoryLevel_0_StutterEnable_MASK 0xff000000 +#define DPM_TABLE_192__MemoryLevel_0_StutterEnable__SHIFT 0x18 +#define DPM_TABLE_193__MemoryLevel_0_padding_MASK 0xff +#define DPM_TABLE_193__MemoryLevel_0_padding__SHIFT 0x0 +#define DPM_TABLE_193__MemoryLevel_0_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_193__MemoryLevel_0_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_193__MemoryLevel_0_DownHyst_MASK 0xff0000 +#define DPM_TABLE_193__MemoryLevel_0_DownHyst__SHIFT 0x10 +#define DPM_TABLE_193__MemoryLevel_0_UpHyst_MASK 0xff000000 +#define DPM_TABLE_193__MemoryLevel_0_UpHyst__SHIFT 0x18 +#define DPM_TABLE_194__MemoryLevel_0_MclkDivider_MASK 0xff +#define DPM_TABLE_194__MemoryLevel_0_MclkDivider__SHIFT 0x0 +#define DPM_TABLE_194__MemoryLevel_0_DisplayWatermark_MASK 0xff00 +#define DPM_TABLE_194__MemoryLevel_0_DisplayWatermark__SHIFT 0x8 +#define DPM_TABLE_194__MemoryLevel_0_ActivityLevel_MASK 0xffff0000 +#define DPM_TABLE_194__MemoryLevel_0_ActivityLevel__SHIFT 0x10 +#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_196__MemoryLevel_1_MinMvdd_MASK 0xffffffff +#define DPM_TABLE_196__MemoryLevel_1_MinMvdd__SHIFT 0x0 +#define DPM_TABLE_197__MemoryLevel_1_MclkFrequency_MASK 0xffffffff +#define DPM_TABLE_197__MemoryLevel_1_MclkFrequency__SHIFT 0x0 +#define DPM_TABLE_198__MemoryLevel_1_EnabledForActivity_MASK 0xff +#define DPM_TABLE_198__MemoryLevel_1_EnabledForActivity__SHIFT 0x0 +#define DPM_TABLE_198__MemoryLevel_1_EnabledForThrottle_MASK 0xff00 +#define DPM_TABLE_198__MemoryLevel_1_EnabledForThrottle__SHIFT 0x8 +#define DPM_TABLE_198__MemoryLevel_1_FreqRange_MASK 0xff0000 +#define DPM_TABLE_198__MemoryLevel_1_FreqRange__SHIFT 0x10 +#define DPM_TABLE_198__MemoryLevel_1_StutterEnable_MASK 0xff000000 +#define DPM_TABLE_198__MemoryLevel_1_StutterEnable__SHIFT 0x18 +#define DPM_TABLE_199__MemoryLevel_1_padding_MASK 0xff +#define DPM_TABLE_199__MemoryLevel_1_padding__SHIFT 0x0 +#define DPM_TABLE_199__MemoryLevel_1_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_199__MemoryLevel_1_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_199__MemoryLevel_1_DownHyst_MASK 0xff0000 +#define 
DPM_TABLE_199__MemoryLevel_1_DownHyst__SHIFT 0x10 +#define DPM_TABLE_199__MemoryLevel_1_UpHyst_MASK 0xff000000 +#define DPM_TABLE_199__MemoryLevel_1_UpHyst__SHIFT 0x18 +#define DPM_TABLE_200__MemoryLevel_1_MclkDivider_MASK 0xff +#define DPM_TABLE_200__MemoryLevel_1_MclkDivider__SHIFT 0x0 +#define DPM_TABLE_200__MemoryLevel_1_DisplayWatermark_MASK 0xff00 +#define DPM_TABLE_200__MemoryLevel_1_DisplayWatermark__SHIFT 0x8 +#define DPM_TABLE_200__MemoryLevel_1_ActivityLevel_MASK 0xffff0000 +#define DPM_TABLE_200__MemoryLevel_1_ActivityLevel__SHIFT 0x10 +#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_202__MemoryLevel_2_MinMvdd_MASK 0xffffffff +#define DPM_TABLE_202__MemoryLevel_2_MinMvdd__SHIFT 0x0 +#define DPM_TABLE_203__MemoryLevel_2_MclkFrequency_MASK 0xffffffff +#define DPM_TABLE_203__MemoryLevel_2_MclkFrequency__SHIFT 0x0 +#define DPM_TABLE_204__MemoryLevel_2_EnabledForActivity_MASK 0xff +#define DPM_TABLE_204__MemoryLevel_2_EnabledForActivity__SHIFT 0x0 +#define DPM_TABLE_204__MemoryLevel_2_EnabledForThrottle_MASK 0xff00 +#define DPM_TABLE_204__MemoryLevel_2_EnabledForThrottle__SHIFT 0x8 +#define DPM_TABLE_204__MemoryLevel_2_FreqRange_MASK 0xff0000 +#define DPM_TABLE_204__MemoryLevel_2_FreqRange__SHIFT 0x10 +#define DPM_TABLE_204__MemoryLevel_2_StutterEnable_MASK 0xff000000 +#define DPM_TABLE_204__MemoryLevel_2_StutterEnable__SHIFT 0x18 +#define DPM_TABLE_205__MemoryLevel_2_padding_MASK 0xff +#define DPM_TABLE_205__MemoryLevel_2_padding__SHIFT 0x0 +#define DPM_TABLE_205__MemoryLevel_2_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_205__MemoryLevel_2_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_205__MemoryLevel_2_DownHyst_MASK 0xff0000 +#define DPM_TABLE_205__MemoryLevel_2_DownHyst__SHIFT 0x10 +#define DPM_TABLE_205__MemoryLevel_2_UpHyst_MASK 0xff000000 +#define DPM_TABLE_205__MemoryLevel_2_UpHyst__SHIFT 0x18 +#define DPM_TABLE_206__MemoryLevel_2_MclkDivider_MASK 0xff +#define DPM_TABLE_206__MemoryLevel_2_MclkDivider__SHIFT 0x0 +#define DPM_TABLE_206__MemoryLevel_2_DisplayWatermark_MASK 0xff00 +#define DPM_TABLE_206__MemoryLevel_2_DisplayWatermark__SHIFT 0x8 +#define DPM_TABLE_206__MemoryLevel_2_ActivityLevel_MASK 0xffff0000 +#define DPM_TABLE_206__MemoryLevel_2_ActivityLevel__SHIFT 0x10 +#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_208__MemoryLevel_3_MinMvdd_MASK 0xffffffff +#define DPM_TABLE_208__MemoryLevel_3_MinMvdd__SHIFT 0x0 +#define DPM_TABLE_209__MemoryLevel_3_MclkFrequency_MASK 0xffffffff +#define DPM_TABLE_209__MemoryLevel_3_MclkFrequency__SHIFT 0x0 +#define 
DPM_TABLE_210__MemoryLevel_3_EnabledForActivity_MASK 0xff +#define DPM_TABLE_210__MemoryLevel_3_EnabledForActivity__SHIFT 0x0 +#define DPM_TABLE_210__MemoryLevel_3_EnabledForThrottle_MASK 0xff00 +#define DPM_TABLE_210__MemoryLevel_3_EnabledForThrottle__SHIFT 0x8 +#define DPM_TABLE_210__MemoryLevel_3_FreqRange_MASK 0xff0000 +#define DPM_TABLE_210__MemoryLevel_3_FreqRange__SHIFT 0x10 +#define DPM_TABLE_210__MemoryLevel_3_StutterEnable_MASK 0xff000000 +#define DPM_TABLE_210__MemoryLevel_3_StutterEnable__SHIFT 0x18 +#define DPM_TABLE_211__MemoryLevel_3_padding_MASK 0xff +#define DPM_TABLE_211__MemoryLevel_3_padding__SHIFT 0x0 +#define DPM_TABLE_211__MemoryLevel_3_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_211__MemoryLevel_3_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_211__MemoryLevel_3_DownHyst_MASK 0xff0000 +#define DPM_TABLE_211__MemoryLevel_3_DownHyst__SHIFT 0x10 +#define DPM_TABLE_211__MemoryLevel_3_UpHyst_MASK 0xff000000 +#define DPM_TABLE_211__MemoryLevel_3_UpHyst__SHIFT 0x18 +#define DPM_TABLE_212__MemoryLevel_3_MclkDivider_MASK 0xff +#define DPM_TABLE_212__MemoryLevel_3_MclkDivider__SHIFT 0x0 +#define DPM_TABLE_212__MemoryLevel_3_DisplayWatermark_MASK 0xff00 +#define DPM_TABLE_212__MemoryLevel_3_DisplayWatermark__SHIFT 0x8 +#define DPM_TABLE_212__MemoryLevel_3_ActivityLevel_MASK 0xffff0000 +#define DPM_TABLE_212__MemoryLevel_3_ActivityLevel__SHIFT 0x10 +#define DPM_TABLE_213__LinkLevel_0_SPC_MASK 0xff +#define DPM_TABLE_213__LinkLevel_0_SPC__SHIFT 0x0 +#define DPM_TABLE_213__LinkLevel_0_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_213__LinkLevel_0_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_213__LinkLevel_0_PcieLaneCount_MASK 0xff0000 +#define DPM_TABLE_213__LinkLevel_0_PcieLaneCount__SHIFT 0x10 +#define DPM_TABLE_213__LinkLevel_0_PcieGenSpeed_MASK 0xff000000 +#define DPM_TABLE_213__LinkLevel_0_PcieGenSpeed__SHIFT 0x18 +#define DPM_TABLE_214__LinkLevel_0_DownThreshold_MASK 0xffffffff +#define DPM_TABLE_214__LinkLevel_0_DownThreshold__SHIFT 0x0 +#define DPM_TABLE_215__LinkLevel_0_UpThreshold_MASK 0xffffffff +#define DPM_TABLE_215__LinkLevel_0_UpThreshold__SHIFT 0x0 +#define DPM_TABLE_216__LinkLevel_0_Reserved_MASK 0xffffffff +#define DPM_TABLE_216__LinkLevel_0_Reserved__SHIFT 0x0 +#define DPM_TABLE_217__LinkLevel_1_SPC_MASK 0xff +#define DPM_TABLE_217__LinkLevel_1_SPC__SHIFT 0x0 +#define DPM_TABLE_217__LinkLevel_1_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_217__LinkLevel_1_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_217__LinkLevel_1_PcieLaneCount_MASK 0xff0000 +#define DPM_TABLE_217__LinkLevel_1_PcieLaneCount__SHIFT 0x10 +#define DPM_TABLE_217__LinkLevel_1_PcieGenSpeed_MASK 0xff000000 +#define DPM_TABLE_217__LinkLevel_1_PcieGenSpeed__SHIFT 0x18 +#define DPM_TABLE_218__LinkLevel_1_DownThreshold_MASK 0xffffffff +#define DPM_TABLE_218__LinkLevel_1_DownThreshold__SHIFT 0x0 +#define DPM_TABLE_219__LinkLevel_1_UpThreshold_MASK 0xffffffff +#define DPM_TABLE_219__LinkLevel_1_UpThreshold__SHIFT 0x0 +#define DPM_TABLE_220__LinkLevel_1_Reserved_MASK 0xffffffff +#define DPM_TABLE_220__LinkLevel_1_Reserved__SHIFT 0x0 +#define DPM_TABLE_221__LinkLevel_2_SPC_MASK 0xff +#define DPM_TABLE_221__LinkLevel_2_SPC__SHIFT 0x0 +#define DPM_TABLE_221__LinkLevel_2_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_221__LinkLevel_2_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_221__LinkLevel_2_PcieLaneCount_MASK 0xff0000 +#define DPM_TABLE_221__LinkLevel_2_PcieLaneCount__SHIFT 0x10 +#define DPM_TABLE_221__LinkLevel_2_PcieGenSpeed_MASK 0xff000000 +#define 
DPM_TABLE_221__LinkLevel_2_PcieGenSpeed__SHIFT 0x18 +#define DPM_TABLE_222__LinkLevel_2_DownThreshold_MASK 0xffffffff +#define DPM_TABLE_222__LinkLevel_2_DownThreshold__SHIFT 0x0 +#define DPM_TABLE_223__LinkLevel_2_UpThreshold_MASK 0xffffffff +#define DPM_TABLE_223__LinkLevel_2_UpThreshold__SHIFT 0x0 +#define DPM_TABLE_224__LinkLevel_2_Reserved_MASK 0xffffffff +#define DPM_TABLE_224__LinkLevel_2_Reserved__SHIFT 0x0 +#define DPM_TABLE_225__LinkLevel_3_SPC_MASK 0xff +#define DPM_TABLE_225__LinkLevel_3_SPC__SHIFT 0x0 +#define DPM_TABLE_225__LinkLevel_3_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_225__LinkLevel_3_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_225__LinkLevel_3_PcieLaneCount_MASK 0xff0000 +#define DPM_TABLE_225__LinkLevel_3_PcieLaneCount__SHIFT 0x10 +#define DPM_TABLE_225__LinkLevel_3_PcieGenSpeed_MASK 0xff000000 +#define DPM_TABLE_225__LinkLevel_3_PcieGenSpeed__SHIFT 0x18 +#define DPM_TABLE_226__LinkLevel_3_DownThreshold_MASK 0xffffffff +#define DPM_TABLE_226__LinkLevel_3_DownThreshold__SHIFT 0x0 +#define DPM_TABLE_227__LinkLevel_3_UpThreshold_MASK 0xffffffff +#define DPM_TABLE_227__LinkLevel_3_UpThreshold__SHIFT 0x0 +#define DPM_TABLE_228__LinkLevel_3_Reserved_MASK 0xffffffff +#define DPM_TABLE_228__LinkLevel_3_Reserved__SHIFT 0x0 +#define DPM_TABLE_229__LinkLevel_4_SPC_MASK 0xff +#define DPM_TABLE_229__LinkLevel_4_SPC__SHIFT 0x0 +#define DPM_TABLE_229__LinkLevel_4_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_229__LinkLevel_4_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_229__LinkLevel_4_PcieLaneCount_MASK 0xff0000 +#define DPM_TABLE_229__LinkLevel_4_PcieLaneCount__SHIFT 0x10 +#define DPM_TABLE_229__LinkLevel_4_PcieGenSpeed_MASK 0xff000000 +#define DPM_TABLE_229__LinkLevel_4_PcieGenSpeed__SHIFT 0x18 +#define DPM_TABLE_230__LinkLevel_4_DownThreshold_MASK 0xffffffff +#define DPM_TABLE_230__LinkLevel_4_DownThreshold__SHIFT 0x0 +#define DPM_TABLE_231__LinkLevel_4_UpThreshold_MASK 0xffffffff +#define DPM_TABLE_231__LinkLevel_4_UpThreshold__SHIFT 0x0 +#define DPM_TABLE_232__LinkLevel_4_Reserved_MASK 0xffffffff +#define DPM_TABLE_232__LinkLevel_4_Reserved__SHIFT 0x0 +#define DPM_TABLE_233__LinkLevel_5_SPC_MASK 0xff +#define DPM_TABLE_233__LinkLevel_5_SPC__SHIFT 0x0 +#define DPM_TABLE_233__LinkLevel_5_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_233__LinkLevel_5_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_233__LinkLevel_5_PcieLaneCount_MASK 0xff0000 +#define DPM_TABLE_233__LinkLevel_5_PcieLaneCount__SHIFT 0x10 +#define DPM_TABLE_233__LinkLevel_5_PcieGenSpeed_MASK 0xff000000 +#define DPM_TABLE_233__LinkLevel_5_PcieGenSpeed__SHIFT 0x18 +#define DPM_TABLE_234__LinkLevel_5_DownThreshold_MASK 0xffffffff +#define DPM_TABLE_234__LinkLevel_5_DownThreshold__SHIFT 0x0 +#define DPM_TABLE_235__LinkLevel_5_UpThreshold_MASK 0xffffffff +#define DPM_TABLE_235__LinkLevel_5_UpThreshold__SHIFT 0x0 +#define DPM_TABLE_236__LinkLevel_5_Reserved_MASK 0xffffffff +#define DPM_TABLE_236__LinkLevel_5_Reserved__SHIFT 0x0 +#define DPM_TABLE_237__LinkLevel_6_SPC_MASK 0xff +#define DPM_TABLE_237__LinkLevel_6_SPC__SHIFT 0x0 +#define DPM_TABLE_237__LinkLevel_6_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_237__LinkLevel_6_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_237__LinkLevel_6_PcieLaneCount_MASK 0xff0000 +#define DPM_TABLE_237__LinkLevel_6_PcieLaneCount__SHIFT 0x10 +#define DPM_TABLE_237__LinkLevel_6_PcieGenSpeed_MASK 0xff000000 +#define DPM_TABLE_237__LinkLevel_6_PcieGenSpeed__SHIFT 0x18 +#define DPM_TABLE_238__LinkLevel_6_DownThreshold_MASK 0xffffffff +#define 
DPM_TABLE_238__LinkLevel_6_DownThreshold__SHIFT 0x0 +#define DPM_TABLE_239__LinkLevel_6_UpThreshold_MASK 0xffffffff +#define DPM_TABLE_239__LinkLevel_6_UpThreshold__SHIFT 0x0 +#define DPM_TABLE_240__LinkLevel_6_Reserved_MASK 0xffffffff +#define DPM_TABLE_240__LinkLevel_6_Reserved__SHIFT 0x0 +#define DPM_TABLE_241__LinkLevel_7_SPC_MASK 0xff +#define DPM_TABLE_241__LinkLevel_7_SPC__SHIFT 0x0 +#define DPM_TABLE_241__LinkLevel_7_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_241__LinkLevel_7_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_241__LinkLevel_7_PcieLaneCount_MASK 0xff0000 +#define DPM_TABLE_241__LinkLevel_7_PcieLaneCount__SHIFT 0x10 +#define DPM_TABLE_241__LinkLevel_7_PcieGenSpeed_MASK 0xff000000 +#define DPM_TABLE_241__LinkLevel_7_PcieGenSpeed__SHIFT 0x18 +#define DPM_TABLE_242__LinkLevel_7_DownThreshold_MASK 0xffffffff +#define DPM_TABLE_242__LinkLevel_7_DownThreshold__SHIFT 0x0 +#define DPM_TABLE_243__LinkLevel_7_UpThreshold_MASK 0xffffffff +#define DPM_TABLE_243__LinkLevel_7_UpThreshold__SHIFT 0x0 +#define DPM_TABLE_244__LinkLevel_7_Reserved_MASK 0xffffffff +#define DPM_TABLE_244__LinkLevel_7_Reserved__SHIFT 0x0 +#define DPM_TABLE_245__ACPILevel_Flags_MASK 0xffffffff +#define DPM_TABLE_245__ACPILevel_Flags__SHIFT 0x0 +#define DPM_TABLE_246__ACPILevel_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_246__ACPILevel_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_246__ACPILevel_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_246__ACPILevel_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_247__ACPILevel_SclkFrequency_MASK 0xffffffff +#define DPM_TABLE_247__ACPILevel_SclkFrequency__SHIFT 0x0 +#define DPM_TABLE_248__ACPILevel_padding_MASK 0xff +#define DPM_TABLE_248__ACPILevel_padding__SHIFT 0x0 +#define DPM_TABLE_248__ACPILevel_DeepSleepDivId_MASK 0xff00 +#define DPM_TABLE_248__ACPILevel_DeepSleepDivId__SHIFT 0x8 +#define DPM_TABLE_248__ACPILevel_DisplayWatermark_MASK 0xff0000 +#define DPM_TABLE_248__ACPILevel_DisplayWatermark__SHIFT 0x10 +#define DPM_TABLE_248__ACPILevel_SclkDid_MASK 0xff000000 +#define DPM_TABLE_248__ACPILevel_SclkDid__SHIFT 0x18 +#define DPM_TABLE_249__ACPILevel_CgSpllFuncCntl_MASK 0xffffffff +#define DPM_TABLE_249__ACPILevel_CgSpllFuncCntl__SHIFT 0x0 +#define DPM_TABLE_250__ACPILevel_CgSpllFuncCntl2_MASK 0xffffffff +#define DPM_TABLE_250__ACPILevel_CgSpllFuncCntl2__SHIFT 0x0 +#define DPM_TABLE_251__ACPILevel_CgSpllFuncCntl3_MASK 0xffffffff +#define DPM_TABLE_251__ACPILevel_CgSpllFuncCntl3__SHIFT 0x0 +#define DPM_TABLE_252__ACPILevel_CgSpllFuncCntl4_MASK 0xffffffff +#define DPM_TABLE_252__ACPILevel_CgSpllFuncCntl4__SHIFT 0x0 +#define DPM_TABLE_253__ACPILevel_SpllSpreadSpectrum_MASK 0xffffffff +#define DPM_TABLE_253__ACPILevel_SpllSpreadSpectrum__SHIFT 0x0 +#define DPM_TABLE_254__ACPILevel_SpllSpreadSpectrum2_MASK 0xffffffff +#define DPM_TABLE_254__ACPILevel_SpllSpreadSpectrum2__SHIFT 0x0 +#define DPM_TABLE_255__ACPILevel_CcPwrDynRm_MASK 0xffffffff +#define DPM_TABLE_255__ACPILevel_CcPwrDynRm__SHIFT 0x0 +#define DPM_TABLE_256__ACPILevel_CcPwrDynRm1_MASK 0xffffffff +#define DPM_TABLE_256__ACPILevel_CcPwrDynRm1__SHIFT 0x0 +#define DPM_TABLE_257__UvdLevel_0_VclkFrequency_MASK 0xffffffff +#define DPM_TABLE_257__UvdLevel_0_VclkFrequency__SHIFT 0x0 +#define DPM_TABLE_258__UvdLevel_0_DclkFrequency_MASK 0xffffffff 
+#define DPM_TABLE_258__UvdLevel_0_DclkFrequency__SHIFT 0x0 +#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_259__UvdLevel_0_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_259__UvdLevel_0_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_260__UvdLevel_0_padding_1_MASK 0xff +#define DPM_TABLE_260__UvdLevel_0_padding_1__SHIFT 0x0 +#define DPM_TABLE_260__UvdLevel_0_padding_0_MASK 0xff00 +#define DPM_TABLE_260__UvdLevel_0_padding_0__SHIFT 0x8 +#define DPM_TABLE_260__UvdLevel_0_DclkDivider_MASK 0xff0000 +#define DPM_TABLE_260__UvdLevel_0_DclkDivider__SHIFT 0x10 +#define DPM_TABLE_260__UvdLevel_0_VclkDivider_MASK 0xff000000 +#define DPM_TABLE_260__UvdLevel_0_VclkDivider__SHIFT 0x18 +#define DPM_TABLE_261__UvdLevel_1_VclkFrequency_MASK 0xffffffff +#define DPM_TABLE_261__UvdLevel_1_VclkFrequency__SHIFT 0x0 +#define DPM_TABLE_262__UvdLevel_1_DclkFrequency_MASK 0xffffffff +#define DPM_TABLE_262__UvdLevel_1_DclkFrequency__SHIFT 0x0 +#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_263__UvdLevel_1_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_263__UvdLevel_1_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_264__UvdLevel_1_padding_1_MASK 0xff +#define DPM_TABLE_264__UvdLevel_1_padding_1__SHIFT 0x0 +#define DPM_TABLE_264__UvdLevel_1_padding_0_MASK 0xff00 +#define DPM_TABLE_264__UvdLevel_1_padding_0__SHIFT 0x8 +#define DPM_TABLE_264__UvdLevel_1_DclkDivider_MASK 0xff0000 +#define DPM_TABLE_264__UvdLevel_1_DclkDivider__SHIFT 0x10 +#define DPM_TABLE_264__UvdLevel_1_VclkDivider_MASK 0xff000000 +#define DPM_TABLE_264__UvdLevel_1_VclkDivider__SHIFT 0x18 +#define DPM_TABLE_265__UvdLevel_2_VclkFrequency_MASK 0xffffffff +#define DPM_TABLE_265__UvdLevel_2_VclkFrequency__SHIFT 0x0 +#define DPM_TABLE_266__UvdLevel_2_DclkFrequency_MASK 0xffffffff +#define DPM_TABLE_266__UvdLevel_2_DclkFrequency__SHIFT 0x0 +#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_267__UvdLevel_2_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_267__UvdLevel_2_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_268__UvdLevel_2_padding_1_MASK 0xff +#define DPM_TABLE_268__UvdLevel_2_padding_1__SHIFT 0x0 +#define DPM_TABLE_268__UvdLevel_2_padding_0_MASK 0xff00 +#define DPM_TABLE_268__UvdLevel_2_padding_0__SHIFT 0x8 +#define DPM_TABLE_268__UvdLevel_2_DclkDivider_MASK 0xff0000 +#define DPM_TABLE_268__UvdLevel_2_DclkDivider__SHIFT 0x10 +#define DPM_TABLE_268__UvdLevel_2_VclkDivider_MASK 0xff000000 +#define DPM_TABLE_268__UvdLevel_2_VclkDivider__SHIFT 0x18 +#define 
DPM_TABLE_269__UvdLevel_3_VclkFrequency_MASK 0xffffffff +#define DPM_TABLE_269__UvdLevel_3_VclkFrequency__SHIFT 0x0 +#define DPM_TABLE_270__UvdLevel_3_DclkFrequency_MASK 0xffffffff +#define DPM_TABLE_270__UvdLevel_3_DclkFrequency__SHIFT 0x0 +#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_271__UvdLevel_3_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_271__UvdLevel_3_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_272__UvdLevel_3_padding_1_MASK 0xff +#define DPM_TABLE_272__UvdLevel_3_padding_1__SHIFT 0x0 +#define DPM_TABLE_272__UvdLevel_3_padding_0_MASK 0xff00 +#define DPM_TABLE_272__UvdLevel_3_padding_0__SHIFT 0x8 +#define DPM_TABLE_272__UvdLevel_3_DclkDivider_MASK 0xff0000 +#define DPM_TABLE_272__UvdLevel_3_DclkDivider__SHIFT 0x10 +#define DPM_TABLE_272__UvdLevel_3_VclkDivider_MASK 0xff000000 +#define DPM_TABLE_272__UvdLevel_3_VclkDivider__SHIFT 0x18 +#define DPM_TABLE_273__UvdLevel_4_VclkFrequency_MASK 0xffffffff +#define DPM_TABLE_273__UvdLevel_4_VclkFrequency__SHIFT 0x0 +#define DPM_TABLE_274__UvdLevel_4_DclkFrequency_MASK 0xffffffff +#define DPM_TABLE_274__UvdLevel_4_DclkFrequency__SHIFT 0x0 +#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_275__UvdLevel_4_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_275__UvdLevel_4_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_276__UvdLevel_4_padding_1_MASK 0xff +#define DPM_TABLE_276__UvdLevel_4_padding_1__SHIFT 0x0 +#define DPM_TABLE_276__UvdLevel_4_padding_0_MASK 0xff00 +#define DPM_TABLE_276__UvdLevel_4_padding_0__SHIFT 0x8 +#define DPM_TABLE_276__UvdLevel_4_DclkDivider_MASK 0xff0000 +#define DPM_TABLE_276__UvdLevel_4_DclkDivider__SHIFT 0x10 +#define DPM_TABLE_276__UvdLevel_4_VclkDivider_MASK 0xff000000 +#define DPM_TABLE_276__UvdLevel_4_VclkDivider__SHIFT 0x18 +#define DPM_TABLE_277__UvdLevel_5_VclkFrequency_MASK 0xffffffff +#define DPM_TABLE_277__UvdLevel_5_VclkFrequency__SHIFT 0x0 +#define DPM_TABLE_278__UvdLevel_5_DclkFrequency_MASK 0xffffffff +#define DPM_TABLE_278__UvdLevel_5_DclkFrequency__SHIFT 0x0 +#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_279__UvdLevel_5_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_279__UvdLevel_5_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_280__UvdLevel_5_padding_1_MASK 0xff +#define DPM_TABLE_280__UvdLevel_5_padding_1__SHIFT 0x0 +#define DPM_TABLE_280__UvdLevel_5_padding_0_MASK 0xff00 +#define DPM_TABLE_280__UvdLevel_5_padding_0__SHIFT 0x8 +#define DPM_TABLE_280__UvdLevel_5_DclkDivider_MASK 0xff0000 +#define 
DPM_TABLE_280__UvdLevel_5_DclkDivider__SHIFT 0x10 +#define DPM_TABLE_280__UvdLevel_5_VclkDivider_MASK 0xff000000 +#define DPM_TABLE_280__UvdLevel_5_VclkDivider__SHIFT 0x18 +#define DPM_TABLE_281__UvdLevel_6_VclkFrequency_MASK 0xffffffff +#define DPM_TABLE_281__UvdLevel_6_VclkFrequency__SHIFT 0x0 +#define DPM_TABLE_282__UvdLevel_6_DclkFrequency_MASK 0xffffffff +#define DPM_TABLE_282__UvdLevel_6_DclkFrequency__SHIFT 0x0 +#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_283__UvdLevel_6_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_283__UvdLevel_6_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_284__UvdLevel_6_padding_1_MASK 0xff +#define DPM_TABLE_284__UvdLevel_6_padding_1__SHIFT 0x0 +#define DPM_TABLE_284__UvdLevel_6_padding_0_MASK 0xff00 +#define DPM_TABLE_284__UvdLevel_6_padding_0__SHIFT 0x8 +#define DPM_TABLE_284__UvdLevel_6_DclkDivider_MASK 0xff0000 +#define DPM_TABLE_284__UvdLevel_6_DclkDivider__SHIFT 0x10 +#define DPM_TABLE_284__UvdLevel_6_VclkDivider_MASK 0xff000000 +#define DPM_TABLE_284__UvdLevel_6_VclkDivider__SHIFT 0x18 +#define DPM_TABLE_285__UvdLevel_7_VclkFrequency_MASK 0xffffffff +#define DPM_TABLE_285__UvdLevel_7_VclkFrequency__SHIFT 0x0 +#define DPM_TABLE_286__UvdLevel_7_DclkFrequency_MASK 0xffffffff +#define DPM_TABLE_286__UvdLevel_7_DclkFrequency__SHIFT 0x0 +#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_287__UvdLevel_7_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_287__UvdLevel_7_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_288__UvdLevel_7_padding_1_MASK 0xff +#define DPM_TABLE_288__UvdLevel_7_padding_1__SHIFT 0x0 +#define DPM_TABLE_288__UvdLevel_7_padding_0_MASK 0xff00 +#define DPM_TABLE_288__UvdLevel_7_padding_0__SHIFT 0x8 +#define DPM_TABLE_288__UvdLevel_7_DclkDivider_MASK 0xff0000 +#define DPM_TABLE_288__UvdLevel_7_DclkDivider__SHIFT 0x10 +#define DPM_TABLE_288__UvdLevel_7_VclkDivider_MASK 0xff000000 +#define DPM_TABLE_288__UvdLevel_7_VclkDivider__SHIFT 0x18 +#define DPM_TABLE_289__VceLevel_0_Frequency_MASK 0xffffffff +#define DPM_TABLE_289__VceLevel_0_Frequency__SHIFT 0x0 +#define DPM_TABLE_290__VceLevel_0_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_290__VceLevel_0_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_290__VceLevel_0_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_290__VceLevel_0_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_291__VceLevel_0_padding_2_MASK 0xff +#define DPM_TABLE_291__VceLevel_0_padding_2__SHIFT 0x0 +#define DPM_TABLE_291__VceLevel_0_padding_1_MASK 0xff00 +#define DPM_TABLE_291__VceLevel_0_padding_1__SHIFT 0x8 +#define DPM_TABLE_291__VceLevel_0_padding_0_MASK 0xff0000 
+#define DPM_TABLE_291__VceLevel_0_padding_0__SHIFT 0x10 +#define DPM_TABLE_291__VceLevel_0_Divider_MASK 0xff000000 +#define DPM_TABLE_291__VceLevel_0_Divider__SHIFT 0x18 +#define DPM_TABLE_292__VceLevel_1_Frequency_MASK 0xffffffff +#define DPM_TABLE_292__VceLevel_1_Frequency__SHIFT 0x0 +#define DPM_TABLE_293__VceLevel_1_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_293__VceLevel_1_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_293__VceLevel_1_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_293__VceLevel_1_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_294__VceLevel_1_padding_2_MASK 0xff +#define DPM_TABLE_294__VceLevel_1_padding_2__SHIFT 0x0 +#define DPM_TABLE_294__VceLevel_1_padding_1_MASK 0xff00 +#define DPM_TABLE_294__VceLevel_1_padding_1__SHIFT 0x8 +#define DPM_TABLE_294__VceLevel_1_padding_0_MASK 0xff0000 +#define DPM_TABLE_294__VceLevel_1_padding_0__SHIFT 0x10 +#define DPM_TABLE_294__VceLevel_1_Divider_MASK 0xff000000 +#define DPM_TABLE_294__VceLevel_1_Divider__SHIFT 0x18 +#define DPM_TABLE_295__VceLevel_2_Frequency_MASK 0xffffffff +#define DPM_TABLE_295__VceLevel_2_Frequency__SHIFT 0x0 +#define DPM_TABLE_296__VceLevel_2_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_296__VceLevel_2_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_296__VceLevel_2_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_296__VceLevel_2_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_297__VceLevel_2_padding_2_MASK 0xff +#define DPM_TABLE_297__VceLevel_2_padding_2__SHIFT 0x0 +#define DPM_TABLE_297__VceLevel_2_padding_1_MASK 0xff00 +#define DPM_TABLE_297__VceLevel_2_padding_1__SHIFT 0x8 +#define DPM_TABLE_297__VceLevel_2_padding_0_MASK 0xff0000 +#define DPM_TABLE_297__VceLevel_2_padding_0__SHIFT 0x10 +#define DPM_TABLE_297__VceLevel_2_Divider_MASK 0xff000000 +#define DPM_TABLE_297__VceLevel_2_Divider__SHIFT 0x18 +#define DPM_TABLE_298__VceLevel_3_Frequency_MASK 0xffffffff +#define DPM_TABLE_298__VceLevel_3_Frequency__SHIFT 0x0 +#define DPM_TABLE_299__VceLevel_3_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_299__VceLevel_3_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_299__VceLevel_3_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_299__VceLevel_3_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_300__VceLevel_3_padding_2_MASK 0xff +#define DPM_TABLE_300__VceLevel_3_padding_2__SHIFT 0x0 +#define DPM_TABLE_300__VceLevel_3_padding_1_MASK 0xff00 +#define DPM_TABLE_300__VceLevel_3_padding_1__SHIFT 0x8 +#define DPM_TABLE_300__VceLevel_3_padding_0_MASK 0xff0000 +#define DPM_TABLE_300__VceLevel_3_padding_0__SHIFT 0x10 +#define DPM_TABLE_300__VceLevel_3_Divider_MASK 0xff000000 +#define DPM_TABLE_300__VceLevel_3_Divider__SHIFT 0x18 +#define DPM_TABLE_301__VceLevel_4_Frequency_MASK 0xffffffff +#define DPM_TABLE_301__VceLevel_4_Frequency__SHIFT 0x0 
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_302__VceLevel_4_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_302__VceLevel_4_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_302__VceLevel_4_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_303__VceLevel_4_padding_2_MASK 0xff +#define DPM_TABLE_303__VceLevel_4_padding_2__SHIFT 0x0 +#define DPM_TABLE_303__VceLevel_4_padding_1_MASK 0xff00 +#define DPM_TABLE_303__VceLevel_4_padding_1__SHIFT 0x8 +#define DPM_TABLE_303__VceLevel_4_padding_0_MASK 0xff0000 +#define DPM_TABLE_303__VceLevel_4_padding_0__SHIFT 0x10 +#define DPM_TABLE_303__VceLevel_4_Divider_MASK 0xff000000 +#define DPM_TABLE_303__VceLevel_4_Divider__SHIFT 0x18 +#define DPM_TABLE_304__VceLevel_5_Frequency_MASK 0xffffffff +#define DPM_TABLE_304__VceLevel_5_Frequency__SHIFT 0x0 +#define DPM_TABLE_305__VceLevel_5_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_305__VceLevel_5_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_305__VceLevel_5_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_305__VceLevel_5_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_306__VceLevel_5_padding_2_MASK 0xff +#define DPM_TABLE_306__VceLevel_5_padding_2__SHIFT 0x0 +#define DPM_TABLE_306__VceLevel_5_padding_1_MASK 0xff00 +#define DPM_TABLE_306__VceLevel_5_padding_1__SHIFT 0x8 +#define DPM_TABLE_306__VceLevel_5_padding_0_MASK 0xff0000 +#define DPM_TABLE_306__VceLevel_5_padding_0__SHIFT 0x10 +#define DPM_TABLE_306__VceLevel_5_Divider_MASK 0xff000000 +#define DPM_TABLE_306__VceLevel_5_Divider__SHIFT 0x18 +#define DPM_TABLE_307__VceLevel_6_Frequency_MASK 0xffffffff +#define DPM_TABLE_307__VceLevel_6_Frequency__SHIFT 0x0 +#define DPM_TABLE_308__VceLevel_6_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_308__VceLevel_6_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_308__VceLevel_6_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_308__VceLevel_6_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_309__VceLevel_6_padding_2_MASK 0xff +#define DPM_TABLE_309__VceLevel_6_padding_2__SHIFT 0x0 +#define DPM_TABLE_309__VceLevel_6_padding_1_MASK 0xff00 +#define DPM_TABLE_309__VceLevel_6_padding_1__SHIFT 0x8 +#define DPM_TABLE_309__VceLevel_6_padding_0_MASK 0xff0000 +#define DPM_TABLE_309__VceLevel_6_padding_0__SHIFT 0x10 +#define DPM_TABLE_309__VceLevel_6_Divider_MASK 0xff000000 +#define DPM_TABLE_309__VceLevel_6_Divider__SHIFT 0x18 +#define DPM_TABLE_310__VceLevel_7_Frequency_MASK 0xffffffff +#define DPM_TABLE_310__VceLevel_7_Frequency__SHIFT 0x0 +#define DPM_TABLE_311__VceLevel_7_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_311__VceLevel_7_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_311__VceLevel_7_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_311__VceLevel_7_MinVoltage_VddGfx__SHIFT 0x8 +#define 
DPM_TABLE_311__VceLevel_7_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_312__VceLevel_7_padding_2_MASK 0xff +#define DPM_TABLE_312__VceLevel_7_padding_2__SHIFT 0x0 +#define DPM_TABLE_312__VceLevel_7_padding_1_MASK 0xff00 +#define DPM_TABLE_312__VceLevel_7_padding_1__SHIFT 0x8 +#define DPM_TABLE_312__VceLevel_7_padding_0_MASK 0xff0000 +#define DPM_TABLE_312__VceLevel_7_padding_0__SHIFT 0x10 +#define DPM_TABLE_312__VceLevel_7_Divider_MASK 0xff000000 +#define DPM_TABLE_312__VceLevel_7_Divider__SHIFT 0x18 +#define DPM_TABLE_313__AcpLevel_0_Frequency_MASK 0xffffffff +#define DPM_TABLE_313__AcpLevel_0_Frequency__SHIFT 0x0 +#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_314__AcpLevel_0_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_314__AcpLevel_0_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_315__AcpLevel_0_padding_2_MASK 0xff +#define DPM_TABLE_315__AcpLevel_0_padding_2__SHIFT 0x0 +#define DPM_TABLE_315__AcpLevel_0_padding_1_MASK 0xff00 +#define DPM_TABLE_315__AcpLevel_0_padding_1__SHIFT 0x8 +#define DPM_TABLE_315__AcpLevel_0_padding_0_MASK 0xff0000 +#define DPM_TABLE_315__AcpLevel_0_padding_0__SHIFT 0x10 +#define DPM_TABLE_315__AcpLevel_0_Divider_MASK 0xff000000 +#define DPM_TABLE_315__AcpLevel_0_Divider__SHIFT 0x18 +#define DPM_TABLE_316__AcpLevel_1_Frequency_MASK 0xffffffff +#define DPM_TABLE_316__AcpLevel_1_Frequency__SHIFT 0x0 +#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_317__AcpLevel_1_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_317__AcpLevel_1_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_318__AcpLevel_1_padding_2_MASK 0xff +#define DPM_TABLE_318__AcpLevel_1_padding_2__SHIFT 0x0 +#define DPM_TABLE_318__AcpLevel_1_padding_1_MASK 0xff00 +#define DPM_TABLE_318__AcpLevel_1_padding_1__SHIFT 0x8 +#define DPM_TABLE_318__AcpLevel_1_padding_0_MASK 0xff0000 +#define DPM_TABLE_318__AcpLevel_1_padding_0__SHIFT 0x10 +#define DPM_TABLE_318__AcpLevel_1_Divider_MASK 0xff000000 +#define DPM_TABLE_318__AcpLevel_1_Divider__SHIFT 0x18 +#define DPM_TABLE_319__AcpLevel_2_Frequency_MASK 0xffffffff +#define DPM_TABLE_319__AcpLevel_2_Frequency__SHIFT 0x0 +#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_320__AcpLevel_2_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_320__AcpLevel_2_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddc__SHIFT 0x18 +#define 
DPM_TABLE_321__AcpLevel_2_padding_2_MASK 0xff +#define DPM_TABLE_321__AcpLevel_2_padding_2__SHIFT 0x0 +#define DPM_TABLE_321__AcpLevel_2_padding_1_MASK 0xff00 +#define DPM_TABLE_321__AcpLevel_2_padding_1__SHIFT 0x8 +#define DPM_TABLE_321__AcpLevel_2_padding_0_MASK 0xff0000 +#define DPM_TABLE_321__AcpLevel_2_padding_0__SHIFT 0x10 +#define DPM_TABLE_321__AcpLevel_2_Divider_MASK 0xff000000 +#define DPM_TABLE_321__AcpLevel_2_Divider__SHIFT 0x18 +#define DPM_TABLE_322__AcpLevel_3_Frequency_MASK 0xffffffff +#define DPM_TABLE_322__AcpLevel_3_Frequency__SHIFT 0x0 +#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_323__AcpLevel_3_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_323__AcpLevel_3_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_324__AcpLevel_3_padding_2_MASK 0xff +#define DPM_TABLE_324__AcpLevel_3_padding_2__SHIFT 0x0 +#define DPM_TABLE_324__AcpLevel_3_padding_1_MASK 0xff00 +#define DPM_TABLE_324__AcpLevel_3_padding_1__SHIFT 0x8 +#define DPM_TABLE_324__AcpLevel_3_padding_0_MASK 0xff0000 +#define DPM_TABLE_324__AcpLevel_3_padding_0__SHIFT 0x10 +#define DPM_TABLE_324__AcpLevel_3_Divider_MASK 0xff000000 +#define DPM_TABLE_324__AcpLevel_3_Divider__SHIFT 0x18 +#define DPM_TABLE_325__AcpLevel_4_Frequency_MASK 0xffffffff +#define DPM_TABLE_325__AcpLevel_4_Frequency__SHIFT 0x0 +#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_326__AcpLevel_4_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_326__AcpLevel_4_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_327__AcpLevel_4_padding_2_MASK 0xff +#define DPM_TABLE_327__AcpLevel_4_padding_2__SHIFT 0x0 +#define DPM_TABLE_327__AcpLevel_4_padding_1_MASK 0xff00 +#define DPM_TABLE_327__AcpLevel_4_padding_1__SHIFT 0x8 +#define DPM_TABLE_327__AcpLevel_4_padding_0_MASK 0xff0000 +#define DPM_TABLE_327__AcpLevel_4_padding_0__SHIFT 0x10 +#define DPM_TABLE_327__AcpLevel_4_Divider_MASK 0xff000000 +#define DPM_TABLE_327__AcpLevel_4_Divider__SHIFT 0x18 +#define DPM_TABLE_328__AcpLevel_5_Frequency_MASK 0xffffffff +#define DPM_TABLE_328__AcpLevel_5_Frequency__SHIFT 0x0 +#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_329__AcpLevel_5_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_329__AcpLevel_5_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_330__AcpLevel_5_padding_2_MASK 0xff +#define DPM_TABLE_330__AcpLevel_5_padding_2__SHIFT 0x0 +#define DPM_TABLE_330__AcpLevel_5_padding_1_MASK 0xff00 +#define DPM_TABLE_330__AcpLevel_5_padding_1__SHIFT 0x8 +#define DPM_TABLE_330__AcpLevel_5_padding_0_MASK 0xff0000 +#define 
DPM_TABLE_330__AcpLevel_5_padding_0__SHIFT 0x10 +#define DPM_TABLE_330__AcpLevel_5_Divider_MASK 0xff000000 +#define DPM_TABLE_330__AcpLevel_5_Divider__SHIFT 0x18 +#define DPM_TABLE_331__AcpLevel_6_Frequency_MASK 0xffffffff +#define DPM_TABLE_331__AcpLevel_6_Frequency__SHIFT 0x0 +#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_332__AcpLevel_6_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_332__AcpLevel_6_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_333__AcpLevel_6_padding_2_MASK 0xff +#define DPM_TABLE_333__AcpLevel_6_padding_2__SHIFT 0x0 +#define DPM_TABLE_333__AcpLevel_6_padding_1_MASK 0xff00 +#define DPM_TABLE_333__AcpLevel_6_padding_1__SHIFT 0x8 +#define DPM_TABLE_333__AcpLevel_6_padding_0_MASK 0xff0000 +#define DPM_TABLE_333__AcpLevel_6_padding_0__SHIFT 0x10 +#define DPM_TABLE_333__AcpLevel_6_Divider_MASK 0xff000000 +#define DPM_TABLE_333__AcpLevel_6_Divider__SHIFT 0x18 +#define DPM_TABLE_334__AcpLevel_7_Frequency_MASK 0xffffffff +#define DPM_TABLE_334__AcpLevel_7_Frequency__SHIFT 0x0 +#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_335__AcpLevel_7_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_335__AcpLevel_7_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_336__AcpLevel_7_padding_2_MASK 0xff +#define DPM_TABLE_336__AcpLevel_7_padding_2__SHIFT 0x0 +#define DPM_TABLE_336__AcpLevel_7_padding_1_MASK 0xff00 +#define DPM_TABLE_336__AcpLevel_7_padding_1__SHIFT 0x8 +#define DPM_TABLE_336__AcpLevel_7_padding_0_MASK 0xff0000 +#define DPM_TABLE_336__AcpLevel_7_padding_0__SHIFT 0x10 +#define DPM_TABLE_336__AcpLevel_7_Divider_MASK 0xff000000 +#define DPM_TABLE_336__AcpLevel_7_Divider__SHIFT 0x18 +#define DPM_TABLE_337__SamuLevel_0_Frequency_MASK 0xffffffff +#define DPM_TABLE_337__SamuLevel_0_Frequency__SHIFT 0x0 +#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_338__SamuLevel_0_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_338__SamuLevel_0_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_339__SamuLevel_0_padding_2_MASK 0xff +#define DPM_TABLE_339__SamuLevel_0_padding_2__SHIFT 0x0 +#define DPM_TABLE_339__SamuLevel_0_padding_1_MASK 0xff00 +#define DPM_TABLE_339__SamuLevel_0_padding_1__SHIFT 0x8 +#define DPM_TABLE_339__SamuLevel_0_padding_0_MASK 0xff0000 +#define DPM_TABLE_339__SamuLevel_0_padding_0__SHIFT 0x10 +#define DPM_TABLE_339__SamuLevel_0_Divider_MASK 0xff000000 +#define DPM_TABLE_339__SamuLevel_0_Divider__SHIFT 0x18 +#define DPM_TABLE_340__SamuLevel_1_Frequency_MASK 0xffffffff +#define 
DPM_TABLE_340__SamuLevel_1_Frequency__SHIFT 0x0 +#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_341__SamuLevel_1_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_341__SamuLevel_1_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_342__SamuLevel_1_padding_2_MASK 0xff +#define DPM_TABLE_342__SamuLevel_1_padding_2__SHIFT 0x0 +#define DPM_TABLE_342__SamuLevel_1_padding_1_MASK 0xff00 +#define DPM_TABLE_342__SamuLevel_1_padding_1__SHIFT 0x8 +#define DPM_TABLE_342__SamuLevel_1_padding_0_MASK 0xff0000 +#define DPM_TABLE_342__SamuLevel_1_padding_0__SHIFT 0x10 +#define DPM_TABLE_342__SamuLevel_1_Divider_MASK 0xff000000 +#define DPM_TABLE_342__SamuLevel_1_Divider__SHIFT 0x18 +#define DPM_TABLE_343__SamuLevel_2_Frequency_MASK 0xffffffff +#define DPM_TABLE_343__SamuLevel_2_Frequency__SHIFT 0x0 +#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_344__SamuLevel_2_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_344__SamuLevel_2_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_345__SamuLevel_2_padding_2_MASK 0xff +#define DPM_TABLE_345__SamuLevel_2_padding_2__SHIFT 0x0 +#define DPM_TABLE_345__SamuLevel_2_padding_1_MASK 0xff00 +#define DPM_TABLE_345__SamuLevel_2_padding_1__SHIFT 0x8 +#define DPM_TABLE_345__SamuLevel_2_padding_0_MASK 0xff0000 +#define DPM_TABLE_345__SamuLevel_2_padding_0__SHIFT 0x10 +#define DPM_TABLE_345__SamuLevel_2_Divider_MASK 0xff000000 +#define DPM_TABLE_345__SamuLevel_2_Divider__SHIFT 0x18 +#define DPM_TABLE_346__SamuLevel_3_Frequency_MASK 0xffffffff +#define DPM_TABLE_346__SamuLevel_3_Frequency__SHIFT 0x0 +#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_347__SamuLevel_3_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_347__SamuLevel_3_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_348__SamuLevel_3_padding_2_MASK 0xff +#define DPM_TABLE_348__SamuLevel_3_padding_2__SHIFT 0x0 +#define DPM_TABLE_348__SamuLevel_3_padding_1_MASK 0xff00 +#define DPM_TABLE_348__SamuLevel_3_padding_1__SHIFT 0x8 +#define DPM_TABLE_348__SamuLevel_3_padding_0_MASK 0xff0000 +#define DPM_TABLE_348__SamuLevel_3_padding_0__SHIFT 0x10 +#define DPM_TABLE_348__SamuLevel_3_Divider_MASK 0xff000000 +#define DPM_TABLE_348__SamuLevel_3_Divider__SHIFT 0x18 +#define DPM_TABLE_349__SamuLevel_4_Frequency_MASK 0xffffffff +#define DPM_TABLE_349__SamuLevel_4_Frequency__SHIFT 0x0 +#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_350__SamuLevel_4_MinVoltage_VddGfx_MASK 
0xff00 +#define DPM_TABLE_350__SamuLevel_4_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_351__SamuLevel_4_padding_2_MASK 0xff +#define DPM_TABLE_351__SamuLevel_4_padding_2__SHIFT 0x0 +#define DPM_TABLE_351__SamuLevel_4_padding_1_MASK 0xff00 +#define DPM_TABLE_351__SamuLevel_4_padding_1__SHIFT 0x8 +#define DPM_TABLE_351__SamuLevel_4_padding_0_MASK 0xff0000 +#define DPM_TABLE_351__SamuLevel_4_padding_0__SHIFT 0x10 +#define DPM_TABLE_351__SamuLevel_4_Divider_MASK 0xff000000 +#define DPM_TABLE_351__SamuLevel_4_Divider__SHIFT 0x18 +#define DPM_TABLE_352__SamuLevel_5_Frequency_MASK 0xffffffff +#define DPM_TABLE_352__SamuLevel_5_Frequency__SHIFT 0x0 +#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_353__SamuLevel_5_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_353__SamuLevel_5_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_354__SamuLevel_5_padding_2_MASK 0xff +#define DPM_TABLE_354__SamuLevel_5_padding_2__SHIFT 0x0 +#define DPM_TABLE_354__SamuLevel_5_padding_1_MASK 0xff00 +#define DPM_TABLE_354__SamuLevel_5_padding_1__SHIFT 0x8 +#define DPM_TABLE_354__SamuLevel_5_padding_0_MASK 0xff0000 +#define DPM_TABLE_354__SamuLevel_5_padding_0__SHIFT 0x10 +#define DPM_TABLE_354__SamuLevel_5_Divider_MASK 0xff000000 +#define DPM_TABLE_354__SamuLevel_5_Divider__SHIFT 0x18 +#define DPM_TABLE_355__SamuLevel_6_Frequency_MASK 0xffffffff +#define DPM_TABLE_355__SamuLevel_6_Frequency__SHIFT 0x0 +#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_356__SamuLevel_6_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_356__SamuLevel_6_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_357__SamuLevel_6_padding_2_MASK 0xff +#define DPM_TABLE_357__SamuLevel_6_padding_2__SHIFT 0x0 +#define DPM_TABLE_357__SamuLevel_6_padding_1_MASK 0xff00 +#define DPM_TABLE_357__SamuLevel_6_padding_1__SHIFT 0x8 +#define DPM_TABLE_357__SamuLevel_6_padding_0_MASK 0xff0000 +#define DPM_TABLE_357__SamuLevel_6_padding_0__SHIFT 0x10 +#define DPM_TABLE_357__SamuLevel_6_Divider_MASK 0xff000000 +#define DPM_TABLE_357__SamuLevel_6_Divider__SHIFT 0x18 +#define DPM_TABLE_358__SamuLevel_7_Frequency_MASK 0xffffffff +#define DPM_TABLE_358__SamuLevel_7_Frequency__SHIFT 0x0 +#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_359__SamuLevel_7_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_359__SamuLevel_7_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddci__SHIFT 0x10 +#define 
DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_360__SamuLevel_7_padding_2_MASK 0xff +#define DPM_TABLE_360__SamuLevel_7_padding_2__SHIFT 0x0 +#define DPM_TABLE_360__SamuLevel_7_padding_1_MASK 0xff00 +#define DPM_TABLE_360__SamuLevel_7_padding_1__SHIFT 0x8 +#define DPM_TABLE_360__SamuLevel_7_padding_0_MASK 0xff0000 +#define DPM_TABLE_360__SamuLevel_7_padding_0__SHIFT 0x10 +#define DPM_TABLE_360__SamuLevel_7_Divider_MASK 0xff000000 +#define DPM_TABLE_360__SamuLevel_7_Divider__SHIFT 0x18 +#define DPM_TABLE_361__Ulv_CcPwrDynRm_MASK 0xffffffff +#define DPM_TABLE_361__Ulv_CcPwrDynRm__SHIFT 0x0 +#define DPM_TABLE_362__Ulv_CcPwrDynRm1_MASK 0xffffffff +#define DPM_TABLE_362__Ulv_CcPwrDynRm1__SHIFT 0x0 +#define DPM_TABLE_363__Ulv_VddcPhase_MASK 0xff +#define DPM_TABLE_363__Ulv_VddcPhase__SHIFT 0x0 +#define DPM_TABLE_363__Ulv_VddcOffsetVid_MASK 0xff00 +#define DPM_TABLE_363__Ulv_VddcOffsetVid__SHIFT 0x8 +#define DPM_TABLE_363__Ulv_VddcOffset_MASK 0xffff0000 +#define DPM_TABLE_363__Ulv_VddcOffset__SHIFT 0x10 +#define DPM_TABLE_364__Ulv_Reserved_MASK 0xffffffff +#define DPM_TABLE_364__Ulv_Reserved__SHIFT 0x0 +#define DPM_TABLE_365__SclkStepSize_MASK 0xffffffff +#define DPM_TABLE_365__SclkStepSize__SHIFT 0x0 +#define DPM_TABLE_366__Smio_0_MASK 0xffffffff +#define DPM_TABLE_366__Smio_0__SHIFT 0x0 +#define DPM_TABLE_367__Smio_1_MASK 0xffffffff +#define DPM_TABLE_367__Smio_1__SHIFT 0x0 +#define DPM_TABLE_368__Smio_2_MASK 0xffffffff +#define DPM_TABLE_368__Smio_2__SHIFT 0x0 +#define DPM_TABLE_369__Smio_3_MASK 0xffffffff +#define DPM_TABLE_369__Smio_3__SHIFT 0x0 +#define DPM_TABLE_370__Smio_4_MASK 0xffffffff +#define DPM_TABLE_370__Smio_4__SHIFT 0x0 +#define DPM_TABLE_371__Smio_5_MASK 0xffffffff +#define DPM_TABLE_371__Smio_5__SHIFT 0x0 +#define DPM_TABLE_372__Smio_6_MASK 0xffffffff +#define DPM_TABLE_372__Smio_6__SHIFT 0x0 +#define DPM_TABLE_373__Smio_7_MASK 0xffffffff +#define DPM_TABLE_373__Smio_7__SHIFT 0x0 +#define DPM_TABLE_374__Smio_8_MASK 0xffffffff +#define DPM_TABLE_374__Smio_8__SHIFT 0x0 +#define DPM_TABLE_375__Smio_9_MASK 0xffffffff +#define DPM_TABLE_375__Smio_9__SHIFT 0x0 +#define DPM_TABLE_376__Smio_10_MASK 0xffffffff +#define DPM_TABLE_376__Smio_10__SHIFT 0x0 +#define DPM_TABLE_377__Smio_11_MASK 0xffffffff +#define DPM_TABLE_377__Smio_11__SHIFT 0x0 +#define DPM_TABLE_378__Smio_12_MASK 0xffffffff +#define DPM_TABLE_378__Smio_12__SHIFT 0x0 +#define DPM_TABLE_379__Smio_13_MASK 0xffffffff +#define DPM_TABLE_379__Smio_13__SHIFT 0x0 +#define DPM_TABLE_380__Smio_14_MASK 0xffffffff +#define DPM_TABLE_380__Smio_14__SHIFT 0x0 +#define DPM_TABLE_381__Smio_15_MASK 0xffffffff +#define DPM_TABLE_381__Smio_15__SHIFT 0x0 +#define DPM_TABLE_382__Smio_16_MASK 0xffffffff +#define DPM_TABLE_382__Smio_16__SHIFT 0x0 +#define DPM_TABLE_383__Smio_17_MASK 0xffffffff +#define DPM_TABLE_383__Smio_17__SHIFT 0x0 +#define DPM_TABLE_384__Smio_18_MASK 0xffffffff +#define DPM_TABLE_384__Smio_18__SHIFT 0x0 +#define DPM_TABLE_385__Smio_19_MASK 0xffffffff +#define DPM_TABLE_385__Smio_19__SHIFT 0x0 +#define DPM_TABLE_386__Smio_20_MASK 0xffffffff +#define DPM_TABLE_386__Smio_20__SHIFT 0x0 +#define DPM_TABLE_387__Smio_21_MASK 0xffffffff +#define DPM_TABLE_387__Smio_21__SHIFT 0x0 +#define DPM_TABLE_388__Smio_22_MASK 0xffffffff +#define DPM_TABLE_388__Smio_22__SHIFT 0x0 +#define DPM_TABLE_389__Smio_23_MASK 0xffffffff +#define DPM_TABLE_389__Smio_23__SHIFT 0x0 +#define DPM_TABLE_390__Smio_24_MASK 0xffffffff +#define 
DPM_TABLE_390__Smio_24__SHIFT 0x0 +#define DPM_TABLE_391__Smio_25_MASK 0xffffffff +#define DPM_TABLE_391__Smio_25__SHIFT 0x0 +#define DPM_TABLE_392__Smio_26_MASK 0xffffffff +#define DPM_TABLE_392__Smio_26__SHIFT 0x0 +#define DPM_TABLE_393__Smio_27_MASK 0xffffffff +#define DPM_TABLE_393__Smio_27__SHIFT 0x0 +#define DPM_TABLE_394__Smio_28_MASK 0xffffffff +#define DPM_TABLE_394__Smio_28__SHIFT 0x0 +#define DPM_TABLE_395__Smio_29_MASK 0xffffffff +#define DPM_TABLE_395__Smio_29__SHIFT 0x0 +#define DPM_TABLE_396__Smio_30_MASK 0xffffffff +#define DPM_TABLE_396__Smio_30__SHIFT 0x0 +#define DPM_TABLE_397__Smio_31_MASK 0xffffffff +#define DPM_TABLE_397__Smio_31__SHIFT 0x0 +#define DPM_TABLE_398__SamuBootLevel_MASK 0xff +#define DPM_TABLE_398__SamuBootLevel__SHIFT 0x0 +#define DPM_TABLE_398__AcpBootLevel_MASK 0xff00 +#define DPM_TABLE_398__AcpBootLevel__SHIFT 0x8 +#define DPM_TABLE_398__VceBootLevel_MASK 0xff0000 +#define DPM_TABLE_398__VceBootLevel__SHIFT 0x10 +#define DPM_TABLE_398__UvdBootLevel_MASK 0xff000000 +#define DPM_TABLE_398__UvdBootLevel__SHIFT 0x18 +#define DPM_TABLE_399__GraphicsInterval_MASK 0xff +#define DPM_TABLE_399__GraphicsInterval__SHIFT 0x0 +#define DPM_TABLE_399__GraphicsThermThrottleEnable_MASK 0xff00 +#define DPM_TABLE_399__GraphicsThermThrottleEnable__SHIFT 0x8 +#define DPM_TABLE_399__GraphicsVoltageChangeEnable_MASK 0xff0000 +#define DPM_TABLE_399__GraphicsVoltageChangeEnable__SHIFT 0x10 +#define DPM_TABLE_399__GraphicsBootLevel_MASK 0xff000000 +#define DPM_TABLE_399__GraphicsBootLevel__SHIFT 0x18 +#define DPM_TABLE_400__TemperatureLimitHigh_MASK 0xffff +#define DPM_TABLE_400__TemperatureLimitHigh__SHIFT 0x0 +#define DPM_TABLE_400__ThermalInterval_MASK 0xff0000 +#define DPM_TABLE_400__ThermalInterval__SHIFT 0x10 +#define DPM_TABLE_400__VoltageInterval_MASK 0xff000000 +#define DPM_TABLE_400__VoltageInterval__SHIFT 0x18 +#define DPM_TABLE_401__MemoryVoltageChangeEnable_MASK 0xff +#define DPM_TABLE_401__MemoryVoltageChangeEnable__SHIFT 0x0 +#define DPM_TABLE_401__MemoryBootLevel_MASK 0xff00 +#define DPM_TABLE_401__MemoryBootLevel__SHIFT 0x8 +#define DPM_TABLE_401__TemperatureLimitLow_MASK 0xffff0000 +#define DPM_TABLE_401__TemperatureLimitLow__SHIFT 0x10 +#define DPM_TABLE_402__MemoryThermThrottleEnable_MASK 0xff +#define DPM_TABLE_402__MemoryThermThrottleEnable__SHIFT 0x0 +#define DPM_TABLE_402__MemoryInterval_MASK 0xff00 +#define DPM_TABLE_402__MemoryInterval__SHIFT 0x8 +#define DPM_TABLE_402__BootMVdd_MASK 0xffff0000 +#define DPM_TABLE_402__BootMVdd__SHIFT 0x10 +#define DPM_TABLE_403__PhaseResponseTime_MASK 0xffff +#define DPM_TABLE_403__PhaseResponseTime__SHIFT 0x0 +#define DPM_TABLE_403__VoltageResponseTime_MASK 0xffff0000 +#define DPM_TABLE_403__VoltageResponseTime__SHIFT 0x10 +#define DPM_TABLE_404__DTEMode_MASK 0xff +#define DPM_TABLE_404__DTEMode__SHIFT 0x0 +#define DPM_TABLE_404__DTEInterval_MASK 0xff00 +#define DPM_TABLE_404__DTEInterval__SHIFT 0x8 +#define DPM_TABLE_404__PCIeGenInterval_MASK 0xff0000 +#define DPM_TABLE_404__PCIeGenInterval__SHIFT 0x10 +#define DPM_TABLE_404__PCIeBootLinkLevel_MASK 0xff000000 +#define DPM_TABLE_404__PCIeBootLinkLevel__SHIFT 0x18 +#define DPM_TABLE_405__ThermGpio_MASK 0xff +#define DPM_TABLE_405__ThermGpio__SHIFT 0x0 +#define DPM_TABLE_405__AcDcGpio_MASK 0xff00 +#define DPM_TABLE_405__AcDcGpio__SHIFT 0x8 +#define DPM_TABLE_405__VRHotGpio_MASK 0xff0000 +#define DPM_TABLE_405__VRHotGpio__SHIFT 0x10 +#define DPM_TABLE_405__SVI2Enable_MASK 0xff000000 +#define DPM_TABLE_405__SVI2Enable__SHIFT 0x18 +#define 
DPM_TABLE_406__PPM_TemperatureLimit_MASK 0xffff
+#define DPM_TABLE_406__PPM_TemperatureLimit__SHIFT 0x0
+#define DPM_TABLE_406__PPM_PkgPwrLimit_MASK 0xffff0000
+#define DPM_TABLE_406__PPM_PkgPwrLimit__SHIFT 0x10
+#define DPM_TABLE_407__TargetTdp_MASK 0xffff
+#define DPM_TABLE_407__TargetTdp__SHIFT 0x0
+#define DPM_TABLE_407__DefaultTdp_MASK 0xffff0000
+#define DPM_TABLE_407__DefaultTdp__SHIFT 0x10
+#define DPM_TABLE_408__FpsLowThreshold_MASK 0xffff
+#define DPM_TABLE_408__FpsLowThreshold__SHIFT 0x0
+#define DPM_TABLE_408__FpsHighThreshold_MASK 0xffff0000
+#define DPM_TABLE_408__FpsHighThreshold__SHIFT 0x10
+#define DPM_TABLE_409__BAPMTI_R_0_1_0_MASK 0xffff
+#define DPM_TABLE_409__BAPMTI_R_0_1_0__SHIFT 0x0
+#define DPM_TABLE_409__BAPMTI_R_0_0_0_MASK 0xffff0000
+#define DPM_TABLE_409__BAPMTI_R_0_0_0__SHIFT 0x10
+#define DPM_TABLE_410__BAPMTI_R_1_0_0_MASK 0xffff
+#define DPM_TABLE_410__BAPMTI_R_1_0_0__SHIFT 0x0
+#define DPM_TABLE_410__BAPMTI_R_0_2_0_MASK 0xffff0000
+#define DPM_TABLE_410__BAPMTI_R_0_2_0__SHIFT 0x10
+#define DPM_TABLE_411__BAPMTI_R_1_2_0_MASK 0xffff
+#define DPM_TABLE_411__BAPMTI_R_1_2_0__SHIFT 0x0
+#define DPM_TABLE_411__BAPMTI_R_1_1_0_MASK 0xffff0000
+#define DPM_TABLE_411__BAPMTI_R_1_1_0__SHIFT 0x10
+#define DPM_TABLE_412__BAPMTI_R_2_1_0_MASK 0xffff
+#define DPM_TABLE_412__BAPMTI_R_2_1_0__SHIFT 0x0
+#define DPM_TABLE_412__BAPMTI_R_2_0_0_MASK 0xffff0000
+#define DPM_TABLE_412__BAPMTI_R_2_0_0__SHIFT 0x10
+#define DPM_TABLE_413__BAPMTI_R_3_0_0_MASK 0xffff
+#define DPM_TABLE_413__BAPMTI_R_3_0_0__SHIFT 0x0
+#define DPM_TABLE_413__BAPMTI_R_2_2_0_MASK 0xffff0000
+#define DPM_TABLE_413__BAPMTI_R_2_2_0__SHIFT 0x10
+#define DPM_TABLE_414__BAPMTI_R_3_2_0_MASK 0xffff
+#define DPM_TABLE_414__BAPMTI_R_3_2_0__SHIFT 0x0
+#define DPM_TABLE_414__BAPMTI_R_3_1_0_MASK 0xffff0000
+#define DPM_TABLE_414__BAPMTI_R_3_1_0__SHIFT 0x10
+#define DPM_TABLE_415__BAPMTI_R_4_1_0_MASK 0xffff
+#define DPM_TABLE_415__BAPMTI_R_4_1_0__SHIFT 0x0
+#define DPM_TABLE_415__BAPMTI_R_4_0_0_MASK 0xffff0000
+#define DPM_TABLE_415__BAPMTI_R_4_0_0__SHIFT 0x10
+#define DPM_TABLE_416__BAPMTI_RC_0_0_0_MASK 0xffff
+#define DPM_TABLE_416__BAPMTI_RC_0_0_0__SHIFT 0x0
+#define DPM_TABLE_416__BAPMTI_R_4_2_0_MASK 0xffff0000
+#define DPM_TABLE_416__BAPMTI_R_4_2_0__SHIFT 0x10
+#define DPM_TABLE_417__BAPMTI_RC_0_2_0_MASK 0xffff
+#define DPM_TABLE_417__BAPMTI_RC_0_2_0__SHIFT 0x0
+#define DPM_TABLE_417__BAPMTI_RC_0_1_0_MASK 0xffff0000
+#define DPM_TABLE_417__BAPMTI_RC_0_1_0__SHIFT 0x10
+#define DPM_TABLE_418__BAPMTI_RC_1_1_0_MASK 0xffff
+#define DPM_TABLE_418__BAPMTI_RC_1_1_0__SHIFT 0x0
+#define DPM_TABLE_418__BAPMTI_RC_1_0_0_MASK 0xffff0000
+#define DPM_TABLE_418__BAPMTI_RC_1_0_0__SHIFT 0x10
+#define DPM_TABLE_419__BAPMTI_RC_2_0_0_MASK 0xffff
+#define DPM_TABLE_419__BAPMTI_RC_2_0_0__SHIFT 0x0
+#define DPM_TABLE_419__BAPMTI_RC_1_2_0_MASK 0xffff0000
+#define DPM_TABLE_419__BAPMTI_RC_1_2_0__SHIFT 0x10
+#define DPM_TABLE_420__BAPMTI_RC_2_2_0_MASK 0xffff
+#define DPM_TABLE_420__BAPMTI_RC_2_2_0__SHIFT 0x0
+#define DPM_TABLE_420__BAPMTI_RC_2_1_0_MASK 0xffff0000
+#define DPM_TABLE_420__BAPMTI_RC_2_1_0__SHIFT 0x10
+#define DPM_TABLE_421__BAPMTI_RC_3_1_0_MASK 0xffff
+#define DPM_TABLE_421__BAPMTI_RC_3_1_0__SHIFT 0x0
+#define DPM_TABLE_421__BAPMTI_RC_3_0_0_MASK 0xffff0000
+#define DPM_TABLE_421__BAPMTI_RC_3_0_0__SHIFT 0x10
+#define DPM_TABLE_422__BAPMTI_RC_4_0_0_MASK 0xffff
+#define DPM_TABLE_422__BAPMTI_RC_4_0_0__SHIFT 0x0
+#define DPM_TABLE_422__BAPMTI_RC_3_2_0_MASK 0xffff0000
+#define DPM_TABLE_422__BAPMTI_RC_3_2_0__SHIFT 0x10
+#define DPM_TABLE_423__BAPMTI_RC_4_2_0_MASK 0xffff
+#define DPM_TABLE_423__BAPMTI_RC_4_2_0__SHIFT 0x0
+#define DPM_TABLE_423__BAPMTI_RC_4_1_0_MASK 0xffff0000
+#define DPM_TABLE_423__BAPMTI_RC_4_1_0__SHIFT 0x10
+#define DPM_TABLE_424__GpuTjHyst_MASK 0xff
+#define DPM_TABLE_424__GpuTjHyst__SHIFT 0x0
+#define DPM_TABLE_424__GpuTjMax_MASK 0xff00
+#define DPM_TABLE_424__GpuTjMax__SHIFT 0x8
+#define DPM_TABLE_424__DTETjOffset_MASK 0xff0000
+#define DPM_TABLE_424__DTETjOffset__SHIFT 0x10
+#define DPM_TABLE_424__DTEAmbientTempBase_MASK 0xff000000
+#define DPM_TABLE_424__DTEAmbientTempBase__SHIFT 0x18
+#define DPM_TABLE_425__BootVoltage_Phases_MASK 0xff
+#define DPM_TABLE_425__BootVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_425__BootVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_425__BootVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_425__BootVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_425__BootVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_425__BootVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_425__BootVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_426__BAPM_TEMP_GRADIENT_MASK 0xffffffff
+#define DPM_TABLE_426__BAPM_TEMP_GRADIENT__SHIFT 0x0
+#define DPM_TABLE_427__LowSclkInterruptThreshold_MASK 0xffffffff
+#define DPM_TABLE_427__LowSclkInterruptThreshold__SHIFT 0x0
+#define DPM_TABLE_428__VddGfxReChkWait_MASK 0xffffffff
+#define DPM_TABLE_428__VddGfxReChkWait__SHIFT 0x0
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_1_MASK 0xff
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_1__SHIFT 0x0
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_0_MASK 0xff00
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_0__SHIFT 0x8
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_maxVID_MASK 0xff0000
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_maxVID__SHIFT 0x10
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_minVID_MASK 0xff000000
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_minVID__SHIFT 0x18
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_3_MASK 0xff
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_3__SHIFT 0x0
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_2_MASK 0xff00
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_2__SHIFT 0x8
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_1_MASK 0xff0000
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_1__SHIFT 0x10
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_0_MASK 0xff000000
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_0__SHIFT 0x18
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_7_MASK 0xff
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_7__SHIFT 0x0
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_6_MASK 0xff00
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_6__SHIFT 0x8
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_5_MASK 0xff0000
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_5__SHIFT 0x10
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_4_MASK 0xff000000
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_4__SHIFT 0x18
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_1_MASK 0xff
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_1__SHIFT 0x0
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_0_MASK 0xff00
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_0__SHIFT 0x8
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_maxVID_MASK 0xff0000
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_maxVID__SHIFT 0x10
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_minVID_MASK 0xff000000
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_minVID__SHIFT 0x18
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_3_MASK 0xff
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_3__SHIFT 0x0
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_2_MASK 0xff00
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_2__SHIFT 0x8
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_1_MASK 0xff0000
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_1__SHIFT 0x10
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_0_MASK 0xff000000
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_0__SHIFT 0x18
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_7_MASK 0xff
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_7__SHIFT 0x0
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_6_MASK 0xff00
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_6__SHIFT 0x8
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_5_MASK 0xff0000
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_5__SHIFT 0x10
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_4_MASK 0xff000000
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_4__SHIFT 0x18
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_1_MASK 0xff
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_1__SHIFT 0x0
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_0_MASK 0xff00
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_0__SHIFT 0x8
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_maxVID_MASK 0xff0000
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_maxVID__SHIFT 0x10
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_minVID_MASK 0xff000000
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_minVID__SHIFT 0x18
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_3_MASK 0xff
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_3__SHIFT 0x0
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_2_MASK 0xff00
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_2__SHIFT 0x8
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_1_MASK 0xff0000
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_1__SHIFT 0x10
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_0_MASK 0xff000000
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_0__SHIFT 0x18
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_7_MASK 0xff
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_7__SHIFT 0x0
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_6_MASK 0xff00
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_6__SHIFT 0x8
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_5_MASK 0xff0000
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_5__SHIFT 0x10
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_4_MASK 0xff000000
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_4__SHIFT 0x18
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_1_MASK 0xff
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_1__SHIFT 0x0
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_0_MASK 0xff00
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_0__SHIFT 0x8
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_maxVID_MASK 0xff0000
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_maxVID__SHIFT 0x10
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_minVID_MASK 0xff000000
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_minVID__SHIFT 0x18
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_3_MASK 0xff
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_3__SHIFT 0x0
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_2_MASK 0xff00
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_2__SHIFT 0x8
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_1_MASK 0xff0000
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_1__SHIFT 0x10
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_0_MASK 0xff000000
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_0__SHIFT 0x18
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_7_MASK 0xff
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_7__SHIFT 0x0
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_6_MASK 0xff00
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_6__SHIFT 0x8
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_5_MASK 0xff0000
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_5__SHIFT 0x10
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_4_MASK 0xff000000
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_4__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_1__RefClockFrequency_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_1__RefClockFrequency__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_2__PmTimerPeriod_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_2__PmTimerPeriod__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_3__FeatureEnables_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_3__FeatureEnables__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_4__PreVBlankGap_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_4__PreVBlankGap__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_5__VBlankTimeout_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_5__VBlankTimeout__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_6__TrainTimeGap_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_6__TrainTimeGap__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_7__MvddSwitchTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_7__MvddSwitchTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_8__LongestAcpiTrainTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_8__LongestAcpiTrainTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_9__AcpiDelay_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_9__AcpiDelay__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_10__G5TrainTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_10__G5TrainTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_11__DelayMpllPwron_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_11__DelayMpllPwron__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_12__VoltageChangeTimeout_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_12__VoltageChangeTimeout__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_13__HandshakeDisables_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_13__HandshakeDisables__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy4Config_MASK 0xff
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy4Config__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy3Config_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy3Config__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy2Config_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy2Config__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy1Config_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy1Config__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy8Config_MASK 0xff
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy8Config__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy7Config_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy7Config__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy6Config_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy6Config__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy5Config_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy5Config__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_16__AverageGraphicsActivity_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_16__AverageGraphicsActivity__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_17__AverageMemoryActivity_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_17__AverageMemoryActivity__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_18__AverageGioActivity_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_18__AverageGioActivity__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_19__PCIeDpmEnabledLevels_MASK 0xff
+#define SOFT_REGISTERS_TABLE_19__PCIeDpmEnabledLevels__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_19__LClkDpmEnabledLevels_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_19__LClkDpmEnabledLevels__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_19__MClkDpmEnabledLevels_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_19__MClkDpmEnabledLevels__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_19__SClkDpmEnabledLevels_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_19__SClkDpmEnabledLevels__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_20__VCEDpmEnabledLevels_MASK 0xff
+#define SOFT_REGISTERS_TABLE_20__VCEDpmEnabledLevels__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_20__ACPDpmEnabledLevels_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_20__ACPDpmEnabledLevels__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_20__SAMUDpmEnabledLevels_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_20__SAMUDpmEnabledLevels__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_20__UVDDpmEnabledLevels_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_20__UVDDpmEnabledLevels__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_21__DRAM_LOG_ADDR_H_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_21__DRAM_LOG_ADDR_H__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_22__DRAM_LOG_ADDR_L_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_22__DRAM_LOG_ADDR_L__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_23__DRAM_LOG_PHY_ADDR_H_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_23__DRAM_LOG_PHY_ADDR_H__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_24__DRAM_LOG_PHY_ADDR_L_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_24__DRAM_LOG_PHY_ADDR_L__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_25__DRAM_LOG_BUFF_SIZE_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_25__DRAM_LOG_BUFF_SIZE__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_26__UlvEnterCount_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_26__UlvEnterCount__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_27__UlvTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_27__UlvTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_28__UcodeLoadStatus_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_28__UcodeLoadStatus__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_29__Reserved_0_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_29__Reserved_0__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_30__Reserved_1_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_30__Reserved_1__SHIFT 0x0
+#define PM_FUSES_1__SviLoadLineOffsetVddC_MASK 0xff
+#define PM_FUSES_1__SviLoadLineOffsetVddC__SHIFT 0x0
+#define PM_FUSES_1__SviLoadLineTrimVddC_MASK 0xff00
+#define PM_FUSES_1__SviLoadLineTrimVddC__SHIFT 0x8
+#define PM_FUSES_1__SviLoadLineVddC_MASK 0xff0000
+#define PM_FUSES_1__SviLoadLineVddC__SHIFT 0x10
+#define PM_FUSES_1__SviLoadLineEn_MASK 0xff000000
+#define PM_FUSES_1__SviLoadLineEn__SHIFT 0x18
+#define PM_FUSES_2__TDC_MAWt_MASK 0xff
+#define PM_FUSES_2__TDC_MAWt__SHIFT 0x0
+#define PM_FUSES_2__TDC_VDDC_ThrottleReleaseLimitPerc_MASK 0xff00
+#define PM_FUSES_2__TDC_VDDC_ThrottleReleaseLimitPerc__SHIFT 0x8
+#define PM_FUSES_2__TDC_VDDC_PkgLimit_MASK 0xffff0000
+#define PM_FUSES_2__TDC_VDDC_PkgLimit__SHIFT 0x10
+#define PM_FUSES_3__Reserved_MASK 0xff
+#define PM_FUSES_3__Reserved__SHIFT 0x0
+#define PM_FUSES_3__LPMLTemperatureMax_MASK 0xff00
+#define PM_FUSES_3__LPMLTemperatureMax__SHIFT 0x8
+#define PM_FUSES_3__LPMLTemperatureMin_MASK 0xff0000
+#define PM_FUSES_3__LPMLTemperatureMin__SHIFT 0x10
+#define PM_FUSES_3__TdcWaterfallCtl_MASK 0xff000000
+#define PM_FUSES_3__TdcWaterfallCtl__SHIFT 0x18
+#define PM_FUSES_4__LPMLTemperatureScaler_3_MASK 0xff
+#define PM_FUSES_4__LPMLTemperatureScaler_3__SHIFT 0x0
+#define PM_FUSES_4__LPMLTemperatureScaler_2_MASK 0xff00
+#define PM_FUSES_4__LPMLTemperatureScaler_2__SHIFT 0x8
+#define PM_FUSES_4__LPMLTemperatureScaler_1_MASK 0xff0000
+#define PM_FUSES_4__LPMLTemperatureScaler_1__SHIFT 0x10
+#define PM_FUSES_4__LPMLTemperatureScaler_0_MASK 0xff000000
+#define PM_FUSES_4__LPMLTemperatureScaler_0__SHIFT 0x18
+#define PM_FUSES_5__LPMLTemperatureScaler_7_MASK 0xff
+#define PM_FUSES_5__LPMLTemperatureScaler_7__SHIFT 0x0
+#define PM_FUSES_5__LPMLTemperatureScaler_6_MASK 0xff00
+#define PM_FUSES_5__LPMLTemperatureScaler_6__SHIFT 0x8
+#define PM_FUSES_5__LPMLTemperatureScaler_5_MASK 0xff0000
+#define PM_FUSES_5__LPMLTemperatureScaler_5__SHIFT 0x10
+#define PM_FUSES_5__LPMLTemperatureScaler_4_MASK 0xff000000
+#define PM_FUSES_5__LPMLTemperatureScaler_4__SHIFT 0x18
+#define PM_FUSES_6__LPMLTemperatureScaler_11_MASK 0xff
+#define PM_FUSES_6__LPMLTemperatureScaler_11__SHIFT 0x0
+#define PM_FUSES_6__LPMLTemperatureScaler_10_MASK 0xff00
+#define PM_FUSES_6__LPMLTemperatureScaler_10__SHIFT 0x8
+#define PM_FUSES_6__LPMLTemperatureScaler_9_MASK 0xff0000
+#define PM_FUSES_6__LPMLTemperatureScaler_9__SHIFT 0x10
+#define PM_FUSES_6__LPMLTemperatureScaler_8_MASK 0xff000000
+#define PM_FUSES_6__LPMLTemperatureScaler_8__SHIFT 0x18
+#define PM_FUSES_7__LPMLTemperatureScaler_15_MASK 0xff
+#define PM_FUSES_7__LPMLTemperatureScaler_15__SHIFT 0x0
+#define PM_FUSES_7__LPMLTemperatureScaler_14_MASK 0xff00
+#define PM_FUSES_7__LPMLTemperatureScaler_14__SHIFT 0x8
+#define PM_FUSES_7__LPMLTemperatureScaler_13_MASK 0xff0000
+#define PM_FUSES_7__LPMLTemperatureScaler_13__SHIFT 0x10
+#define PM_FUSES_7__LPMLTemperatureScaler_12_MASK 0xff000000
+#define PM_FUSES_7__LPMLTemperatureScaler_12__SHIFT 0x18
+#define PM_FUSES_8__FuzzyFan_ErrorRateSetDelta_MASK 0xffff
+#define PM_FUSES_8__FuzzyFan_ErrorRateSetDelta__SHIFT 0x0
+#define PM_FUSES_8__FuzzyFan_ErrorSetDelta_MASK 0xffff0000
+#define PM_FUSES_8__FuzzyFan_ErrorSetDelta__SHIFT 0x10
+#define PM_FUSES_9__Reserved6_MASK 0xffff
+#define PM_FUSES_9__Reserved6__SHIFT 0x0
+#define PM_FUSES_9__FuzzyFan_PwmSetDelta_MASK 0xffff0000
+#define PM_FUSES_9__FuzzyFan_PwmSetDelta__SHIFT 0x10
+#define PM_FUSES_10__GnbLPML_3_MASK 0xff
+#define PM_FUSES_10__GnbLPML_3__SHIFT 0x0
+#define PM_FUSES_10__GnbLPML_2_MASK 0xff00
+#define PM_FUSES_10__GnbLPML_2__SHIFT 0x8
+#define PM_FUSES_10__GnbLPML_1_MASK 0xff0000
+#define PM_FUSES_10__GnbLPML_1__SHIFT 0x10
+#define PM_FUSES_10__GnbLPML_0_MASK 0xff000000
+#define PM_FUSES_10__GnbLPML_0__SHIFT 0x18
+#define PM_FUSES_11__GnbLPML_7_MASK 0xff
+#define PM_FUSES_11__GnbLPML_7__SHIFT 0x0
+#define PM_FUSES_11__GnbLPML_6_MASK 0xff00
+#define PM_FUSES_11__GnbLPML_6__SHIFT 0x8
+#define PM_FUSES_11__GnbLPML_5_MASK 0xff0000
+#define PM_FUSES_11__GnbLPML_5__SHIFT 0x10
+#define PM_FUSES_11__GnbLPML_4_MASK 0xff000000
+#define PM_FUSES_11__GnbLPML_4__SHIFT 0x18
+#define PM_FUSES_12__GnbLPML_11_MASK 0xff
+#define PM_FUSES_12__GnbLPML_11__SHIFT 0x0
+#define PM_FUSES_12__GnbLPML_10_MASK 0xff00
+#define PM_FUSES_12__GnbLPML_10__SHIFT 0x8
+#define PM_FUSES_12__GnbLPML_9_MASK 0xff0000
+#define PM_FUSES_12__GnbLPML_9__SHIFT 0x10
+#define PM_FUSES_12__GnbLPML_8_MASK 0xff000000
+#define PM_FUSES_12__GnbLPML_8__SHIFT 0x18
+#define PM_FUSES_13__GnbLPML_15_MASK 0xff
+#define PM_FUSES_13__GnbLPML_15__SHIFT 0x0
+#define PM_FUSES_13__GnbLPML_14_MASK 0xff00
+#define PM_FUSES_13__GnbLPML_14__SHIFT 0x8
+#define PM_FUSES_13__GnbLPML_13_MASK 0xff0000
+#define PM_FUSES_13__GnbLPML_13__SHIFT 0x10
+#define PM_FUSES_13__GnbLPML_12_MASK 0xff000000
+#define PM_FUSES_13__GnbLPML_12__SHIFT 0x18
+#define PM_FUSES_14__Reserved1_1_MASK 0xff
+#define PM_FUSES_14__Reserved1_1__SHIFT 0x0
+#define PM_FUSES_14__Reserved1_0_MASK 0xff00
+#define PM_FUSES_14__Reserved1_0__SHIFT 0x8
+#define PM_FUSES_14__GnbLPMLMinVid_MASK 0xff0000
+#define PM_FUSES_14__GnbLPMLMinVid__SHIFT 0x10
+#define PM_FUSES_14__GnbLPMLMaxVid_MASK 0xff000000
+#define PM_FUSES_14__GnbLPMLMaxVid__SHIFT 0x18
+#define PM_FUSES_15__BapmVddCBaseLeakageLoSidd_MASK 0xffff
+#define PM_FUSES_15__BapmVddCBaseLeakageLoSidd__SHIFT 0x0
+#define PM_FUSES_15__BapmVddCBaseLeakageHiSidd_MASK 0xffff0000
+#define PM_FUSES_15__BapmVddCBaseLeakageHiSidd__SHIFT 0x10
+#define SMU_PM_STATUS_0__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_0__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_1__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_1__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_2__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_2__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_3__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_3__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_4__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_4__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_5__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_5__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_6__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_6__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_7__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_7__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_8__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_8__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_9__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_9__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_10__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_10__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_11__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_11__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_12__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_12__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_13__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_13__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_14__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_14__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_15__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_15__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_16__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_16__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_17__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_17__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_18__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_18__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_19__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_19__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_20__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_20__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_21__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_21__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_22__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_22__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_23__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_23__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_24__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_24__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_25__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_25__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_26__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_26__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_27__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_27__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_28__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_28__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_29__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_29__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_30__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_30__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_31__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_31__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_32__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_32__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_33__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_33__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_34__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_34__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_35__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_35__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_36__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_36__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_37__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_37__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_38__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_38__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_39__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_39__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_40__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_40__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_41__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_41__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_42__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_42__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_43__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_43__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_44__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_44__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_45__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_45__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_46__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_46__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_47__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_47__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_48__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_48__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_49__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_49__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_50__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_50__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_51__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_51__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_52__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_52__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_53__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_53__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_54__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_54__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_55__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_55__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_56__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_56__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_57__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_57__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_58__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_58__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_59__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_59__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_60__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_60__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_61__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_61__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_62__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_62__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_63__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_63__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_64__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_64__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_65__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_65__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_66__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_66__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_67__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_67__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_68__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_68__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_69__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_69__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_70__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_70__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_71__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_71__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_72__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_72__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_73__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_73__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_74__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_74__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_75__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_75__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_76__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_76__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_77__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_77__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_78__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_78__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_79__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_79__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_80__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_80__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_81__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_81__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_82__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_82__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_83__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_83__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_84__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_84__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_85__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_85__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_86__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_86__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_87__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_87__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_88__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_88__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_89__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_89__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_90__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_90__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_91__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_91__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_92__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_92__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_93__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_93__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_94__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_94__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_95__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_95__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_96__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_96__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_97__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_97__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_98__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_98__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_99__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_99__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_100__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_100__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_101__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_101__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_102__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_102__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_103__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_103__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_104__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_104__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_105__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_105__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_106__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_106__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_107__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_107__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_108__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_108__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_109__DATA_MASK 0xffffffff +#define 
SMU_PM_STATUS_109__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_110__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_110__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_111__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_111__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_112__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_112__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_113__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_113__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_114__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_114__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_115__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_115__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_116__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_116__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_117__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_117__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_118__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_118__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_119__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_119__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_120__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_120__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_121__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_121__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_122__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_122__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_123__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_123__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_124__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_124__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_125__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_125__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_126__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_126__DATA__SHIFT 0x0 +#define SMU_PM_STATUS_127__DATA_MASK 0xffffffff +#define SMU_PM_STATUS_127__DATA__SHIFT 0x0 +#define CG_THERMAL_INT_ENA__THERM_INTH_SET_MASK 0x1 +#define CG_THERMAL_INT_ENA__THERM_INTH_SET__SHIFT 0x0 +#define CG_THERMAL_INT_ENA__THERM_INTL_SET_MASK 0x2 +#define CG_THERMAL_INT_ENA__THERM_INTL_SET__SHIFT 0x1 +#define CG_THERMAL_INT_ENA__THERM_TRIGGER_SET_MASK 0x4 +#define CG_THERMAL_INT_ENA__THERM_TRIGGER_SET__SHIFT 0x2 +#define CG_THERMAL_INT_ENA__THERM_INTH_CLR_MASK 0x8 +#define CG_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT 0x3 +#define CG_THERMAL_INT_ENA__THERM_INTL_CLR_MASK 0x10 +#define CG_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT 0x4 +#define CG_THERMAL_INT_ENA__THERM_TRIGGER_CLR_MASK 0x20 +#define CG_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT 0x5 +#define CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK 0xff +#define CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT 0x0 +#define CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK 0xff00 +#define CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT 0x8 +#define CG_THERMAL_INT_CTRL__GNB_TEMP_THRESHOLD_MASK 0xff0000 +#define CG_THERMAL_INT_CTRL__GNB_TEMP_THRESHOLD__SHIFT 0x10 +#define CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK 0x1000000 +#define CG_THERMAL_INT_CTRL__THERM_INTH_MASK__SHIFT 0x18 +#define CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK 0x2000000 +#define CG_THERMAL_INT_CTRL__THERM_INTL_MASK__SHIFT 0x19 +#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK 0x4000000 +#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_MASK__SHIFT 0x1a +#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_CNB_MASK_MASK 0x8000000 +#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_CNB_MASK__SHIFT 0x1b +#define CG_THERMAL_INT_CTRL__THERM_GNB_HW_ENA_MASK 0x10000000 +#define CG_THERMAL_INT_CTRL__THERM_GNB_HW_ENA__SHIFT 0x1c +#define CG_THERMAL_INT_STATUS__THERM_INTH_DETECT_MASK 0x1 +#define CG_THERMAL_INT_STATUS__THERM_INTH_DETECT__SHIFT 0x0 +#define CG_THERMAL_INT_STATUS__THERM_INTL_DETECT_MASK 0x2 +#define 
CG_THERMAL_INT_STATUS__THERM_INTL_DETECT__SHIFT 0x1 +#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_DETECT_MASK 0x4 +#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_DETECT__SHIFT 0x2 +#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_CNB_DETECT_MASK 0x8 +#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_CNB_DETECT__SHIFT 0x3 +#define CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK 0x7 +#define CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT 0x0 +#define CG_THERMAL_CTRL__THERM_INC_CLK_MASK 0x8 +#define CG_THERMAL_CTRL__THERM_INC_CLK__SHIFT 0x3 +#define CG_THERMAL_CTRL__SPARE_MASK 0x3ff0 +#define CG_THERMAL_CTRL__SPARE__SHIFT 0x4 +#define CG_THERMAL_CTRL__DIG_THERM_DPM_MASK 0x3fc000 +#define CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT 0xe +#define CG_THERMAL_CTRL__RESERVED_MASK 0x1c00000 +#define CG_THERMAL_CTRL__RESERVED__SHIFT 0x16 +#define CG_THERMAL_CTRL__CTF_PAD_POLARITY_MASK 0x2000000 +#define CG_THERMAL_CTRL__CTF_PAD_POLARITY__SHIFT 0x19 +#define CG_THERMAL_CTRL__CTF_PAD_EN_MASK 0x4000000 +#define CG_THERMAL_CTRL__CTF_PAD_EN__SHIFT 0x1a +#define CG_THERMAL_STATUS__SPARE_MASK 0x1ff +#define CG_THERMAL_STATUS__SPARE__SHIFT 0x0 +#define CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK 0x1fe00 +#define CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT 0x9 +#define CG_THERMAL_STATUS__THERM_ALERT_MASK 0x20000 +#define CG_THERMAL_STATUS__THERM_ALERT__SHIFT 0x11 +#define CG_THERMAL_STATUS__GEN_STATUS_MASK 0x3c0000 +#define CG_THERMAL_STATUS__GEN_STATUS__SHIFT 0x12 +#define CG_THERMAL_INT__DIG_THERM_CTF_MASK 0xff +#define CG_THERMAL_INT__DIG_THERM_CTF__SHIFT 0x0 +#define CG_THERMAL_INT__DIG_THERM_INTH_MASK 0xff00 +#define CG_THERMAL_INT__DIG_THERM_INTH__SHIFT 0x8 +#define CG_THERMAL_INT__DIG_THERM_INTL_MASK 0xff0000 +#define CG_THERMAL_INT__DIG_THERM_INTL__SHIFT 0x10 +#define CG_THERMAL_INT__THERM_INT_MASK_MASK 0xf000000 +#define CG_THERMAL_INT__THERM_INT_MASK__SHIFT 0x18 +#define CG_MULT_THERMAL_CTRL__TS_FILTER_MASK 0xf +#define CG_MULT_THERMAL_CTRL__TS_FILTER__SHIFT 0x0 +#define CG_MULT_THERMAL_CTRL__UNUSED_MASK 0x1f0 +#define CG_MULT_THERMAL_CTRL__UNUSED__SHIFT 0x4 +#define CG_MULT_THERMAL_CTRL__THERMAL_RANGE_RST_MASK 0x200 +#define CG_MULT_THERMAL_CTRL__THERMAL_RANGE_RST__SHIFT 0x9 +#define CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK 0xff00000 +#define CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT 0x14 +#define CG_MULT_THERMAL_CTRL__THM_READY_CLEAR_MASK 0x10000000 +#define CG_MULT_THERMAL_CTRL__THM_READY_CLEAR__SHIFT 0x1c +#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK 0x1ff +#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP__SHIFT 0x0 +#define CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK 0x3fe00 +#define CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT 0x9 +#define THM_TMON2_CTRL__POWER_DOWN_MASK 0x1 +#define THM_TMON2_CTRL__POWER_DOWN__SHIFT 0x0 +#define THM_TMON2_CTRL__BGADJ_MASK 0x1fe +#define THM_TMON2_CTRL__BGADJ__SHIFT 0x1 +#define THM_TMON2_CTRL__BGADJ_MODE_MASK 0x200 +#define THM_TMON2_CTRL__BGADJ_MODE__SHIFT 0x9 +#define THM_TMON2_CTRL__TMON_PAUSE_MASK 0x400 +#define THM_TMON2_CTRL__TMON_PAUSE__SHIFT 0xa +#define THM_TMON2_CTRL__INT_MEAS_EN_MASK 0x800 +#define THM_TMON2_CTRL__INT_MEAS_EN__SHIFT 0xb +#define THM_TMON2_CTRL__DEBUG_MODE_MASK 0x1000 +#define THM_TMON2_CTRL__DEBUG_MODE__SHIFT 0xc +#define THM_TMON2_CTRL__EN_CFG_SERDES_MASK 0x2000 +#define THM_TMON2_CTRL__EN_CFG_SERDES__SHIFT 0xd +#define THM_TMON2_CTRL2__RDIL_PRESENT_MASK 0xffff +#define THM_TMON2_CTRL2__RDIL_PRESENT__SHIFT 0x0 +#define THM_TMON2_CTRL2__RDIR_PRESENT_MASK 0xffff0000 +#define THM_TMON2_CTRL2__RDIR_PRESENT__SHIFT 0x10 +#define THM_TMON2_CSR_WR__CSR_WRITE_MASK 0x1 +#define 
THM_TMON2_CSR_WR__CSR_WRITE__SHIFT 0x0 +#define THM_TMON2_CSR_WR__CSR_READ_MASK 0x2 +#define THM_TMON2_CSR_WR__CSR_READ__SHIFT 0x1 +#define THM_TMON2_CSR_WR__CSR_ADDR_MASK 0xffc +#define THM_TMON2_CSR_WR__CSR_ADDR__SHIFT 0x2 +#define THM_TMON2_CSR_WR__WRITE_DATA_MASK 0xfff000 +#define THM_TMON2_CSR_WR__WRITE_DATA__SHIFT 0xc +#define THM_TMON2_CSR_WR__SPARE_MASK 0x1000000 +#define THM_TMON2_CSR_WR__SPARE__SHIFT 0x18 +#define THM_TMON2_CSR_RD__READ_DATA_MASK 0xfff +#define THM_TMON2_CSR_RD__READ_DATA__SHIFT 0x0 +#define CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK 0xff +#define CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT 0x0 +#define CG_FDO_CTRL0__FAN_SPINUP_DUTY_MASK 0xff00 +#define CG_FDO_CTRL0__FAN_SPINUP_DUTY__SHIFT 0x8 +#define CG_FDO_CTRL0__FDO_PWM_MANUAL_MASK 0x10000 +#define CG_FDO_CTRL0__FDO_PWM_MANUAL__SHIFT 0x10 +#define CG_FDO_CTRL0__FDO_PWM_HYSTER_MASK 0x7e0000 +#define CG_FDO_CTRL0__FDO_PWM_HYSTER__SHIFT 0x11 +#define CG_FDO_CTRL0__FDO_PWM_RAMP_EN_MASK 0x800000 +#define CG_FDO_CTRL0__FDO_PWM_RAMP_EN__SHIFT 0x17 +#define CG_FDO_CTRL0__FDO_PWM_RAMP_MASK 0xff000000 +#define CG_FDO_CTRL0__FDO_PWM_RAMP__SHIFT 0x18 +#define CG_FDO_CTRL1__FMAX_DUTY100_MASK 0xff +#define CG_FDO_CTRL1__FMAX_DUTY100__SHIFT 0x0 +#define CG_FDO_CTRL1__FMIN_DUTY_MASK 0xff00 +#define CG_FDO_CTRL1__FMIN_DUTY__SHIFT 0x8 +#define CG_FDO_CTRL1__M_MASK 0xff0000 +#define CG_FDO_CTRL1__M__SHIFT 0x10 +#define CG_FDO_CTRL1__RESERVED_MASK 0x3f000000 +#define CG_FDO_CTRL1__RESERVED__SHIFT 0x18 +#define CG_FDO_CTRL1__FDO_PWRDNB_MASK 0x40000000 +#define CG_FDO_CTRL1__FDO_PWRDNB__SHIFT 0x1e +#define CG_FDO_CTRL2__TMIN_MASK 0xff +#define CG_FDO_CTRL2__TMIN__SHIFT 0x0 +#define CG_FDO_CTRL2__FAN_SPINUP_TIME_MASK 0x700 +#define CG_FDO_CTRL2__FAN_SPINUP_TIME__SHIFT 0x8 +#define CG_FDO_CTRL2__FDO_PWM_MODE_MASK 0x3800 +#define CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT 0xb +#define CG_FDO_CTRL2__TMIN_HYSTER_MASK 0x1c000 +#define CG_FDO_CTRL2__TMIN_HYSTER__SHIFT 0xe +#define CG_FDO_CTRL2__TMAX_MASK 0x1fe0000 +#define CG_FDO_CTRL2__TMAX__SHIFT 0x11 +#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK 0xfe000000 +#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT 0x19 +#define CG_TACH_CTRL__EDGE_PER_REV_MASK 0x7 +#define CG_TACH_CTRL__EDGE_PER_REV__SHIFT 0x0 +#define CG_TACH_CTRL__TARGET_PERIOD_MASK 0xfffffff8 +#define CG_TACH_CTRL__TARGET_PERIOD__SHIFT 0x3 +#define CG_TACH_STATUS__TACH_PERIOD_MASK 0xffffffff +#define CG_TACH_STATUS__TACH_PERIOD__SHIFT 0x0 +#define CC_THM_STRAPS0__TMON0_BGADJ_MASK 0x1fe +#define CC_THM_STRAPS0__TMON0_BGADJ__SHIFT 0x1 +#define CC_THM_STRAPS0__TMON1_BGADJ_MASK 0x1fe00 +#define CC_THM_STRAPS0__TMON1_BGADJ__SHIFT 0x9 +#define CC_THM_STRAPS0__TMON_CMON_FUSE_SEL_MASK 0x20000 +#define CC_THM_STRAPS0__TMON_CMON_FUSE_SEL__SHIFT 0x11 +#define CC_THM_STRAPS0__NUM_ACQ_MASK 0x1c0000 +#define CC_THM_STRAPS0__NUM_ACQ__SHIFT 0x12 +#define CC_THM_STRAPS0__TMON_CLK_SEL_MASK 0xe00000 +#define CC_THM_STRAPS0__TMON_CLK_SEL__SHIFT 0x15 +#define CC_THM_STRAPS0__TMON_CONFIG_SOURCE_MASK 0x1000000 +#define CC_THM_STRAPS0__TMON_CONFIG_SOURCE__SHIFT 0x18 +#define CC_THM_STRAPS0__CTF_DISABLE_MASK 0x2000000 +#define CC_THM_STRAPS0__CTF_DISABLE__SHIFT 0x19 +#define CC_THM_STRAPS0__TMON0_DISABLE_MASK 0x4000000 +#define CC_THM_STRAPS0__TMON0_DISABLE__SHIFT 0x1a +#define CC_THM_STRAPS0__TMON1_DISABLE_MASK 0x8000000 +#define CC_THM_STRAPS0__TMON1_DISABLE__SHIFT 0x1b +#define CC_THM_STRAPS0__TMON2_DISABLE_MASK 0x10000000 +#define CC_THM_STRAPS0__TMON2_DISABLE__SHIFT 0x1c +#define CC_THM_STRAPS0__TMON3_DISABLE_MASK 0x20000000 +#define 
CC_THM_STRAPS0__TMON3_DISABLE__SHIFT 0x1d +#define CC_THM_STRAPS0__UNUSED_MASK 0x80000000 +#define CC_THM_STRAPS0__UNUSED__SHIFT 0x1f +#define THM_TMON0_RDIL0_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIL0_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIL0_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIL0_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIL0_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIL0_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIL1_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIL1_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIL1_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIL1_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIL1_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIL1_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIL2_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIL2_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIL2_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIL2_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIL2_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIL2_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIL3_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIL3_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIL3_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIL3_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIL3_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIL3_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIL4_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIL4_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIL4_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIL4_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIL4_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIL4_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIL5_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIL5_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIL5_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIL5_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIL5_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIL5_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIL6_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIL6_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIL6_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIL6_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIL6_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIL6_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIL7_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIL7_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIL7_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIL7_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIL7_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIL7_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIL8_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIL8_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIL8_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIL8_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIL8_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIL8_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIL9_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIL9_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIL9_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIL9_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIL9_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIL9_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIL10_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIL10_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIL10_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIL10_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIL10_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIL10_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIL11_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIL11_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIL11_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIL11_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIL11_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIL11_DATA__TEMP__SHIFT 0xc +#define 
THM_TMON0_RDIL12_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIL12_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIL12_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIL12_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIL12_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIL12_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIL13_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIL13_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIL13_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIL13_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIL13_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIL13_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIL14_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIL14_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIL14_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIL14_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIL14_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIL14_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIL15_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIL15_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIL15_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIL15_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIL15_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIL15_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIR0_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIR0_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIR0_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIR0_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIR0_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIR0_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIR1_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIR1_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIR1_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIR1_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIR1_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIR1_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIR2_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIR2_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIR2_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIR2_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIR2_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIR2_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIR3_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIR3_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIR3_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIR3_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIR3_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIR3_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIR4_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIR4_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIR4_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIR4_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIR4_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIR4_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIR5_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIR5_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIR5_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIR5_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIR5_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIR5_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIR6_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIR6_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIR6_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIR6_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIR6_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIR6_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIR7_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIR7_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIR7_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIR7_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIR7_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIR7_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIR8_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIR8_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIR8_DATA__VALID_MASK 0x800 +#define 
THM_TMON0_RDIR8_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIR8_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIR8_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIR9_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIR9_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIR9_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIR9_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIR9_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIR9_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIR10_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIR10_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIR10_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIR10_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIR10_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIR10_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIR11_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIR11_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIR11_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIR11_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIR11_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIR11_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIR12_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIR12_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIR12_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIR12_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIR12_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIR12_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIR13_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIR13_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIR13_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIR13_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIR13_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIR13_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIR14_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIR14_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIR14_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIR14_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIR14_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIR14_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_RDIR15_DATA__Z_MASK 0x7ff +#define THM_TMON0_RDIR15_DATA__Z__SHIFT 0x0 +#define THM_TMON0_RDIR15_DATA__VALID_MASK 0x800 +#define THM_TMON0_RDIR15_DATA__VALID__SHIFT 0xb +#define THM_TMON0_RDIR15_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_RDIR15_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIL0_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIL0_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIL0_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIL0_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIL0_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIL0_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIL1_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIL1_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIL1_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIL1_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIL1_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIL1_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIL2_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIL2_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIL2_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIL2_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIL2_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIL2_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIL3_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIL3_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIL3_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIL3_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIL3_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIL3_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIL4_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIL4_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIL4_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIL4_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIL4_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIL4_DATA__TEMP__SHIFT 0xc +#define 
THM_TMON1_RDIL5_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIL5_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIL5_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIL5_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIL5_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIL5_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIL6_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIL6_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIL6_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIL6_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIL6_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIL6_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIL7_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIL7_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIL7_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIL7_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIL7_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIL7_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIL8_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIL8_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIL8_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIL8_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIL8_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIL8_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIL9_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIL9_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIL9_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIL9_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIL9_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIL9_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIL10_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIL10_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIL10_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIL10_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIL10_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIL10_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIL11_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIL11_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIL11_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIL11_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIL11_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIL11_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIL12_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIL12_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIL12_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIL12_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIL12_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIL12_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIL13_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIL13_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIL13_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIL13_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIL13_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIL13_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIL14_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIL14_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIL14_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIL14_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIL14_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIL14_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIL15_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIL15_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIL15_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIL15_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIL15_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIL15_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIR0_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIR0_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIR0_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIR0_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIR0_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIR0_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIR1_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIR1_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIR1_DATA__VALID_MASK 0x800 +#define 
THM_TMON1_RDIR1_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIR1_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIR1_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIR2_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIR2_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIR2_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIR2_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIR2_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIR2_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIR3_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIR3_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIR3_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIR3_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIR3_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIR3_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIR4_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIR4_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIR4_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIR4_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIR4_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIR4_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIR5_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIR5_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIR5_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIR5_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIR5_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIR5_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIR6_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIR6_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIR6_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIR6_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIR6_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIR6_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIR7_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIR7_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIR7_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIR7_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIR7_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIR7_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIR8_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIR8_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIR8_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIR8_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIR8_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIR8_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIR9_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIR9_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIR9_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIR9_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIR9_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIR9_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIR10_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIR10_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIR10_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIR10_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIR10_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIR10_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIR11_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIR11_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIR11_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIR11_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIR11_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIR11_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIR12_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIR12_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIR12_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIR12_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIR12_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIR12_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIR13_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIR13_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIR13_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIR13_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIR13_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIR13_DATA__TEMP__SHIFT 0xc +#define 
THM_TMON1_RDIR14_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIR14_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIR14_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIR14_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIR14_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIR14_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_RDIR15_DATA__Z_MASK 0x7ff +#define THM_TMON1_RDIR15_DATA__Z__SHIFT 0x0 +#define THM_TMON1_RDIR15_DATA__VALID_MASK 0x800 +#define THM_TMON1_RDIR15_DATA__VALID__SHIFT 0xb +#define THM_TMON1_RDIR15_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_RDIR15_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIL0_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIL0_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIL0_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIL0_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIL0_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIL0_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIL1_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIL1_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIL1_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIL1_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIL1_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIL1_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIL2_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIL2_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIL2_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIL2_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIL2_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIL2_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIL3_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIL3_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIL3_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIL3_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIL3_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIL3_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIL4_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIL4_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIL4_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIL4_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIL4_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIL4_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIL5_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIL5_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIL5_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIL5_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIL5_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIL5_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIL6_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIL6_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIL6_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIL6_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIL6_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIL6_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIL7_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIL7_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIL7_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIL7_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIL7_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIL7_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIL8_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIL8_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIL8_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIL8_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIL8_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIL8_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIL9_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIL9_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIL9_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIL9_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIL9_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIL9_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIL10_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIL10_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIL10_DATA__VALID_MASK 0x800 +#define 
THM_TMON2_RDIL10_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIL10_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIL10_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIL11_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIL11_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIL11_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIL11_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIL11_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIL11_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIL12_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIL12_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIL12_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIL12_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIL12_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIL12_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIL13_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIL13_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIL13_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIL13_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIL13_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIL13_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIL14_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIL14_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIL14_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIL14_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIL14_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIL14_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIL15_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIL15_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIL15_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIL15_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIL15_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIL15_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR0_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR0_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR0_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR0_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR0_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR0_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR1_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR1_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR1_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR1_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR1_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR1_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR2_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR2_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR2_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR2_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR2_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR2_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR3_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR3_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR3_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR3_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR3_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR3_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR4_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR4_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR4_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR4_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR4_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR4_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR5_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR5_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR5_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR5_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR5_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR5_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR6_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR6_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR6_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR6_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR6_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR6_DATA__TEMP__SHIFT 0xc +#define 
THM_TMON2_RDIR7_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR7_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR7_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR7_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR7_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR7_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR8_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR8_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR8_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR8_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR8_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR8_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR9_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR9_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR9_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR9_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR9_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR9_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR10_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR10_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR10_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR10_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR10_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR10_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR11_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR11_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR11_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR11_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR11_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR11_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR12_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR12_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR12_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR12_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR12_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR12_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR13_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR13_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR13_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR13_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR13_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR13_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR14_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR14_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR14_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR14_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR14_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR14_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR15_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR15_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR15_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR15_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR15_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR15_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_INT_DATA__Z_MASK 0x7ff +#define THM_TMON0_INT_DATA__Z__SHIFT 0x0 +#define THM_TMON0_INT_DATA__VALID_MASK 0x800 +#define THM_TMON0_INT_DATA__VALID__SHIFT 0xb +#define THM_TMON0_INT_DATA__TEMP_MASK 0xfff000 +#define THM_TMON0_INT_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_INT_DATA__Z_MASK 0x7ff +#define THM_TMON1_INT_DATA__Z__SHIFT 0x0 +#define THM_TMON1_INT_DATA__VALID_MASK 0x800 +#define THM_TMON1_INT_DATA__VALID__SHIFT 0xb +#define THM_TMON1_INT_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_INT_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_INT_DATA__Z_MASK 0x7ff +#define THM_TMON2_INT_DATA__Z__SHIFT 0x0 +#define THM_TMON2_INT_DATA__VALID_MASK 0x800 +#define THM_TMON2_INT_DATA__VALID__SHIFT 0xb +#define THM_TMON2_INT_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_INT_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_DEBUG__DEBUG_RDI_MASK 0x1f +#define THM_TMON0_DEBUG__DEBUG_RDI__SHIFT 0x0 +#define THM_TMON0_DEBUG__DEBUG_Z_MASK 0xffe0 +#define THM_TMON0_DEBUG__DEBUG_Z__SHIFT 0x5 
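Every field in this generated header comes as a MASK/SHIFT pair in the usual AMD `REG__FIELD_MASK` / `REG__FIELD__SHIFT` convention, and the TMON `*_DATA` registers above all share one layout: Z in bits 0-10, VALID in bit 11, TEMP in bits 12-23. A minimal decoding sketch follows; the `raw` argument and the helper name are illustrative only and not part of the header.

#include <stdbool.h>
#include <stdint.h>

/* Decode one TMON sample using the masks above; returns false while
 * the sensor has not yet latched a valid measurement. */
static bool tmon0_int_temp(uint32_t raw, uint32_t *temp)
{
	if (!(raw & THM_TMON0_INT_DATA__VALID_MASK))
		return false;
	*temp = (raw & THM_TMON0_INT_DATA__TEMP_MASK) >>
		THM_TMON0_INT_DATA__TEMP__SHIFT;
	return true;
}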
+#define THM_TMON1_DEBUG__DEBUG_RDI_MASK 0x1f
+#define THM_TMON1_DEBUG__DEBUG_RDI__SHIFT 0x0
+#define THM_TMON1_DEBUG__DEBUG_Z_MASK 0xffe0
+#define THM_TMON1_DEBUG__DEBUG_Z__SHIFT 0x5
+#define THM_TMON2_DEBUG__DEBUG_RDI_MASK 0x1f
+#define THM_TMON2_DEBUG__DEBUG_RDI__SHIFT 0x0
+#define THM_TMON2_DEBUG__DEBUG_Z_MASK 0xffe0
+#define THM_TMON2_DEBUG__DEBUG_Z__SHIFT 0x5
+#define THM_TMON0_STATUS__CURRENT_RDI_MASK 0x1f
+#define THM_TMON0_STATUS__CURRENT_RDI__SHIFT 0x0
+#define THM_TMON0_STATUS__MEAS_DONE_MASK 0x20
+#define THM_TMON0_STATUS__MEAS_DONE__SHIFT 0x5
+#define THM_TMON1_STATUS__CURRENT_RDI_MASK 0x1f
+#define THM_TMON1_STATUS__CURRENT_RDI__SHIFT 0x0
+#define THM_TMON1_STATUS__MEAS_DONE_MASK 0x20
+#define THM_TMON1_STATUS__MEAS_DONE__SHIFT 0x5
+#define THM_TMON2_STATUS__CURRENT_RDI_MASK 0x1f
+#define THM_TMON2_STATUS__CURRENT_RDI__SHIFT 0x0
+#define THM_TMON2_STATUS__MEAS_DONE_MASK 0x20
+#define THM_TMON2_STATUS__MEAS_DONE__SHIFT 0x5
+#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK 0x1
+#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN__SHIFT 0x0
+#define GENERAL_PWRMGT__STATIC_PM_EN_MASK 0x2
+#define GENERAL_PWRMGT__STATIC_PM_EN__SHIFT 0x1
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK 0x4
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS__SHIFT 0x2
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE_MASK 0x8
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE__SHIFT 0x3
+#define GENERAL_PWRMGT__SW_SMIO_INDEX_MASK 0x40
+#define GENERAL_PWRMGT__SW_SMIO_INDEX__SHIFT 0x6
+#define GENERAL_PWRMGT__LOW_VOLT_D2_ACPI_MASK 0x100
+#define GENERAL_PWRMGT__LOW_VOLT_D2_ACPI__SHIFT 0x8
+#define GENERAL_PWRMGT__LOW_VOLT_D3_ACPI_MASK 0x200
+#define GENERAL_PWRMGT__LOW_VOLT_D3_ACPI__SHIFT 0x9
+#define GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK 0x400
+#define GENERAL_PWRMGT__VOLT_PWRMGT_EN__SHIFT 0xa
+#define GENERAL_PWRMGT__SPARE11_MASK 0x800
+#define GENERAL_PWRMGT__SPARE11__SHIFT 0xb
+#define GENERAL_PWRMGT__GPU_COUNTER_ACPI_MASK 0x4000
+#define GENERAL_PWRMGT__GPU_COUNTER_ACPI__SHIFT 0xe
+#define GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK 0x8000
+#define GENERAL_PWRMGT__GPU_COUNTER_CLK__SHIFT 0xf
+#define GENERAL_PWRMGT__GPU_COUNTER_OFF_MASK 0x10000
+#define GENERAL_PWRMGT__GPU_COUNTER_OFF__SHIFT 0x10
+#define GENERAL_PWRMGT__GPU_COUNTER_INTF_OFF_MASK 0x20000
+#define GENERAL_PWRMGT__GPU_COUNTER_INTF_OFF__SHIFT 0x11
+#define GENERAL_PWRMGT__SPARE18_MASK 0x40000
+#define GENERAL_PWRMGT__SPARE18__SHIFT 0x12
+#define GENERAL_PWRMGT__ACPI_D3_VID_MASK 0x180000
+#define GENERAL_PWRMGT__ACPI_D3_VID__SHIFT 0x13
+#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK 0x800000
+#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN__SHIFT 0x17
+#define GENERAL_PWRMGT__SPARE27_MASK 0x8000000
+#define GENERAL_PWRMGT__SPARE27__SHIFT 0x1b
+#define GENERAL_PWRMGT__SPARE_MASK 0xf0000000
+#define GENERAL_PWRMGT__SPARE__SHIFT 0x1c
+#define CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK 0x3
+#define CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT 0x0
+#define CNB_PWRMGT_CNTL__GNB_SLOW_MASK 0x4
+#define CNB_PWRMGT_CNTL__GNB_SLOW__SHIFT 0x2
+#define CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK 0x8
+#define CNB_PWRMGT_CNTL__FORCE_NB_PS1__SHIFT 0x3
+#define CNB_PWRMGT_CNTL__DPM_ENABLED_MASK 0x10
+#define CNB_PWRMGT_CNTL__DPM_ENABLED__SHIFT 0x4
+#define CNB_PWRMGT_CNTL__SPARE_MASK 0xffffffe0
+#define CNB_PWRMGT_CNTL__SPARE__SHIFT 0x5
+#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK 0x1
+#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF__SHIFT 0x0
+#define SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK 0x10
+#define SCLK_PWRMGT_CNTL__RESET_BUSY_CNT__SHIFT 0x4
+#define SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK 0x20
+#define SCLK_PWRMGT_CNTL__RESET_SCLK_CNT__SHIFT 0x5
+#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN_MASK 0x4000
+#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN__SHIFT 0xe
+#define SCLK_PWRMGT_CNTL__AUTO_SCLK_PULSE_SKIP_MASK 0x8000
+#define SCLK_PWRMGT_CNTL__AUTO_SCLK_PULSE_SKIP__SHIFT 0xf
+#define SCLK_PWRMGT_CNTL__LIGHT_SLEEP_COUNTER_MASK 0x1f0000
+#define SCLK_PWRMGT_CNTL__LIGHT_SLEEP_COUNTER__SHIFT 0x10
+#define SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK 0x200000
+#define SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN__SHIFT 0x15
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARGET_STATE_MASK 0xf
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARGET_STATE__SHIFT 0x0
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_MASK 0xf0
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE__SHIFT 0x4
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK 0xf00
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT 0x8
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_MCLK_INDEX_MASK 0xf000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_MCLK_INDEX__SHIFT 0xc
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK 0x1f0000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT 0x10
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_SCLK_INDEX_MASK 0x3e00000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_SCLK_INDEX__SHIFT 0x15
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_LCLK_INDEX_MASK 0x1c000000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_LCLK_INDEX__SHIFT 0x1a
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_LCLK_INDEX_MASK 0xe0000000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_LCLK_INDEX__SHIFT 0x1d
+#define PWR_PCC_CONTROL__PCC_POLARITY_MASK 0x1
+#define PWR_PCC_CONTROL__PCC_POLARITY__SHIFT 0x0
+#define PWR_PCC_GPIO_SELECT__GPIO_MASK 0xffffffff
+#define PWR_PCC_GPIO_SELECT__GPIO__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_0__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_0__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_0__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_0__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_0__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_0__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_0__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_0__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_0__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_0__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_0__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_0__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_0__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_0__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_0__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_0__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_0__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_0__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_0__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_0__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_0__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_0__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_0__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_0__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
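The GENERAL_PWRMGT and TARGET_AND_CURRENT_PROFILE_INDEX fields above are consumed with the usual read-modify-write pattern. A hedged sketch follows; `rreg32()`/`wreg32()` and the register-offset argument are stand-ins for the driver's real MMIO accessors and the offsets that live in the companion register-offset header.

#include <stdint.h>

extern uint32_t rreg32(uint32_t reg);
extern void wreg32(uint32_t reg, uint32_t val);

static void enable_dynamic_pm(uint32_t mm_general_pwrmgt)
{
	uint32_t tmp = rreg32(mm_general_pwrmgt);

	tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK; /* keep thermal protection on */
	tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;        /* master power-management enable */
	wreg32(mm_general_pwrmgt, tmp);
}

Decoding a multi-bit field works the same way in reverse, e.g. the currently selected engine-clock level is `(val & TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT`.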
+#define CG_FREQ_TRAN_VOTING_0__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 +#define CG_FREQ_TRAN_VOTING_0__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc +#define CG_FREQ_TRAN_VOTING_0__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 +#define CG_FREQ_TRAN_VOTING_0__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd +#define CG_FREQ_TRAN_VOTING_0__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe +#define CG_FREQ_TRAN_VOTING_0__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf +#define CG_FREQ_TRAN_VOTING_0__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 +#define CG_FREQ_TRAN_VOTING_0__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 +#define CG_FREQ_TRAN_VOTING_0__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 +#define CG_FREQ_TRAN_VOTING_0__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 +#define CG_FREQ_TRAN_VOTING_0__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 +#define CG_FREQ_TRAN_VOTING_0__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 +#define CG_FREQ_TRAN_VOTING_0__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 +#define CG_FREQ_TRAN_VOTING_0__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 +#define CG_FREQ_TRAN_VOTING_0__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 +#define CG_FREQ_TRAN_VOTING_0__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 +#define CG_FREQ_TRAN_VOTING_0__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a +#define CG_FREQ_TRAN_VOTING_0__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b +#define CG_FREQ_TRAN_VOTING_0__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c +#define CG_FREQ_TRAN_VOTING_0__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d +#define CG_FREQ_TRAN_VOTING_0__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 +#define CG_FREQ_TRAN_VOTING_0__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e +#define CG_FREQ_TRAN_VOTING_1__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 +#define CG_FREQ_TRAN_VOTING_1__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 +#define CG_FREQ_TRAN_VOTING_1__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 +#define CG_FREQ_TRAN_VOTING_1__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 +#define CG_FREQ_TRAN_VOTING_1__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 +#define CG_FREQ_TRAN_VOTING_1__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 +#define CG_FREQ_TRAN_VOTING_1__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 +#define CG_FREQ_TRAN_VOTING_1__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 +#define CG_FREQ_TRAN_VOTING_1__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 +#define 
CG_FREQ_TRAN_VOTING_1__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 +#define CG_FREQ_TRAN_VOTING_1__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 +#define CG_FREQ_TRAN_VOTING_1__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 +#define CG_FREQ_TRAN_VOTING_1__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 +#define CG_FREQ_TRAN_VOTING_1__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 +#define CG_FREQ_TRAN_VOTING_1__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 +#define CG_FREQ_TRAN_VOTING_1__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 +#define CG_FREQ_TRAN_VOTING_1__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 +#define CG_FREQ_TRAN_VOTING_1__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 +#define CG_FREQ_TRAN_VOTING_1__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 +#define CG_FREQ_TRAN_VOTING_1__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 +#define CG_FREQ_TRAN_VOTING_1__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 +#define CG_FREQ_TRAN_VOTING_1__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa +#define CG_FREQ_TRAN_VOTING_1__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 +#define CG_FREQ_TRAN_VOTING_1__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb +#define CG_FREQ_TRAN_VOTING_1__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 +#define CG_FREQ_TRAN_VOTING_1__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc +#define CG_FREQ_TRAN_VOTING_1__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 +#define CG_FREQ_TRAN_VOTING_1__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd +#define CG_FREQ_TRAN_VOTING_1__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe +#define CG_FREQ_TRAN_VOTING_1__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf +#define CG_FREQ_TRAN_VOTING_1__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 +#define CG_FREQ_TRAN_VOTING_1__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 +#define CG_FREQ_TRAN_VOTING_1__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 +#define CG_FREQ_TRAN_VOTING_1__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 +#define CG_FREQ_TRAN_VOTING_1__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 +#define CG_FREQ_TRAN_VOTING_1__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 +#define CG_FREQ_TRAN_VOTING_1__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 +#define CG_FREQ_TRAN_VOTING_1__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 +#define CG_FREQ_TRAN_VOTING_1__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 +#define CG_FREQ_TRAN_VOTING_1__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 +#define CG_FREQ_TRAN_VOTING_1__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a +#define CG_FREQ_TRAN_VOTING_1__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b +#define CG_FREQ_TRAN_VOTING_1__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 
0x10000000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c +#define CG_FREQ_TRAN_VOTING_1__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d +#define CG_FREQ_TRAN_VOTING_1__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 +#define CG_FREQ_TRAN_VOTING_1__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e +#define CG_FREQ_TRAN_VOTING_2__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 +#define CG_FREQ_TRAN_VOTING_2__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 +#define CG_FREQ_TRAN_VOTING_2__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 +#define CG_FREQ_TRAN_VOTING_2__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 +#define CG_FREQ_TRAN_VOTING_2__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 +#define CG_FREQ_TRAN_VOTING_2__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 +#define CG_FREQ_TRAN_VOTING_2__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 +#define CG_FREQ_TRAN_VOTING_2__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 +#define CG_FREQ_TRAN_VOTING_2__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 +#define CG_FREQ_TRAN_VOTING_2__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 +#define CG_FREQ_TRAN_VOTING_2__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 +#define CG_FREQ_TRAN_VOTING_2__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 +#define CG_FREQ_TRAN_VOTING_2__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 +#define CG_FREQ_TRAN_VOTING_2__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 +#define CG_FREQ_TRAN_VOTING_2__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 +#define CG_FREQ_TRAN_VOTING_2__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 +#define CG_FREQ_TRAN_VOTING_2__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 +#define CG_FREQ_TRAN_VOTING_2__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 +#define CG_FREQ_TRAN_VOTING_2__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 +#define CG_FREQ_TRAN_VOTING_2__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 +#define CG_FREQ_TRAN_VOTING_2__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 +#define CG_FREQ_TRAN_VOTING_2__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa +#define CG_FREQ_TRAN_VOTING_2__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 +#define CG_FREQ_TRAN_VOTING_2__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb +#define CG_FREQ_TRAN_VOTING_2__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 +#define CG_FREQ_TRAN_VOTING_2__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc +#define CG_FREQ_TRAN_VOTING_2__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 +#define CG_FREQ_TRAN_VOTING_2__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd +#define CG_FREQ_TRAN_VOTING_2__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe +#define CG_FREQ_TRAN_VOTING_2__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf +#define CG_FREQ_TRAN_VOTING_2__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 +#define CG_FREQ_TRAN_VOTING_2__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 +#define CG_FREQ_TRAN_VOTING_2__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 +#define CG_FREQ_TRAN_VOTING_2__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 +#define CG_FREQ_TRAN_VOTING_2__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 +#define CG_FREQ_TRAN_VOTING_2__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 +#define 
CG_FREQ_TRAN_VOTING_2__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 +#define CG_FREQ_TRAN_VOTING_2__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 +#define CG_FREQ_TRAN_VOTING_2__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 +#define CG_FREQ_TRAN_VOTING_2__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 +#define CG_FREQ_TRAN_VOTING_2__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 +#define CG_FREQ_TRAN_VOTING_2__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a +#define CG_FREQ_TRAN_VOTING_2__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b +#define CG_FREQ_TRAN_VOTING_2__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c +#define CG_FREQ_TRAN_VOTING_2__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d +#define CG_FREQ_TRAN_VOTING_2__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 +#define CG_FREQ_TRAN_VOTING_2__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e +#define CG_FREQ_TRAN_VOTING_3__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 +#define CG_FREQ_TRAN_VOTING_3__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 +#define CG_FREQ_TRAN_VOTING_3__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 +#define CG_FREQ_TRAN_VOTING_3__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 +#define CG_FREQ_TRAN_VOTING_3__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 +#define CG_FREQ_TRAN_VOTING_3__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 +#define CG_FREQ_TRAN_VOTING_3__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 +#define CG_FREQ_TRAN_VOTING_3__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 +#define CG_FREQ_TRAN_VOTING_3__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 +#define CG_FREQ_TRAN_VOTING_3__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 +#define CG_FREQ_TRAN_VOTING_3__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 +#define CG_FREQ_TRAN_VOTING_3__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 +#define CG_FREQ_TRAN_VOTING_3__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 +#define CG_FREQ_TRAN_VOTING_3__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 +#define CG_FREQ_TRAN_VOTING_3__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 +#define CG_FREQ_TRAN_VOTING_3__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 +#define CG_FREQ_TRAN_VOTING_3__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 +#define CG_FREQ_TRAN_VOTING_3__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 +#define CG_FREQ_TRAN_VOTING_3__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 +#define CG_FREQ_TRAN_VOTING_3__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 +#define CG_FREQ_TRAN_VOTING_3__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 +#define CG_FREQ_TRAN_VOTING_3__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa +#define CG_FREQ_TRAN_VOTING_3__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 +#define CG_FREQ_TRAN_VOTING_3__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb +#define CG_FREQ_TRAN_VOTING_3__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 +#define CG_FREQ_TRAN_VOTING_3__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc +#define CG_FREQ_TRAN_VOTING_3__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 +#define CG_FREQ_TRAN_VOTING_3__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd +#define CG_FREQ_TRAN_VOTING_3__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 +#define 
CG_FREQ_TRAN_VOTING_3__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe +#define CG_FREQ_TRAN_VOTING_3__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf +#define CG_FREQ_TRAN_VOTING_3__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 +#define CG_FREQ_TRAN_VOTING_3__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 +#define CG_FREQ_TRAN_VOTING_3__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 +#define CG_FREQ_TRAN_VOTING_3__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 +#define CG_FREQ_TRAN_VOTING_3__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 +#define CG_FREQ_TRAN_VOTING_3__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 +#define CG_FREQ_TRAN_VOTING_3__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 +#define CG_FREQ_TRAN_VOTING_3__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 +#define CG_FREQ_TRAN_VOTING_3__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 +#define CG_FREQ_TRAN_VOTING_3__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 +#define CG_FREQ_TRAN_VOTING_3__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a +#define CG_FREQ_TRAN_VOTING_3__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b +#define CG_FREQ_TRAN_VOTING_3__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c +#define CG_FREQ_TRAN_VOTING_3__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d +#define CG_FREQ_TRAN_VOTING_3__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 +#define CG_FREQ_TRAN_VOTING_3__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e +#define CG_FREQ_TRAN_VOTING_4__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 +#define CG_FREQ_TRAN_VOTING_4__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 +#define CG_FREQ_TRAN_VOTING_4__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 +#define CG_FREQ_TRAN_VOTING_4__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 +#define CG_FREQ_TRAN_VOTING_4__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 +#define CG_FREQ_TRAN_VOTING_4__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 +#define CG_FREQ_TRAN_VOTING_4__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 +#define CG_FREQ_TRAN_VOTING_4__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 +#define CG_FREQ_TRAN_VOTING_4__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 +#define CG_FREQ_TRAN_VOTING_4__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 +#define CG_FREQ_TRAN_VOTING_4__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 +#define CG_FREQ_TRAN_VOTING_4__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 +#define CG_FREQ_TRAN_VOTING_4__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 +#define CG_FREQ_TRAN_VOTING_4__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 +#define 
CG_FREQ_TRAN_VOTING_4__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 +#define CG_FREQ_TRAN_VOTING_4__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 +#define CG_FREQ_TRAN_VOTING_4__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 +#define CG_FREQ_TRAN_VOTING_4__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 +#define CG_FREQ_TRAN_VOTING_4__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 +#define CG_FREQ_TRAN_VOTING_4__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 +#define CG_FREQ_TRAN_VOTING_4__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 +#define CG_FREQ_TRAN_VOTING_4__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa +#define CG_FREQ_TRAN_VOTING_4__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 +#define CG_FREQ_TRAN_VOTING_4__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb +#define CG_FREQ_TRAN_VOTING_4__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 +#define CG_FREQ_TRAN_VOTING_4__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc +#define CG_FREQ_TRAN_VOTING_4__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 +#define CG_FREQ_TRAN_VOTING_4__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd +#define CG_FREQ_TRAN_VOTING_4__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe +#define CG_FREQ_TRAN_VOTING_4__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf +#define CG_FREQ_TRAN_VOTING_4__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 +#define CG_FREQ_TRAN_VOTING_4__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 +#define CG_FREQ_TRAN_VOTING_4__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 +#define CG_FREQ_TRAN_VOTING_4__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 +#define CG_FREQ_TRAN_VOTING_4__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 +#define CG_FREQ_TRAN_VOTING_4__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 +#define CG_FREQ_TRAN_VOTING_4__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 +#define CG_FREQ_TRAN_VOTING_4__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 +#define CG_FREQ_TRAN_VOTING_4__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 +#define CG_FREQ_TRAN_VOTING_4__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 +#define CG_FREQ_TRAN_VOTING_4__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a +#define CG_FREQ_TRAN_VOTING_4__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b +#define CG_FREQ_TRAN_VOTING_4__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c +#define CG_FREQ_TRAN_VOTING_4__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d +#define CG_FREQ_TRAN_VOTING_4__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 +#define 
CG_FREQ_TRAN_VOTING_4__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e +#define CG_FREQ_TRAN_VOTING_5__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 +#define CG_FREQ_TRAN_VOTING_5__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 +#define CG_FREQ_TRAN_VOTING_5__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 +#define CG_FREQ_TRAN_VOTING_5__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 +#define CG_FREQ_TRAN_VOTING_5__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 +#define CG_FREQ_TRAN_VOTING_5__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 +#define CG_FREQ_TRAN_VOTING_5__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 +#define CG_FREQ_TRAN_VOTING_5__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 +#define CG_FREQ_TRAN_VOTING_5__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 +#define CG_FREQ_TRAN_VOTING_5__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 +#define CG_FREQ_TRAN_VOTING_5__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 +#define CG_FREQ_TRAN_VOTING_5__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 +#define CG_FREQ_TRAN_VOTING_5__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 +#define CG_FREQ_TRAN_VOTING_5__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 +#define CG_FREQ_TRAN_VOTING_5__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 +#define CG_FREQ_TRAN_VOTING_5__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 +#define CG_FREQ_TRAN_VOTING_5__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 +#define CG_FREQ_TRAN_VOTING_5__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 +#define CG_FREQ_TRAN_VOTING_5__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 +#define CG_FREQ_TRAN_VOTING_5__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 +#define CG_FREQ_TRAN_VOTING_5__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 +#define CG_FREQ_TRAN_VOTING_5__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa +#define CG_FREQ_TRAN_VOTING_5__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 +#define CG_FREQ_TRAN_VOTING_5__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb +#define CG_FREQ_TRAN_VOTING_5__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 +#define CG_FREQ_TRAN_VOTING_5__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc +#define CG_FREQ_TRAN_VOTING_5__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 +#define CG_FREQ_TRAN_VOTING_5__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd +#define CG_FREQ_TRAN_VOTING_5__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe +#define CG_FREQ_TRAN_VOTING_5__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf +#define CG_FREQ_TRAN_VOTING_5__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 +#define CG_FREQ_TRAN_VOTING_5__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 +#define CG_FREQ_TRAN_VOTING_5__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 +#define CG_FREQ_TRAN_VOTING_5__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 +#define CG_FREQ_TRAN_VOTING_5__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 +#define CG_FREQ_TRAN_VOTING_5__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 +#define CG_FREQ_TRAN_VOTING_5__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 +#define CG_FREQ_TRAN_VOTING_5__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 +#define 
CG_FREQ_TRAN_VOTING_5__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 +#define CG_FREQ_TRAN_VOTING_5__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 +#define CG_FREQ_TRAN_VOTING_5__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 +#define CG_FREQ_TRAN_VOTING_5__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a +#define CG_FREQ_TRAN_VOTING_5__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b +#define CG_FREQ_TRAN_VOTING_5__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c +#define CG_FREQ_TRAN_VOTING_5__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d +#define CG_FREQ_TRAN_VOTING_5__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 +#define CG_FREQ_TRAN_VOTING_5__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e +#define CG_FREQ_TRAN_VOTING_6__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 +#define CG_FREQ_TRAN_VOTING_6__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 +#define CG_FREQ_TRAN_VOTING_6__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 +#define CG_FREQ_TRAN_VOTING_6__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 +#define CG_FREQ_TRAN_VOTING_6__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 +#define CG_FREQ_TRAN_VOTING_6__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 +#define CG_FREQ_TRAN_VOTING_6__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 +#define CG_FREQ_TRAN_VOTING_6__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 +#define CG_FREQ_TRAN_VOTING_6__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 +#define CG_FREQ_TRAN_VOTING_6__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 +#define CG_FREQ_TRAN_VOTING_6__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 +#define CG_FREQ_TRAN_VOTING_6__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 +#define CG_FREQ_TRAN_VOTING_6__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 +#define CG_FREQ_TRAN_VOTING_6__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 +#define CG_FREQ_TRAN_VOTING_6__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 +#define CG_FREQ_TRAN_VOTING_6__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 +#define CG_FREQ_TRAN_VOTING_6__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 +#define CG_FREQ_TRAN_VOTING_6__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 +#define CG_FREQ_TRAN_VOTING_6__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 +#define CG_FREQ_TRAN_VOTING_6__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 +#define CG_FREQ_TRAN_VOTING_6__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 +#define CG_FREQ_TRAN_VOTING_6__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa +#define CG_FREQ_TRAN_VOTING_6__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 +#define CG_FREQ_TRAN_VOTING_6__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb +#define CG_FREQ_TRAN_VOTING_6__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 +#define CG_FREQ_TRAN_VOTING_6__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc +#define CG_FREQ_TRAN_VOTING_6__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 +#define CG_FREQ_TRAN_VOTING_6__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd +#define CG_FREQ_TRAN_VOTING_6__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe +#define CG_FREQ_TRAN_VOTING_6__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf +#define CG_FREQ_TRAN_VOTING_6__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 +#define 
CG_FREQ_TRAN_VOTING_6__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 +#define CG_FREQ_TRAN_VOTING_6__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 +#define CG_FREQ_TRAN_VOTING_6__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 +#define CG_FREQ_TRAN_VOTING_6__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 +#define CG_FREQ_TRAN_VOTING_6__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 +#define CG_FREQ_TRAN_VOTING_6__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 +#define CG_FREQ_TRAN_VOTING_6__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 +#define CG_FREQ_TRAN_VOTING_6__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 +#define CG_FREQ_TRAN_VOTING_6__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 +#define CG_FREQ_TRAN_VOTING_6__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 +#define CG_FREQ_TRAN_VOTING_6__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a +#define CG_FREQ_TRAN_VOTING_6__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b +#define CG_FREQ_TRAN_VOTING_6__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c +#define CG_FREQ_TRAN_VOTING_6__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d +#define CG_FREQ_TRAN_VOTING_6__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 +#define CG_FREQ_TRAN_VOTING_6__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e +#define CG_FREQ_TRAN_VOTING_7__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 +#define CG_FREQ_TRAN_VOTING_7__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 +#define CG_FREQ_TRAN_VOTING_7__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 +#define CG_FREQ_TRAN_VOTING_7__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 +#define CG_FREQ_TRAN_VOTING_7__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 +#define CG_FREQ_TRAN_VOTING_7__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 +#define CG_FREQ_TRAN_VOTING_7__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 +#define CG_FREQ_TRAN_VOTING_7__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 +#define CG_FREQ_TRAN_VOTING_7__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 +#define CG_FREQ_TRAN_VOTING_7__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 +#define CG_FREQ_TRAN_VOTING_7__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 +#define CG_FREQ_TRAN_VOTING_7__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 +#define CG_FREQ_TRAN_VOTING_7__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 +#define CG_FREQ_TRAN_VOTING_7__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 +#define CG_FREQ_TRAN_VOTING_7__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 +#define CG_FREQ_TRAN_VOTING_7__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 +#define CG_FREQ_TRAN_VOTING_7__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 +#define CG_FREQ_TRAN_VOTING_7__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 +#define CG_FREQ_TRAN_VOTING_7__UVD_FREQ_THROTTLING_VOTE_EN_MASK 
0x200 +#define CG_FREQ_TRAN_VOTING_7__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 +#define CG_FREQ_TRAN_VOTING_7__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 +#define CG_FREQ_TRAN_VOTING_7__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa +#define CG_FREQ_TRAN_VOTING_7__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 +#define CG_FREQ_TRAN_VOTING_7__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb +#define CG_FREQ_TRAN_VOTING_7__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 +#define CG_FREQ_TRAN_VOTING_7__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc +#define CG_FREQ_TRAN_VOTING_7__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 +#define CG_FREQ_TRAN_VOTING_7__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd +#define CG_FREQ_TRAN_VOTING_7__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe +#define CG_FREQ_TRAN_VOTING_7__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf +#define CG_FREQ_TRAN_VOTING_7__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 +#define CG_FREQ_TRAN_VOTING_7__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 +#define CG_FREQ_TRAN_VOTING_7__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 +#define CG_FREQ_TRAN_VOTING_7__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 +#define CG_FREQ_TRAN_VOTING_7__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 +#define CG_FREQ_TRAN_VOTING_7__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 +#define CG_FREQ_TRAN_VOTING_7__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 +#define CG_FREQ_TRAN_VOTING_7__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 +#define CG_FREQ_TRAN_VOTING_7__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 +#define CG_FREQ_TRAN_VOTING_7__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 +#define CG_FREQ_TRAN_VOTING_7__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a +#define CG_FREQ_TRAN_VOTING_7__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b +#define CG_FREQ_TRAN_VOTING_7__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c +#define CG_FREQ_TRAN_VOTING_7__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d +#define CG_FREQ_TRAN_VOTING_7__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 +#define CG_FREQ_TRAN_VOTING_7__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e +#define PLL_TEST_CNTL__TST_SRC_SEL_MASK 0xf +#define PLL_TEST_CNTL__TST_SRC_SEL__SHIFT 0x0 +#define PLL_TEST_CNTL__TST_REF_SEL_MASK 0xf0 +#define PLL_TEST_CNTL__TST_REF_SEL__SHIFT 0x4 +#define PLL_TEST_CNTL__REF_TEST_COUNT_MASK 0x7f00 +#define PLL_TEST_CNTL__REF_TEST_COUNT__SHIFT 0x8 +#define 
PLL_TEST_CNTL__TST_RESET_MASK 0x8000 +#define PLL_TEST_CNTL__TST_RESET__SHIFT 0xf +#define PLL_TEST_CNTL__TEST_COUNT_MASK 0xfffe0000 +#define PLL_TEST_CNTL__TEST_COUNT__SHIFT 0x11 +#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_MASK 0xffff +#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT 0x0 +#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT_MASK 0xf0000 +#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT 0x10 +#define CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK 0x3 +#define CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT 0x0 +#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT_MASK 0x3fff0 +#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT__SHIFT 0x4 +#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT_MASK 0x700000 +#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT__SHIFT 0x14 +#define CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK 0x3000000 +#define CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT 0x18 +#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_DISABLE_MASK 0x10000000 +#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_DISABLE__SHIFT 0x1c +#define CG_DISPLAY_GAP_CNTL2__VBI_PREDICTION_MASK 0xffffffff +#define CG_DISPLAY_GAP_CNTL2__VBI_PREDICTION__SHIFT 0x0 +#define CG_ACPI_CNTL__SCLK_ACPI_DIV_MASK 0x7f +#define CG_ACPI_CNTL__SCLK_ACPI_DIV__SHIFT 0x0 +#define CG_ACPI_CNTL__SCLK_CHANGE_SKIP_MASK 0x80 +#define CG_ACPI_CNTL__SCLK_CHANGE_SKIP__SHIFT 0x7 +#define SCLK_DEEP_SLEEP_CNTL__DIV_ID_MASK 0x7 +#define SCLK_DEEP_SLEEP_CNTL__DIV_ID__SHIFT 0x0 +#define SCLK_DEEP_SLEEP_CNTL__RAMP_DIS_MASK 0x8 +#define SCLK_DEEP_SLEEP_CNTL__RAMP_DIS__SHIFT 0x3 +#define SCLK_DEEP_SLEEP_CNTL__HYSTERESIS_MASK 0xfff0 +#define SCLK_DEEP_SLEEP_CNTL__HYSTERESIS__SHIFT 0x4 +#define SCLK_DEEP_SLEEP_CNTL__SCLK_RUNNING_MASK_MASK 0x10000 +#define SCLK_DEEP_SLEEP_CNTL__SCLK_RUNNING_MASK__SHIFT 0x10 +#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_MASK_MASK 0x20000 +#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_MASK__SHIFT 0x11 +#define SCLK_DEEP_SLEEP_CNTL__ALLOW_NBPSTATE_MASK_MASK 0x40000 +#define SCLK_DEEP_SLEEP_CNTL__ALLOW_NBPSTATE_MASK__SHIFT 0x12 +#define SCLK_DEEP_SLEEP_CNTL__BIF_BUSY_MASK_MASK 0x80000 +#define SCLK_DEEP_SLEEP_CNTL__BIF_BUSY_MASK__SHIFT 0x13 +#define SCLK_DEEP_SLEEP_CNTL__UVD_BUSY_MASK_MASK 0x100000 +#define SCLK_DEEP_SLEEP_CNTL__UVD_BUSY_MASK__SHIFT 0x14 +#define SCLK_DEEP_SLEEP_CNTL__MC0SRBM_BUSY_MASK_MASK 0x200000 +#define SCLK_DEEP_SLEEP_CNTL__MC0SRBM_BUSY_MASK__SHIFT 0x15 +#define SCLK_DEEP_SLEEP_CNTL__MC1SRBM_BUSY_MASK_MASK 0x400000 +#define SCLK_DEEP_SLEEP_CNTL__MC1SRBM_BUSY_MASK__SHIFT 0x16 +#define SCLK_DEEP_SLEEP_CNTL__MC_ALLOW_MASK_MASK 0x800000 +#define SCLK_DEEP_SLEEP_CNTL__MC_ALLOW_MASK__SHIFT 0x17 +#define SCLK_DEEP_SLEEP_CNTL__SMU_BUSY_MASK_MASK 0x1000000 +#define SCLK_DEEP_SLEEP_CNTL__SMU_BUSY_MASK__SHIFT 0x18 +#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_NLC_MASK_MASK 0x2000000 +#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_NLC_MASK__SHIFT 0x19 +#define SCLK_DEEP_SLEEP_CNTL__FAST_EXIT_REQ_NBPSTATE_MASK 0x4000000 +#define SCLK_DEEP_SLEEP_CNTL__FAST_EXIT_REQ_NBPSTATE__SHIFT 0x1a +#define SCLK_DEEP_SLEEP_CNTL__DEEP_SLEEP_ENTRY_MODE_MASK 0x8000000 +#define SCLK_DEEP_SLEEP_CNTL__DEEP_SLEEP_ENTRY_MODE__SHIFT 0x1b +#define SCLK_DEEP_SLEEP_CNTL__MBUS2_ACTIVE_MASK_MASK 0x10000000 +#define SCLK_DEEP_SLEEP_CNTL__MBUS2_ACTIVE_MASK__SHIFT 0x1c +#define SCLK_DEEP_SLEEP_CNTL__VCE_BUSY_MASK_MASK 0x20000000 +#define SCLK_DEEP_SLEEP_CNTL__VCE_BUSY_MASK__SHIFT 0x1d +#define SCLK_DEEP_SLEEP_CNTL__AZ_BUSY_MASK_MASK 0x40000000 +#define SCLK_DEEP_SLEEP_CNTL__AZ_BUSY_MASK__SHIFT 0x1e +#define 
SCLK_DEEP_SLEEP_CNTL__ENABLE_DS_MASK 0x80000000 +#define SCLK_DEEP_SLEEP_CNTL__ENABLE_DS__SHIFT 0x1f +#define SCLK_DEEP_SLEEP_CNTL2__RLC_BUSY_MASK_MASK 0x1 +#define SCLK_DEEP_SLEEP_CNTL2__RLC_BUSY_MASK__SHIFT 0x0 +#define SCLK_DEEP_SLEEP_CNTL2__HDP_BUSY_MASK_MASK 0x2 +#define SCLK_DEEP_SLEEP_CNTL2__HDP_BUSY_MASK__SHIFT 0x1 +#define SCLK_DEEP_SLEEP_CNTL2__ROM_BUSY_MASK_MASK 0x4 +#define SCLK_DEEP_SLEEP_CNTL2__ROM_BUSY_MASK__SHIFT 0x2 +#define SCLK_DEEP_SLEEP_CNTL2__IH_SEM_BUSY_MASK_MASK 0x8 +#define SCLK_DEEP_SLEEP_CNTL2__IH_SEM_BUSY_MASK__SHIFT 0x3 +#define SCLK_DEEP_SLEEP_CNTL2__PDMA_BUSY_MASK_MASK 0x10 +#define SCLK_DEEP_SLEEP_CNTL2__PDMA_BUSY_MASK__SHIFT 0x4 +#define SCLK_DEEP_SLEEP_CNTL2__IDCT_BUSY_MASK_MASK 0x40 +#define SCLK_DEEP_SLEEP_CNTL2__IDCT_BUSY_MASK__SHIFT 0x6 +#define SCLK_DEEP_SLEEP_CNTL2__SDMA_BUSY_MASK_MASK 0x80 +#define SCLK_DEEP_SLEEP_CNTL2__SDMA_BUSY_MASK__SHIFT 0x7 +#define SCLK_DEEP_SLEEP_CNTL2__DC_AZ_BUSY_MASK_MASK 0x100 +#define SCLK_DEEP_SLEEP_CNTL2__DC_AZ_BUSY_MASK__SHIFT 0x8 +#define SCLK_DEEP_SLEEP_CNTL2__ACP_SMU_ALLOW_DSLEEP_STUTTER_MASK_MASK 0x200 +#define SCLK_DEEP_SLEEP_CNTL2__ACP_SMU_ALLOW_DSLEEP_STUTTER_MASK__SHIFT 0x9 +#define SCLK_DEEP_SLEEP_CNTL2__UVD_CG_MC_STAT_BUSY_MASK_MASK 0x400 +#define SCLK_DEEP_SLEEP_CNTL2__UVD_CG_MC_STAT_BUSY_MASK__SHIFT 0xa +#define SCLK_DEEP_SLEEP_CNTL2__VCE_CG_MC_STAT_BUSY_MASK_MASK 0x800 +#define SCLK_DEEP_SLEEP_CNTL2__VCE_CG_MC_STAT_BUSY_MASK__SHIFT 0xb +#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_MC_STAT_BUSY_MASK_MASK 0x1000 +#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_MC_STAT_BUSY_MASK__SHIFT 0xc +#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_STATUS_BUSY_MASK_MASK 0x2000 +#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_STATUS_BUSY_MASK__SHIFT 0xd +#define SCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK_MASK 0x4000 +#define SCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK__SHIFT 0xe +#define SCLK_DEEP_SLEEP_CNTL2__SHALLOW_DIV_ID_MASK 0xe00000 +#define SCLK_DEEP_SLEEP_CNTL2__SHALLOW_DIV_ID__SHIFT 0x15 +#define SCLK_DEEP_SLEEP_CNTL2__INOUT_CUSHION_MASK 0xff000000 +#define SCLK_DEEP_SLEEP_CNTL2__INOUT_CUSHION__SHIFT 0x18 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_0_SMU_BUSY_MASK_MASK 0x1 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_0_SMU_BUSY_MASK__SHIFT 0x0 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_1_SMU_BUSY_MASK_MASK 0x2 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_1_SMU_BUSY_MASK__SHIFT 0x1 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_2_SMU_BUSY_MASK_MASK 0x4 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_2_SMU_BUSY_MASK__SHIFT 0x2 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_3_SMU_BUSY_MASK_MASK 0x8 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_3_SMU_BUSY_MASK__SHIFT 0x3 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_4_SMU_BUSY_MASK_MASK 0x10 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_4_SMU_BUSY_MASK__SHIFT 0x4 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_5_SMU_BUSY_MASK_MASK 0x20 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_5_SMU_BUSY_MASK__SHIFT 0x5 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_6_SMU_BUSY_MASK_MASK 0x40 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_6_SMU_BUSY_MASK__SHIFT 0x6 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_7_SMU_BUSY_MASK_MASK 0x80 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_7_SMU_BUSY_MASK__SHIFT 0x7 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_8_SMU_BUSY_MASK_MASK 0x100 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_8_SMU_BUSY_MASK__SHIFT 0x8 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_9_SMU_BUSY_MASK_MASK 0x200 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_9_SMU_BUSY_MASK__SHIFT 0x9 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_10_SMU_BUSY_MASK_MASK 0x400 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_10_SMU_BUSY_MASK__SHIFT 0xa +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_11_SMU_BUSY_MASK_MASK 0x800 
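SCLK_DEEP_SLEEP_CNTL carries per-client `*_BUSY_MASK` bits plus a global ENABLE_DS enable in bit 31, and CNTL2/CNTL3 extend the busy vector to further clients. A sketch of the likely usage, assuming (as the field names suggest) that setting a `*_BUSY_MASK` bit excludes that client's busy signal from the deep-sleep vote; the accessor names and offset argument are placeholders as above.

#include <stdint.h>

extern uint32_t rreg32(uint32_t reg);
extern void wreg32(uint32_t reg, uint32_t val);

static void sclk_deep_sleep_on(uint32_t mm_sclk_deep_sleep_cntl)
{
	uint32_t tmp = rreg32(mm_sclk_deep_sleep_cntl);

	tmp |= SCLK_DEEP_SLEEP_CNTL__UVD_BUSY_MASK_MASK; /* ignore UVD activity */
	tmp |= SCLK_DEEP_SLEEP_CNTL__ENABLE_DS_MASK;     /* global enable, bit 31 */
	wreg32(mm_sclk_deep_sleep_cntl, tmp);
}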
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_11_SMU_BUSY_MASK__SHIFT 0xb +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_12_SMU_BUSY_MASK_MASK 0x1000 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_12_SMU_BUSY_MASK__SHIFT 0xc +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_13_SMU_BUSY_MASK_MASK 0x2000 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_13_SMU_BUSY_MASK__SHIFT 0xd +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_14_SMU_BUSY_MASK_MASK 0x4000 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_14_SMU_BUSY_MASK__SHIFT 0xe +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_15_SMU_BUSY_MASK_MASK 0x8000 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_15_SMU_BUSY_MASK__SHIFT 0xf +#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_DS_DIV_ID_MASK 0x7 +#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_DS_DIV_ID__SHIFT 0x0 +#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_SS_DIV_ID_MASK 0x38 +#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_SS_DIV_ID__SHIFT 0x3 +#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_ENABLE_MASK 0x10000 +#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_ENABLE__SHIFT 0x10 +#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_DS_DIV_ID_MASK 0xe0000 +#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_DS_DIV_ID__SHIFT 0x11 +#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_SS_DIV_ID_MASK 0x700000 +#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_SS_DIV_ID__SHIFT 0x14 +#define LCLK_DEEP_SLEEP_CNTL__DIV_ID_MASK 0x7 +#define LCLK_DEEP_SLEEP_CNTL__DIV_ID__SHIFT 0x0 +#define LCLK_DEEP_SLEEP_CNTL__RAMP_DIS_MASK 0x8 +#define LCLK_DEEP_SLEEP_CNTL__RAMP_DIS__SHIFT 0x3 +#define LCLK_DEEP_SLEEP_CNTL__HYSTERESIS_MASK 0xfff0 +#define LCLK_DEEP_SLEEP_CNTL__HYSTERESIS__SHIFT 0x4 +#define LCLK_DEEP_SLEEP_CNTL__RESERVED_MASK 0x7fff0000 +#define LCLK_DEEP_SLEEP_CNTL__RESERVED__SHIFT 0x10 +#define LCLK_DEEP_SLEEP_CNTL__ENABLE_DS_MASK 0x80000000 +#define LCLK_DEEP_SLEEP_CNTL__ENABLE_DS__SHIFT 0x1f +#define LCLK_DEEP_SLEEP_CNTL2__RFE_BUSY_MASK_MASK 0x1 +#define LCLK_DEEP_SLEEP_CNTL2__RFE_BUSY_MASK__SHIFT 0x0 +#define LCLK_DEEP_SLEEP_CNTL2__BIF_CG_LCLK_BUSY_MASK_MASK 0x2 +#define LCLK_DEEP_SLEEP_CNTL2__BIF_CG_LCLK_BUSY_MASK__SHIFT 0x1 +#define LCLK_DEEP_SLEEP_CNTL2__L1IMU_SMU_IDLE_MASK_MASK 0x4 +#define LCLK_DEEP_SLEEP_CNTL2__L1IMU_SMU_IDLE_MASK__SHIFT 0x2 +#define LCLK_DEEP_SLEEP_CNTL2__RESERVED_BIT3_MASK 0x8 +#define LCLK_DEEP_SLEEP_CNTL2__RESERVED_BIT3__SHIFT 0x3 +#define LCLK_DEEP_SLEEP_CNTL2__SCLK_RUNNING_MASK_MASK 0x10 +#define LCLK_DEEP_SLEEP_CNTL2__SCLK_RUNNING_MASK__SHIFT 0x4 +#define LCLK_DEEP_SLEEP_CNTL2__SMU_BUSY_MASK_MASK 0x20 +#define LCLK_DEEP_SLEEP_CNTL2__SMU_BUSY_MASK__SHIFT 0x5 +#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE1_MASK_MASK 0x40 +#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE1_MASK__SHIFT 0x6 +#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE2_MASK_MASK 0x80 +#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE2_MASK__SHIFT 0x7 +#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE3_MASK_MASK 0x100 +#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE3_MASK__SHIFT 0x8 +#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE4_MASK_MASK 0x200 +#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE4_MASK__SHIFT 0x9 +#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPP_IDLE_MASK_MASK 0x400 +#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPP_IDLE_MASK__SHIFT 0xa +#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPPSB_IDLE_MASK_MASK 0x800 +#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPPSB_IDLE_MASK__SHIFT 0xb +#define LCLK_DEEP_SLEEP_CNTL2__L1IMUBIF_IDLE_MASK_MASK 0x1000 +#define LCLK_DEEP_SLEEP_CNTL2__L1IMUBIF_IDLE_MASK__SHIFT 0xc +#define LCLK_DEEP_SLEEP_CNTL2__L1IMUINTGEN_IDLE_MASK_MASK 0x2000 +#define LCLK_DEEP_SLEEP_CNTL2__L1IMUINTGEN_IDLE_MASK__SHIFT 0xd +#define LCLK_DEEP_SLEEP_CNTL2__L2IMU_IDLE_MASK_MASK 0x4000 +#define 
LCLK_DEEP_SLEEP_CNTL2__L2IMU_IDLE_MASK__SHIFT 0xe +#define LCLK_DEEP_SLEEP_CNTL2__ORB_IDLE_MASK_MASK 0x8000 +#define LCLK_DEEP_SLEEP_CNTL2__ORB_IDLE_MASK__SHIFT 0xf +#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_MASK_MASK 0x10000 +#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_MASK__SHIFT 0x10 +#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_ACK_MASK_MASK 0x20000 +#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_ACK_MASK__SHIFT 0x11 +#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_MASK_MASK 0x40000 +#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_MASK__SHIFT 0x12 +#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_ACK_MASK_MASK 0x80000 +#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_ACK_MASK__SHIFT 0x13 +#define LCLK_DEEP_SLEEP_CNTL2__DMAACTIVE_MASK_MASK 0x100000 +#define LCLK_DEEP_SLEEP_CNTL2__DMAACTIVE_MASK__SHIFT 0x14 +#define LCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK_MASK 0x200000 +#define LCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK__SHIFT 0x15 +#define LCLK_DEEP_SLEEP_CNTL2__RESERVED_MASK 0xffc00000 +#define LCLK_DEEP_SLEEP_CNTL2__RESERVED__SHIFT 0x16 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDCI_INDEX_MASK 0xf +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDCI_INDEX__SHIFT 0x0 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDCI_INDEX_MASK 0xf0 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDCI_INDEX__SHIFT 0x4 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_MVDD_INDEX_MASK 0xf00 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_MVDD_INDEX__SHIFT 0x8 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_MVDD_INDEX_MASK 0xf000 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_MVDD_INDEX__SHIFT 0xc +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDC_INDEX_MASK 0xf0000 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDC_INDEX__SHIFT 0x10 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDC_INDEX_MASK 0xf00000 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDC_INDEX__SHIFT 0x14 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK 0xf000000 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT 0x18 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX_MASK 0xf0000000 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX__SHIFT 0x1c +#define CG_ULV_PARAMETER__ULV_THRESHOLD_MASK 0xffff +#define CG_ULV_PARAMETER__ULV_THRESHOLD__SHIFT 0x0 +#define CG_ULV_PARAMETER__ULV_THRESHOLD_UNIT_MASK 0xf0000 +#define CG_ULV_PARAMETER__ULV_THRESHOLD_UNIT__SHIFT 0x10 +#define SCLK_MIN_DIV__FRACV_MASK 0xfff +#define SCLK_MIN_DIV__FRACV__SHIFT 0x0 +#define SCLK_MIN_DIV__INTV_MASK 0x7f000 +#define SCLK_MIN_DIV__INTV__SHIFT 0xc +#define PWR_AVFS_SEL__AvfsSel_MASK 0xfffffff +#define PWR_AVFS_SEL__AvfsSel__SHIFT 0x0 +#define PWR_AVFS_CNTL__MmBusIn_MASK 0xff +#define PWR_AVFS_CNTL__MmBusIn__SHIFT 0x0 +#define PWR_AVFS_CNTL__MmLclRdEn_MASK 0x100 +#define PWR_AVFS_CNTL__MmLclRdEn__SHIFT 0x8 +#define PWR_AVFS_CNTL__MmLclWrEn_MASK 0x200 +#define PWR_AVFS_CNTL__MmLclWrEn__SHIFT 0x9 +#define PWR_AVFS_CNTL__MmLclSz_MASK 0xc00 +#define PWR_AVFS_CNTL__MmLclSz__SHIFT 0xa +#define PWR_AVFS_CNTL__MmState_MASK 0x3f000 +#define PWR_AVFS_CNTL__MmState__SHIFT 0xc +#define PWR_AVFS_CNTL__PsmScanMode_MASK 0x40000 +#define PWR_AVFS_CNTL__PsmScanMode__SHIFT 0x12 +#define PWR_AVFS_CNTL__PsmGater_MASK 0x80000 +#define PWR_AVFS_CNTL__PsmGater__SHIFT 0x13 +#define PWR_AVFS_CNTL__PsmTrst_MASK 0x100000 +#define PWR_AVFS_CNTL__PsmTrst__SHIFT 0x14 +#define PWR_AVFS_CNTL__PsmEn_MASK 0x200000 +#define PWR_AVFS_CNTL__PsmEn__SHIFT 0x15 +#define PWR_AVFS_CNTL__SkipPhaseEn_MASK 0x400000 +#define 
PWR_AVFS_CNTL__SkipPhaseEn__SHIFT 0x16 +#define PWR_AVFS_CNTL__Isolate_MASK 0x800000 +#define PWR_AVFS_CNTL__Isolate__SHIFT 0x17 +#define PWR_AVFS_CNTL__AvfsRst_MASK 0x1000000 +#define PWR_AVFS_CNTL__AvfsRst__SHIFT 0x18 +#define PWR_AVFS_CNTL__PccIsolateEn_MASK 0x2000000 +#define PWR_AVFS_CNTL__PccIsolateEn__SHIFT 0x19 +#define PWR_AVFS_CNTL__DeepSleepIsolateEn_MASK 0x4000000 +#define PWR_AVFS_CNTL__DeepSleepIsolateEn__SHIFT 0x1a +#define PWR_AVFS0_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS0_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS0_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS0_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS0_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS0_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS1_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS1_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS1_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS1_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS1_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS1_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS2_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS2_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS2_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS2_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS2_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS2_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS3_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS3_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS3_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS3_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS3_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS3_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS4_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS4_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS4_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS4_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS4_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS4_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS5_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS5_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS5_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS5_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS5_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS5_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS6_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS6_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS6_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS6_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS6_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS6_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS7_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS7_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS7_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS7_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS7_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS7_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS8_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS8_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS8_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS8_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS8_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS8_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS9_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS9_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS9_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS9_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS9_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS9_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define 
PWR_AVFS10_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS10_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS10_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS10_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS10_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS10_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS11_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS11_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS11_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS11_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS11_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS11_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS12_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS12_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS12_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS12_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS12_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS12_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS13_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS13_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS13_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS13_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS13_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS13_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS14_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS14_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS14_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS14_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS14_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS14_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS15_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS15_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS15_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS15_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS15_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS15_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS16_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS16_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS16_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS16_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS16_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS16_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS17_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS17_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS17_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS17_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS17_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS17_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS18_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS18_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS18_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS18_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS18_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS18_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS19_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS19_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS19_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS19_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS19_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS19_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS20_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS20_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS20_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS20_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS20_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS20_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS21_CNTL_STATUS__MmDatOut_MASK 0xff +#define 
PWR_AVFS21_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS21_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS21_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS21_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS21_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS22_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS22_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS22_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS22_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS22_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS22_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS23_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS23_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS23_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS23_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS23_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS23_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS24_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS24_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS24_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS24_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS24_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS24_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS25_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS25_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS25_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS25_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS25_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS25_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS26_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS26_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS26_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS26_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS26_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS26_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS27_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS27_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS27_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS27_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS27_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS27_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_CKS_ENABLE__STRETCH_ENABLE_MASK 0x1 +#define PWR_CKS_ENABLE__STRETCH_ENABLE__SHIFT 0x0 +#define PWR_CKS_ENABLE__masterReset_MASK 0x2 +#define PWR_CKS_ENABLE__masterReset__SHIFT 0x1 +#define PWR_CKS_ENABLE__staticEnable_MASK 0x4 +#define PWR_CKS_ENABLE__staticEnable__SHIFT 0x2 +#define PWR_CKS_ENABLE__IGNORE_DROOP_DETECT_MASK 0x8 +#define PWR_CKS_ENABLE__IGNORE_DROOP_DETECT__SHIFT 0x3 +#define PWR_CKS_ENABLE__PCC_HAND_SHAKE_EN_MASK 0x10 +#define PWR_CKS_ENABLE__PCC_HAND_SHAKE_EN__SHIFT 0x4 +#define PWR_CKS_ENABLE__MET_CTRL_SEL_MASK 0x60 +#define PWR_CKS_ENABLE__MET_CTRL_SEL__SHIFT 0x5 +#define PWR_CKS_ENABLE__DS_HAND_SHAKE_EN_MASK 0x80 +#define PWR_CKS_ENABLE__DS_HAND_SHAKE_EN__SHIFT 0x7 +#define PWR_CKS_CNTL__CKS_BYPASS_MASK 0x1 +#define PWR_CKS_CNTL__CKS_BYPASS__SHIFT 0x0 +#define PWR_CKS_CNTL__CKS_PCCEnable_MASK 0x2 +#define PWR_CKS_CNTL__CKS_PCCEnable__SHIFT 0x1 +#define PWR_CKS_CNTL__CKS_TEMP_COMP_MASK 0x4 +#define PWR_CKS_CNTL__CKS_TEMP_COMP__SHIFT 0x2 +#define PWR_CKS_CNTL__CKS_STRETCH_AMOUNT_MASK 0x78 +#define PWR_CKS_CNTL__CKS_STRETCH_AMOUNT__SHIFT 0x3 +#define PWR_CKS_CNTL__CKS_SKIP_PHASE_BYPASS_MASK 0x80 +#define PWR_CKS_CNTL__CKS_SKIP_PHASE_BYPASS__SHIFT 0x7 +#define PWR_CKS_CNTL__CKS_SAMPLE_SIZE_MASK 0xf00 +#define PWR_CKS_CNTL__CKS_SAMPLE_SIZE__SHIFT 0x8 +#define PWR_CKS_CNTL__CKS_FSM_WAIT_CYCLES_MASK 0xf000 +#define PWR_CKS_CNTL__CKS_FSM_WAIT_CYCLES__SHIFT 0xc 
+#define PWR_CKS_CNTL__CKS_USE_FOR_LOW_FREQ_MASK 0x10000 +#define PWR_CKS_CNTL__CKS_USE_FOR_LOW_FREQ__SHIFT 0x10 +#define PWR_CKS_CNTL__CKS_NO_EXTRA_COARSE_STEP_MASK 0x20000 +#define PWR_CKS_CNTL__CKS_NO_EXTRA_COARSE_STEP__SHIFT 0x11 +#define PWR_CKS_CNTL__CKS_LDO_REFSEL_MASK 0x3c0000 +#define PWR_CKS_CNTL__CKS_LDO_REFSEL__SHIFT 0x12 +#define PWR_CKS_CNTL__DDT_DEBUS_SEL_MASK 0x400000 +#define PWR_CKS_CNTL__DDT_DEBUS_SEL__SHIFT 0x16 +#define PWR_CKS_CNTL__CKS_LDO_READY_COUNT_VAL_MASK 0x7f800000 +#define PWR_CKS_CNTL__CKS_LDO_READY_COUNT_VAL__SHIFT 0x17 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x1ffffff +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x2000000 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x4000000 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK_MASK 0x8000000 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x1 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT_MASK 0x2 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_MASK 0x4 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT__SHIFT 0x2 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xffffff80 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x1ffffff +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x2000000 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x4000000 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK_MASK 0x8000000 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x1 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT_MASK 0x2 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_MASK 0x4 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT__SHIFT 0x2 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xffffff80 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7 +#define PWR_DISP_TIMER_CONTROL2__DISP_TIMER_PULSE_WIDTH_MASK 0x3ff +#define 
PWR_DISP_TIMER_CONTROL2__DISP_TIMER_PULSE_WIDTH__SHIFT 0x0 +#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD_MASK 0xffff +#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD__SHIFT 0x0 +#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD_UNIT_MASK 0xf0000 +#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD_UNIT__SHIFT 0x10 +#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_EN_MASK 0x1 +#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_EN__SHIFT 0x0 +#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_DETECT_MASK 0x2 +#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_DETECT__SHIFT 0x1 +#define VDDGFX_IDLE_CONTROL__FORCE_VDDGFX_IDLE_EXIT_MASK 0x4 +#define VDDGFX_IDLE_CONTROL__FORCE_VDDGFX_IDLE_EXIT__SHIFT 0x2 +#define VDDGFX_IDLE_CONTROL__SMC_VDDGFX_IDLE_STATE_MASK 0x8 +#define VDDGFX_IDLE_CONTROL__SMC_VDDGFX_IDLE_STATE__SHIFT 0x3 +#define VDDGFX_IDLE_EXIT__BIF_EXIT_REQ_MASK 0x1 +#define VDDGFX_IDLE_EXIT__BIF_EXIT_REQ__SHIFT 0x0 +#define LCAC_MC0_CNTL__MC0_ENABLE_MASK 0x1 +#define LCAC_MC0_CNTL__MC0_ENABLE__SHIFT 0x0 +#define LCAC_MC0_CNTL__MC0_THRESHOLD_MASK 0x1fffe +#define LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT 0x1 +#define LCAC_MC0_CNTL__MC0_BLOCK_ID_MASK 0x3e0000 +#define LCAC_MC0_CNTL__MC0_BLOCK_ID__SHIFT 0x11 +#define LCAC_MC0_CNTL__MC0_SIGNAL_ID_MASK 0x3fc00000 +#define LCAC_MC0_CNTL__MC0_SIGNAL_ID__SHIFT 0x16 +#define LCAC_MC0_OVR_SEL__MC0_OVR_SEL_MASK 0xffffffff +#define LCAC_MC0_OVR_SEL__MC0_OVR_SEL__SHIFT 0x0 +#define LCAC_MC0_OVR_VAL__MC0_OVR_VAL_MASK 0xffffffff +#define LCAC_MC0_OVR_VAL__MC0_OVR_VAL__SHIFT 0x0 +#define LCAC_MC1_CNTL__MC1_ENABLE_MASK 0x1 +#define LCAC_MC1_CNTL__MC1_ENABLE__SHIFT 0x0 +#define LCAC_MC1_CNTL__MC1_THRESHOLD_MASK 0x1fffe +#define LCAC_MC1_CNTL__MC1_THRESHOLD__SHIFT 0x1 +#define LCAC_MC1_CNTL__MC1_BLOCK_ID_MASK 0x3e0000 +#define LCAC_MC1_CNTL__MC1_BLOCK_ID__SHIFT 0x11 +#define LCAC_MC1_CNTL__MC1_SIGNAL_ID_MASK 0x3fc00000 +#define LCAC_MC1_CNTL__MC1_SIGNAL_ID__SHIFT 0x16 +#define LCAC_MC1_OVR_SEL__MC1_OVR_SEL_MASK 0xffffffff +#define LCAC_MC1_OVR_SEL__MC1_OVR_SEL__SHIFT 0x0 +#define LCAC_MC1_OVR_VAL__MC1_OVR_VAL_MASK 0xffffffff +#define LCAC_MC1_OVR_VAL__MC1_OVR_VAL__SHIFT 0x0 +#define LCAC_MC2_CNTL__MC2_ENABLE_MASK 0x1 +#define LCAC_MC2_CNTL__MC2_ENABLE__SHIFT 0x0 +#define LCAC_MC2_CNTL__MC2_THRESHOLD_MASK 0x1fffe +#define LCAC_MC2_CNTL__MC2_THRESHOLD__SHIFT 0x1 +#define LCAC_MC2_CNTL__MC2_BLOCK_ID_MASK 0x3e0000 +#define LCAC_MC2_CNTL__MC2_BLOCK_ID__SHIFT 0x11 +#define LCAC_MC2_CNTL__MC2_SIGNAL_ID_MASK 0x3fc00000 +#define LCAC_MC2_CNTL__MC2_SIGNAL_ID__SHIFT 0x16 +#define LCAC_MC2_OVR_SEL__MC2_OVR_SEL_MASK 0xffffffff +#define LCAC_MC2_OVR_SEL__MC2_OVR_SEL__SHIFT 0x0 +#define LCAC_MC2_OVR_VAL__MC2_OVR_VAL_MASK 0xffffffff +#define LCAC_MC2_OVR_VAL__MC2_OVR_VAL__SHIFT 0x0 +#define LCAC_MC3_CNTL__MC3_ENABLE_MASK 0x1 +#define LCAC_MC3_CNTL__MC3_ENABLE__SHIFT 0x0 +#define LCAC_MC3_CNTL__MC3_THRESHOLD_MASK 0x1fffe +#define LCAC_MC3_CNTL__MC3_THRESHOLD__SHIFT 0x1 +#define LCAC_MC3_CNTL__MC3_BLOCK_ID_MASK 0x3e0000 +#define LCAC_MC3_CNTL__MC3_BLOCK_ID__SHIFT 0x11 +#define LCAC_MC3_CNTL__MC3_SIGNAL_ID_MASK 0x3fc00000 +#define LCAC_MC3_CNTL__MC3_SIGNAL_ID__SHIFT 0x16 +#define LCAC_MC3_OVR_SEL__MC3_OVR_SEL_MASK 0xffffffff +#define LCAC_MC3_OVR_SEL__MC3_OVR_SEL__SHIFT 0x0 +#define LCAC_MC3_OVR_VAL__MC3_OVR_VAL_MASK 0xffffffff +#define LCAC_MC3_OVR_VAL__MC3_OVR_VAL__SHIFT 0x0 +#define LCAC_MC4_CNTL__MC4_ENABLE_MASK 0x1 +#define LCAC_MC4_CNTL__MC4_ENABLE__SHIFT 0x0 +#define LCAC_MC4_CNTL__MC4_THRESHOLD_MASK 0x1fffe +#define LCAC_MC4_CNTL__MC4_THRESHOLD__SHIFT 0x1 +#define 
LCAC_MC4_CNTL__MC4_BLOCK_ID_MASK 0x3e0000 +#define LCAC_MC4_CNTL__MC4_BLOCK_ID__SHIFT 0x11 +#define LCAC_MC4_CNTL__MC4_SIGNAL_ID_MASK 0x3fc00000 +#define LCAC_MC4_CNTL__MC4_SIGNAL_ID__SHIFT 0x16 +#define LCAC_MC4_OVR_SEL__MC4_OVR_SEL_MASK 0xffffffff +#define LCAC_MC4_OVR_SEL__MC4_OVR_SEL__SHIFT 0x0 +#define LCAC_MC4_OVR_VAL__MC4_OVR_VAL_MASK 0xffffffff +#define LCAC_MC4_OVR_VAL__MC4_OVR_VAL__SHIFT 0x0 +#define LCAC_MC5_CNTL__MC5_ENABLE_MASK 0x1 +#define LCAC_MC5_CNTL__MC5_ENABLE__SHIFT 0x0 +#define LCAC_MC5_CNTL__MC5_THRESHOLD_MASK 0x1fffe +#define LCAC_MC5_CNTL__MC5_THRESHOLD__SHIFT 0x1 +#define LCAC_MC5_CNTL__MC5_BLOCK_ID_MASK 0x3e0000 +#define LCAC_MC5_CNTL__MC5_BLOCK_ID__SHIFT 0x11 +#define LCAC_MC5_CNTL__MC5_SIGNAL_ID_MASK 0x3fc00000 +#define LCAC_MC5_CNTL__MC5_SIGNAL_ID__SHIFT 0x16 +#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL_MASK 0xffffffff +#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL__SHIFT 0x0 +#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL_MASK 0xffffffff +#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL__SHIFT 0x0 +#define LCAC_MC6_CNTL__MC6_ENABLE_MASK 0x1 +#define LCAC_MC6_CNTL__MC6_ENABLE__SHIFT 0x0 +#define LCAC_MC6_CNTL__MC6_THRESHOLD_MASK 0x1fffe +#define LCAC_MC6_CNTL__MC6_THRESHOLD__SHIFT 0x1 +#define LCAC_MC6_CNTL__MC6_BLOCK_ID_MASK 0x3e0000 +#define LCAC_MC6_CNTL__MC6_BLOCK_ID__SHIFT 0x11 +#define LCAC_MC6_CNTL__MC6_SIGNAL_ID_MASK 0x3fc00000 +#define LCAC_MC6_CNTL__MC6_SIGNAL_ID__SHIFT 0x16 +#define LCAC_MC6_OVR_SEL__MC6_OVR_SEL_MASK 0xffffffff +#define LCAC_MC6_OVR_SEL__MC6_OVR_SEL__SHIFT 0x0 +#define LCAC_MC6_OVR_VAL__MC6_OVR_VAL_MASK 0xffffffff +#define LCAC_MC6_OVR_VAL__MC6_OVR_VAL__SHIFT 0x0 +#define LCAC_MC7_CNTL__MC7_ENABLE_MASK 0x1 +#define LCAC_MC7_CNTL__MC7_ENABLE__SHIFT 0x0 +#define LCAC_MC7_CNTL__MC7_THRESHOLD_MASK 0x1fffe +#define LCAC_MC7_CNTL__MC7_THRESHOLD__SHIFT 0x1 +#define LCAC_MC7_CNTL__MC7_BLOCK_ID_MASK 0x3e0000 +#define LCAC_MC7_CNTL__MC7_BLOCK_ID__SHIFT 0x11 +#define LCAC_MC7_CNTL__MC7_SIGNAL_ID_MASK 0x3fc00000 +#define LCAC_MC7_CNTL__MC7_SIGNAL_ID__SHIFT 0x16 +#define LCAC_MC7_OVR_SEL__MC7_OVR_SEL_MASK 0xffffffff +#define LCAC_MC7_OVR_SEL__MC7_OVR_SEL__SHIFT 0x0 +#define LCAC_MC7_OVR_VAL__MC7_OVR_VAL_MASK 0xffffffff +#define LCAC_MC7_OVR_VAL__MC7_OVR_VAL__SHIFT 0x0 +#define LCAC_CPL_CNTL__CPL_ENABLE_MASK 0x1 +#define LCAC_CPL_CNTL__CPL_ENABLE__SHIFT 0x0 +#define LCAC_CPL_CNTL__CPL_THRESHOLD_MASK 0x1fffe +#define LCAC_CPL_CNTL__CPL_THRESHOLD__SHIFT 0x1 +#define LCAC_CPL_CNTL__CPL_BLOCK_ID_MASK 0x3e0000 +#define LCAC_CPL_CNTL__CPL_BLOCK_ID__SHIFT 0x11 +#define LCAC_CPL_CNTL__CPL_SIGNAL_ID_MASK 0x3fc00000 +#define LCAC_CPL_CNTL__CPL_SIGNAL_ID__SHIFT 0x16 +#define LCAC_CPL_OVR_SEL__CPL_OVR_SEL_MASK 0xffffffff +#define LCAC_CPL_OVR_SEL__CPL_OVR_SEL__SHIFT 0x0 +#define LCAC_CPL_OVR_VAL__CPL_OVR_VAL_MASK 0xffffffff +#define LCAC_CPL_OVR_VAL__CPL_OVR_VAL__SHIFT 0x0 +#define ROM_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff +#define ROM_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0 +#define ROM_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff +#define ROM_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0 +#define ROM_CNTL__SCK_OVERWRITE_MASK 0x2 +#define ROM_CNTL__SCK_OVERWRITE__SHIFT 0x1 +#define ROM_CNTL__CLOCK_GATING_EN_MASK 0x4 +#define ROM_CNTL__CLOCK_GATING_EN__SHIFT 0x2 +#define ROM_CNTL__CSB_ACTIVE_TO_SCK_SETUP_TIME_MASK 0xff00 +#define ROM_CNTL__CSB_ACTIVE_TO_SCK_SETUP_TIME__SHIFT 0x8 +#define ROM_CNTL__CSB_ACTIVE_TO_SCK_HOLD_TIME_MASK 0xff0000 +#define ROM_CNTL__CSB_ACTIVE_TO_SCK_HOLD_TIME__SHIFT 0x10 +#define ROM_CNTL__SCK_PRESCALE_REFCLK_MASK 0xf000000 +#define ROM_CNTL__SCK_PRESCALE_REFCLK__SHIFT 
0x18 +#define ROM_CNTL__SCK_PRESCALE_CRYSTAL_CLK_MASK 0xf0000000 +#define ROM_CNTL__SCK_PRESCALE_CRYSTAL_CLK__SHIFT 0x1c +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR_MASK 0xffffff +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR__SHIFT 0x0 +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE_MASK 0x1000000 +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE__SHIFT 0x18 +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE_MASK 0x2000000 +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE__SHIFT 0x19 +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE_MASK 0xc000000 +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE__SHIFT 0x1a +#define ROM_STATUS__ROM_BUSY_MASK 0x1 +#define ROM_STATUS__ROM_BUSY__SHIFT 0x0 +#define CGTT_ROM_CLK_CTRL0__ON_DELAY_MASK 0xf +#define CGTT_ROM_CLK_CTRL0__ON_DELAY__SHIFT 0x0 +#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS_MASK 0xff0 +#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4 +#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000 +#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e +#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000 +#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f +#define ROM_INDEX__ROM_INDEX_MASK 0xffffff +#define ROM_INDEX__ROM_INDEX__SHIFT 0x0 +#define ROM_DATA__ROM_DATA_MASK 0xffffffff +#define ROM_DATA__ROM_DATA__SHIFT 0x0 +#define ROM_START__ROM_START_MASK 0xffffff +#define ROM_START__ROM_START__SHIFT 0x0 +#define ROM_SW_CNTL__DATA_SIZE_MASK 0xffff +#define ROM_SW_CNTL__DATA_SIZE__SHIFT 0x0 +#define ROM_SW_CNTL__COMMAND_SIZE_MASK 0x30000 +#define ROM_SW_CNTL__COMMAND_SIZE__SHIFT 0x10 +#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE_MASK 0x40000 +#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE__SHIFT 0x12 +#define ROM_SW_STATUS__ROM_SW_DONE_MASK 0x1 +#define ROM_SW_STATUS__ROM_SW_DONE__SHIFT 0x0 +#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION_MASK 0xff +#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION__SHIFT 0x0 +#define ROM_SW_COMMAND__ROM_SW_ADDRESS_MASK 0xffffff00 +#define ROM_SW_COMMAND__ROM_SW_ADDRESS__SHIFT 0x8 +#define ROM_SW_DATA_1__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_1__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_2__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_2__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_3__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_3__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_4__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_4__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_5__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_5__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_6__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_6__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_7__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_7__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_8__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_8__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_9__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_9__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_10__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_10__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_11__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_11__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_12__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_12__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_13__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_13__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_14__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_14__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_15__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_15__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_16__ROM_SW_DATA_MASK 0xffffffff +#define 
ROM_SW_DATA_16__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_17__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_17__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_18__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_18__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_19__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_19__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_20__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_20__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_21__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_21__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_22__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_22__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_23__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_23__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_24__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_24__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_25__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_25__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_26__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_26__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_27__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_27__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_28__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_28__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_29__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_29__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_30__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_30__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_31__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_31__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_32__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_32__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_33__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_33__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_34__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_34__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_35__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_35__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_36__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_36__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_37__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_37__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_38__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_38__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_39__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_39__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_40__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_40__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_41__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_41__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_42__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_42__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_43__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_43__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_44__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_44__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_45__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_45__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_46__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_46__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_47__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_47__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_48__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_48__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_49__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_49__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_50__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_50__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_51__ROM_SW_DATA_MASK 0xffffffff +#define 
ROM_SW_DATA_51__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_52__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_52__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_53__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_53__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_54__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_54__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_55__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_55__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_56__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_56__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_57__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_57__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_58__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_58__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_59__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_59__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_60__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_60__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_61__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_61__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_62__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_62__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_63__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0 +#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0xf +#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0 +#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000 +#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e +#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000 +#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f +#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0xf +#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0 +#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000 +#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e +#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000 +#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f +#define GC_CAC_LKG_AGGR_LOWER__LKG_AGGR_31_0_MASK 0xffffffff +#define GC_CAC_LKG_AGGR_LOWER__LKG_AGGR_31_0__SHIFT 0x0 +#define GC_CAC_LKG_AGGR_UPPER__LKG_AGGR_63_32_MASK 0xffffffff +#define GC_CAC_LKG_AGGR_UPPER__LKG_AGGR_63_32__SHIFT 0x0 +#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0_MASK 0xffff +#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0__SHIFT 0x0 +#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG1_MASK 0xffff0000 +#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG1__SHIFT 0x10 +#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG2_MASK 0xffff +#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG2__SHIFT 0x0 +#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG3_MASK 0xffff0000 +#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG3__SHIFT 0x10 +#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG4_MASK 0xffff +#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG4__SHIFT 0x0 +#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG5_MASK 0xffff0000 +#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG5__SHIFT 0x10 +#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG6_MASK 0xffff +#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG6__SHIFT 0x0 +#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG7_MASK 0xffff0000 +#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG7__SHIFT 0x10 +#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG8_MASK 0xffff +#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG8__SHIFT 0x0 +#define 
GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG9_MASK 0xffff0000 +#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG9__SHIFT 0x10 +#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG10_MASK 0xffff +#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG10__SHIFT 0x0 +#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG11_MASK 0xffff0000 +#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG11__SHIFT 0x10 +#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG12_MASK 0xffff +#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG12__SHIFT 0x0 +#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG13_MASK 0xffff0000 +#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG13__SHIFT 0x10 +#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG14_MASK 0xffff +#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG14__SHIFT 0x0 +#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG15_MASK 0xffff0000 +#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG15__SHIFT 0x10 +#define GC_CAC_ACC_CU0__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU0__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU1__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU1__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU2__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU2__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU3__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU3__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU4__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU4__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU5__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU5__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU6__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU6__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU7__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU7__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU8__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU8__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU9__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU9__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU10__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU10__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU11__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU11__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU12__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU12__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU13__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU13__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU14__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU14__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU15__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU15__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_OVRD_CU__OVRRD_SELECT_MASK 0xffff +#define GC_CAC_OVRD_CU__OVRRD_SELECT__SHIFT 0x0 +#define GC_CAC_OVRD_CU__OVRRD_VALUE_MASK 0xffff0000 +#define GC_CAC_OVRD_CU__OVRRD_VALUE__SHIFT 0x10 + +#endif /* SMU_7_1_3_SH_MASK_H */ diff --git a/drivers/gpu/drm/amd/amdgpu/atom-bits.h b/drivers/gpu/drm/amd/include/atom-bits.h index e8fae5c77514..e8fae5c77514 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom-bits.h +++ b/drivers/gpu/drm/amd/include/atom-bits.h diff --git a/drivers/gpu/drm/amd/amdgpu/atom-names.h b/drivers/gpu/drm/amd/include/atom-names.h index 6f907a5ffa5f..6f907a5ffa5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom-names.h +++ b/drivers/gpu/drm/amd/include/atom-names.h diff --git a/drivers/gpu/drm/amd/amdgpu/atom-types.h b/drivers/gpu/drm/amd/include/atom-types.h index 1125b866cdb0..1125b866cdb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom-types.h +++ b/drivers/gpu/drm/amd/include/atom-types.h diff --git 
a/drivers/gpu/drm/amd/amdgpu/atombios.h b/drivers/gpu/drm/amd/include/atombios.h index 44c5d4a4d1bf..44c5d4a4d1bf 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios.h +++ b/drivers/gpu/drm/amd/include/atombios.h diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h new file mode 100644 index 000000000000..992dcd8a5c6a --- /dev/null +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -0,0 +1,624 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * + */ +#ifndef _CGS_COMMON_H +#define _CGS_COMMON_H + +#include "amd_shared.h" + +/** + * enum cgs_gpu_mem_type - GPU memory types + */ +enum cgs_gpu_mem_type { + CGS_GPU_MEM_TYPE__VISIBLE_FB, + CGS_GPU_MEM_TYPE__INVISIBLE_FB, + CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, + CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB, + CGS_GPU_MEM_TYPE__GART_CACHEABLE, + CGS_GPU_MEM_TYPE__GART_WRITECOMBINE +}; + +/** + * enum cgs_ind_reg - Indirect register spaces + */ +enum cgs_ind_reg { + CGS_IND_REG__MMIO, + CGS_IND_REG__PCIE, + CGS_IND_REG__SMC, + CGS_IND_REG__UVD_CTX, + CGS_IND_REG__DIDT, + CGS_IND_REG__AUDIO_ENDPT +}; + +/** + * enum cgs_clock - Clocks controlled by the SMU + */ +enum cgs_clock { + CGS_CLOCK__SCLK, + CGS_CLOCK__MCLK, + CGS_CLOCK__VCLK, + CGS_CLOCK__DCLK, + CGS_CLOCK__ECLK, + CGS_CLOCK__ACLK, + CGS_CLOCK__ICLK, + /* ... */ +}; + +/** + * enum cgs_engine - Engines that can be statically power-gated + */ +enum cgs_engine { + CGS_ENGINE__UVD, + CGS_ENGINE__VCE, + CGS_ENGINE__VP8, + CGS_ENGINE__ACP_DMA, + CGS_ENGINE__ACP_DSP0, + CGS_ENGINE__ACP_DSP1, + CGS_ENGINE__ISP, + /* ... */ +}; + +/** + * enum cgs_voltage_planes - Voltage planes for external camera HW + */ +enum cgs_voltage_planes { + CGS_VOLTAGE_PLANE__SENSOR0, + CGS_VOLTAGE_PLANE__SENSOR1, + /* ... */ +}; + +/* + * enum cgs_ucode_id - Firmware types for different IPs + */ +enum cgs_ucode_id { + CGS_UCODE_ID_SMU = 0, + CGS_UCODE_ID_SDMA0, + CGS_UCODE_ID_SDMA1, + CGS_UCODE_ID_CP_CE, + CGS_UCODE_ID_CP_PFP, + CGS_UCODE_ID_CP_ME, + CGS_UCODE_ID_CP_MEC, + CGS_UCODE_ID_CP_MEC_JT1, + CGS_UCODE_ID_CP_MEC_JT2, + CGS_UCODE_ID_GMCON_RENG, + CGS_UCODE_ID_RLC_G, + CGS_UCODE_ID_MAXIMUM, +}; + +/** + * struct cgs_clock_limits - Clock limits + * + * Clocks are specified in 10KHz units. 
+ */
+struct cgs_clock_limits {
+	unsigned min;		/**< Minimum supported frequency */
+	unsigned max;		/**< Maximum supported frequency */
+	unsigned sustainable;	/**< Thermally sustainable frequency */
+};
+
+/**
+ * struct cgs_firmware_info - Firmware information
+ */
+struct cgs_firmware_info {
+	uint16_t	version;
+	uint16_t	feature_version;
+	uint32_t	image_size;
+	uint64_t	mc_addr;
+	void		*kptr;
+};
+
+typedef unsigned long cgs_handle_t;
+
+/**
+ * cgs_gpu_mem_info() - Return information about memory heaps
+ * @cgs_device: opaque device handle
+ * @type: memory type
+ * @mc_start: Start MC address of the heap (output)
+ * @mc_size: MC address space size (output)
+ * @mem_size: maximum amount of memory available for allocation (output)
+ *
+ * This function returns information about memory heaps. The type
+ * parameter is used to select the memory heap. The mc_start and
+ * mc_size for GART heaps may be bigger than the memory available for
+ * allocation.
+ *
+ * mc_start and mc_size are undefined for non-contiguous FB memory
+ * types, since buffers allocated with these types may or may not be
+ * GART mapped.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_gpu_mem_info_t)(void *cgs_device, enum cgs_gpu_mem_type type,
+				  uint64_t *mc_start, uint64_t *mc_size,
+				  uint64_t *mem_size);
+
+/**
+ * cgs_gmap_kmem() - map kernel memory to GART aperture
+ * @cgs_device: opaque device handle
+ * @kmem: pointer to kernel memory
+ * @size: size to map
+ * @min_offset: minimum offset from start of GART aperture
+ * @max_offset: maximum offset from start of GART aperture
+ * @kmem_handle: kernel memory handle (output)
+ * @mcaddr: MC address (output)
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_gmap_kmem_t)(void *cgs_device, void *kmem, uint64_t size,
+			       uint64_t min_offset, uint64_t max_offset,
+			       cgs_handle_t *kmem_handle, uint64_t *mcaddr);
+
+/**
+ * cgs_gunmap_kmem() - unmap kernel memory
+ * @cgs_device: opaque device handle
+ * @kmem_handle: kernel memory handle returned by gmap_kmem
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_gunmap_kmem_t)(void *cgs_device, cgs_handle_t kmem_handle);
+
+/**
+ * cgs_alloc_gpu_mem() - Allocate GPU memory
+ * @cgs_device: opaque device handle
+ * @type: memory type
+ * @size: size in bytes
+ * @align: alignment in bytes
+ * @min_offset: minimum offset from start of heap
+ * @max_offset: maximum offset from start of heap
+ * @handle: memory handle (output)
+ *
+ * The memory types CGS_GPU_MEM_TYPE_*_CONTIG_FB force contiguous
+ * memory allocation. This guarantees that the MC address returned by
+ * cgs_gmap_gpu_mem is not mapped through the GART. The non-contiguous
+ * FB memory types may be GART mapped depending on memory
+ * fragmentation and memory allocator policies.
+ *
+ * If min/max_offset are non-0, the allocation will be forced to
+ * reside between these offsets in its respective memory heap. The
+ * base address that the offset relates to depends on the memory
+ * type.
+ *
+ * - CGS_GPU_MEM_TYPE__*_CONTIG_FB: FB MC base address
+ * - CGS_GPU_MEM_TYPE__GART_*: GART aperture base address
+ * - others: undefined, don't use with max_offset
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_alloc_gpu_mem_t)(void *cgs_device, enum cgs_gpu_mem_type type,
+				   uint64_t size, uint64_t align,
+				   uint64_t min_offset, uint64_t max_offset,
+				   cgs_handle_t *handle);
+
+/**
+ * cgs_free_gpu_mem() - Free GPU memory
+ * @cgs_device: opaque device handle
+ * @handle: memory handle returned by alloc or import
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_free_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+
+/**
+ * cgs_gmap_gpu_mem() - GPU-map GPU memory
+ * @cgs_device: opaque device handle
+ * @handle: memory handle returned by alloc or import
+ * @mcaddr: MC address (output)
+ *
+ * Ensures that a buffer is GPU accessible and returns its MC address.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_gmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
+				  uint64_t *mcaddr);
+
+/**
+ * cgs_gunmap_gpu_mem() - GPU-unmap GPU memory
+ * @cgs_device: opaque device handle
+ * @handle: memory handle returned by alloc or import
+ *
+ * Allows the buffer to be migrated while it's not used by the GPU.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_gunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+
+/**
+ * cgs_kmap_gpu_mem() - Kernel-map GPU memory
+ * @cgs_device: opaque device handle
+ * @handle: memory handle returned by alloc or import
+ * @map: Kernel virtual address the memory was mapped to (output)
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_kmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
+				  void **map);
+
+/**
+ * cgs_kunmap_gpu_mem() - Kernel-unmap GPU memory
+ * @cgs_device: opaque device handle
+ * @handle: memory handle returned by alloc or import
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_kunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+
+/**
+ * cgs_read_register() - Read an MMIO register
+ * @cgs_device: opaque device handle
+ * @offset: register offset
+ *
+ * Return: register value
+ */
+typedef uint32_t (*cgs_read_register_t)(void *cgs_device, unsigned offset);
+
+/**
+ * cgs_write_register() - Write an MMIO register
+ * @cgs_device: opaque device handle
+ * @offset: register offset
+ * @value: register value
+ */
+typedef void (*cgs_write_register_t)(void *cgs_device, unsigned offset,
+				     uint32_t value);
+
+/**
+ * cgs_read_ind_register() - Read an indirect register
+ * @cgs_device: opaque device handle
+ * @space: indirect register space
+ * @index: register index within the space
+ *
+ * Return: register value
+ */
+typedef uint32_t (*cgs_read_ind_register_t)(void *cgs_device, enum cgs_ind_reg space,
+					    unsigned index);
+
+/**
+ * cgs_write_ind_register() - Write an indirect register
+ * @cgs_device: opaque device handle
+ * @space: indirect register space
+ * @index: register index within the space
+ * @value: register value
+ */
+typedef void (*cgs_write_ind_register_t)(void *cgs_device, enum cgs_ind_reg space,
+					 unsigned index, uint32_t value);
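/*
 * An illustrative sketch, not part of this patch: how the *_MASK/*__SHIFT
 * pairs from the sh_mask headers earlier in this series combine with the
 * MMIO accessors just declared (through the cgs_read_register()/
 * cgs_write_register() convenience macros defined at the end of this
 * header) to read-modify-write a single register field. The
 * LCAC_MC0_CNTL field defines are real; mmLCAC_MC0_CNTL stands in for the
 * matching register offset define from the companion smu_7_1_3_d.h header.
 */
static void example_set_mc0_threshold(void *cgs_device, uint32_t threshold)
{
	uint32_t val = cgs_read_register(cgs_device, mmLCAC_MC0_CNTL);

	val &= ~LCAC_MC0_CNTL__MC0_THRESHOLD_MASK;
	val |= (threshold << LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT) &
	       LCAC_MC0_CNTL__MC0_THRESHOLD_MASK;
	cgs_write_register(cgs_device, mmLCAC_MC0_CNTL, val);
}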
+
+/**
+ * cgs_read_pci_config_byte() - Read byte from PCI configuration space
+ * @cgs_device: opaque device handle
+ * @addr: address
+ *
+ * Return: Value read
+ */
+typedef uint8_t (*cgs_read_pci_config_byte_t)(void *cgs_device, unsigned addr);
+
+/**
+ * cgs_read_pci_config_word() - Read word from PCI configuration space
+ * @cgs_device: opaque device handle
+ * @addr: address, must be word-aligned
+ *
+ * Return: Value read
+ */
+typedef uint16_t (*cgs_read_pci_config_word_t)(void *cgs_device, unsigned addr);
+
+/**
+ * cgs_read_pci_config_dword() - Read dword from PCI configuration space
+ * @cgs_device: opaque device handle
+ * @addr: address, must be dword-aligned
+ *
+ * Return: Value read
+ */
+typedef uint32_t (*cgs_read_pci_config_dword_t)(void *cgs_device,
+						unsigned addr);
+
+/**
+ * cgs_write_pci_config_byte() - Write byte to PCI configuration space
+ * @cgs_device: opaque device handle
+ * @addr: address
+ * @value: value to write
+ */
+typedef void (*cgs_write_pci_config_byte_t)(void *cgs_device, unsigned addr,
+					    uint8_t value);
+
+/**
+ * cgs_write_pci_config_word() - Write word to PCI configuration space
+ * @cgs_device: opaque device handle
+ * @addr: address, must be word-aligned
+ * @value: value to write
+ */
+typedef void (*cgs_write_pci_config_word_t)(void *cgs_device, unsigned addr,
+					    uint16_t value);
+
+/**
+ * cgs_write_pci_config_dword() - Write dword to PCI configuration space
+ * @cgs_device: opaque device handle
+ * @addr: address, must be dword-aligned
+ * @value: value to write
+ */
+typedef void (*cgs_write_pci_config_dword_t)(void *cgs_device, unsigned addr,
+					     uint32_t value);
+
+/**
+ * cgs_atom_get_data_table() - Get a pointer to an ATOM BIOS data table
+ * @cgs_device: opaque device handle
+ * @table: data table index
+ * @size: size of the table (output, may be NULL)
+ * @frev: table format revision (output, may be NULL)
+ * @crev: table content revision (output, may be NULL)
+ *
+ * Return: Pointer to start of the table, or NULL on failure
+ */
+typedef const void *(*cgs_atom_get_data_table_t)(
+	void *cgs_device, unsigned table,
+	uint16_t *size, uint8_t *frev, uint8_t *crev);
+
+/**
+ * cgs_atom_get_cmd_table_revs() - Get ATOM BIOS command table revisions
+ * @cgs_device: opaque device handle
+ * @table: command table index
+ * @frev: table format revision (output, may be NULL)
+ * @crev: table content revision (output, may be NULL)
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_atom_get_cmd_table_revs_t)(void *cgs_device, unsigned table,
+					     uint8_t *frev, uint8_t *crev);
+
+/**
+ * cgs_atom_exec_cmd_table() - Execute an ATOM BIOS command table
+ * @cgs_device: opaque device handle
+ * @table: command table index
+ * @args: arguments
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_atom_exec_cmd_table_t)(void *cgs_device,
+					 unsigned table, void *args);
+
+/**
+ * cgs_create_pm_request() - Create a power management request
+ * @cgs_device: opaque device handle
+ * @request: handle of created PM request (output)
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_create_pm_request_t)(void *cgs_device, cgs_handle_t *request);
+
+/**
+ * cgs_destroy_pm_request() - Destroy a power management request
+ * @cgs_device: opaque device handle
+ * @request: handle of created PM request
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_destroy_pm_request_t)(void *cgs_device, cgs_handle_t request);
+
+/**
+ * cgs_set_pm_request() - Activate or deactivate a PM request
+ * @cgs_device: opaque device handle
+ * @request: PM request handle
+ * @active: 0 = deactivate, non-0 = activate
+ *
+ * While a PM request is active, its minimum clock requests are taken
+ * into account as the requested engines are powered up. When the
+ * request is inactive, the engines may be powered down and clocks may
+ * be lower, depending on other PM requests by other driver
+ * components.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_set_pm_request_t)(void *cgs_device, cgs_handle_t request,
+				    int active);
+
+/**
+ * cgs_pm_request_clock() - Request a minimum frequency for a specific clock
+ * @cgs_device: opaque device handle
+ * @request: PM request handle
+ * @clock: which clock?
+ * @freq: requested min. frequency in 10KHz units (0 to clear request)
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_pm_request_clock_t)(void *cgs_device, cgs_handle_t request,
+				      enum cgs_clock clock, unsigned freq);
+
+/**
+ * cgs_pm_request_engine() - Request an engine to be powered up
+ * @cgs_device: opaque device handle
+ * @request: PM request handle
+ * @engine: which engine?
+ * @powered: 0 = powered down, non-0 = powered up
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_pm_request_engine_t)(void *cgs_device, cgs_handle_t request,
+				       enum cgs_engine engine, int powered);
+
+/**
+ * cgs_pm_query_clock_limits() - Query clock frequency limits
+ * @cgs_device: opaque device handle
+ * @clock: which clock?
+ * @limits: clock limits
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_pm_query_clock_limits_t)(void *cgs_device,
+					   enum cgs_clock clock,
+					   struct cgs_clock_limits *limits);
+
+/**
+ * cgs_set_camera_voltages() - Apply specific voltages to PMIC voltage planes
+ * @cgs_device: opaque device handle
+ * @mask: bitmask of voltages to change (1<<CGS_VOLTAGE_PLANE__xyz|...)
+ * @voltages: pointer to array of voltage values in 1mV units
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_set_camera_voltages_t)(void *cgs_device, uint32_t mask,
+					 const uint32_t *voltages);
+
+/**
+ * cgs_get_firmware_info() - Get firmware information from the core driver
+ * @cgs_device: opaque device handle
+ * @type: the firmware type
+ * @info: returned firmware information
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_get_firmware_info)(void *cgs_device,
+				     enum cgs_ucode_id type,
+				     struct cgs_firmware_info *info);
+
+typedef int (*cgs_set_powergating_state)(void *cgs_device,
+					 enum amd_ip_block_type block_type,
+					 enum amd_powergating_state state);
+
+typedef int (*cgs_set_clockgating_state)(void *cgs_device,
+					 enum amd_ip_block_type block_type,
+					 enum amd_clockgating_state state);
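/*
 * An illustrative sketch, not part of this patch: the intended lifecycle of
 * a PM request as documented above, written against the cgs_* convenience
 * macros defined at the end of this header. The function name, the UVD
 * engine choice and the 300 MHz SCLK floor (30000 in 10KHz units) are
 * assumptions for illustration only.
 */
static int example_power_up_uvd(void *cgs_device, cgs_handle_t *req)
{
	int ret = cgs_create_pm_request(cgs_device, req);

	if (ret)
		return ret;
	/* register minimum clock and engine requirements on the request */
	ret = cgs_pm_request_clock(cgs_device, *req, CGS_CLOCK__SCLK, 30000);
	if (!ret)
		ret = cgs_pm_request_engine(cgs_device, *req, CGS_ENGINE__UVD, 1);
	if (!ret)
		/* activate: the minimum clock requests now take effect */
		ret = cgs_set_pm_request(cgs_device, *req, 1);
	if (ret)
		cgs_destroy_pm_request(cgs_device, *req);
	return ret;
}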
+struct cgs_ops {
+	/* memory management calls (similar to KFD interface) */
+	cgs_gpu_mem_info_t gpu_mem_info;
+	cgs_gmap_kmem_t gmap_kmem;
+	cgs_gunmap_kmem_t gunmap_kmem;
+	cgs_alloc_gpu_mem_t alloc_gpu_mem;
+	cgs_free_gpu_mem_t free_gpu_mem;
+	cgs_gmap_gpu_mem_t gmap_gpu_mem;
+	cgs_gunmap_gpu_mem_t gunmap_gpu_mem;
+	cgs_kmap_gpu_mem_t kmap_gpu_mem;
+	cgs_kunmap_gpu_mem_t kunmap_gpu_mem;
+	/* MMIO access */
+	cgs_read_register_t read_register;
+	cgs_write_register_t write_register;
+	cgs_read_ind_register_t read_ind_register;
+	cgs_write_ind_register_t write_ind_register;
+	/* PCI configuration space access */
+	cgs_read_pci_config_byte_t read_pci_config_byte;
+	cgs_read_pci_config_word_t read_pci_config_word;
+	cgs_read_pci_config_dword_t read_pci_config_dword;
+	cgs_write_pci_config_byte_t write_pci_config_byte;
+	cgs_write_pci_config_word_t write_pci_config_word;
+	cgs_write_pci_config_dword_t write_pci_config_dword;
+	/* ATOM BIOS */
+	cgs_atom_get_data_table_t atom_get_data_table;
+	cgs_atom_get_cmd_table_revs_t atom_get_cmd_table_revs;
+	cgs_atom_exec_cmd_table_t atom_exec_cmd_table;
+	/* Power management */
+	cgs_create_pm_request_t create_pm_request;
+	cgs_destroy_pm_request_t destroy_pm_request;
+	cgs_set_pm_request_t set_pm_request;
+	cgs_pm_request_clock_t pm_request_clock;
+	cgs_pm_request_engine_t pm_request_engine;
+	cgs_pm_query_clock_limits_t pm_query_clock_limits;
+	cgs_set_camera_voltages_t set_camera_voltages;
+	/* Firmware Info */
+	cgs_get_firmware_info get_firmware_info;
+	/* clockgating/powergating interface */
+	cgs_set_powergating_state set_powergating_state;
+	cgs_set_clockgating_state set_clockgating_state;
+	/* ACPI (TODO) */
+};
+
+struct cgs_os_ops; /* To be defined in the OS-specific CGS header */
+
+struct cgs_device
+{
+	const struct cgs_ops *ops;
+	const struct cgs_os_ops *os_ops;
+	/* to be embedded at the start of driver private structure */
+};
+
+/* Convenience macros that make CGS indirect function calls look like
+ * normal function calls */
+#define CGS_CALL(func,dev,...) \
+	(((struct cgs_device *)dev)->ops->func(dev, ##__VA_ARGS__))
+#define CGS_OS_CALL(func,dev,...) \
+	(((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__))
+
+#define cgs_gpu_mem_info(dev,type,mc_start,mc_size,mem_size)	\
+	CGS_CALL(gpu_mem_info,dev,type,mc_start,mc_size,mem_size)
+#define cgs_gmap_kmem(dev,kmem,size,min_off,max_off,kmem_handle,mcaddr)	\
+	CGS_CALL(gmap_kmem,dev,kmem,size,min_off,max_off,kmem_handle,mcaddr)
+#define cgs_gunmap_kmem(dev,kmem_handle)	\
+	CGS_CALL(gunmap_kmem,dev,kmem_handle)
+#define cgs_alloc_gpu_mem(dev,type,size,align,min_off,max_off,handle)	\
+	CGS_CALL(alloc_gpu_mem,dev,type,size,align,min_off,max_off,handle)
+#define cgs_free_gpu_mem(dev,handle)	\
+	CGS_CALL(free_gpu_mem,dev,handle)
+#define cgs_gmap_gpu_mem(dev,handle,mcaddr)	\
+	CGS_CALL(gmap_gpu_mem,dev,handle,mcaddr)
+#define cgs_gunmap_gpu_mem(dev,handle)	\
+	CGS_CALL(gunmap_gpu_mem,dev,handle)
+#define cgs_kmap_gpu_mem(dev,handle,map)	\
+	CGS_CALL(kmap_gpu_mem,dev,handle,map)
+#define cgs_kunmap_gpu_mem(dev,handle)	\
+	CGS_CALL(kunmap_gpu_mem,dev,handle)
+
+#define cgs_read_register(dev,offset)	\
+	CGS_CALL(read_register,dev,offset)
+#define cgs_write_register(dev,offset,value)	\
+	CGS_CALL(write_register,dev,offset,value)
+#define cgs_read_ind_register(dev,space,index)	\
+	CGS_CALL(read_ind_register,dev,space,index)
+#define cgs_write_ind_register(dev,space,index,value)	\
+	CGS_CALL(write_ind_register,dev,space,index,value)
+
+#define cgs_read_pci_config_byte(dev,addr)	\
+	CGS_CALL(read_pci_config_byte,dev,addr)
+#define cgs_read_pci_config_word(dev,addr)	\
+	CGS_CALL(read_pci_config_word,dev,addr)
+#define cgs_read_pci_config_dword(dev,addr)	\
+	CGS_CALL(read_pci_config_dword,dev,addr)
+#define cgs_write_pci_config_byte(dev,addr,value)	\
+	CGS_CALL(write_pci_config_byte,dev,addr,value)
+#define cgs_write_pci_config_word(dev,addr,value)	\
+	CGS_CALL(write_pci_config_word,dev,addr,value)
+#define cgs_write_pci_config_dword(dev,addr,value)	\
+	CGS_CALL(write_pci_config_dword,dev,addr,value)
+
+#define cgs_atom_get_data_table(dev,table,size,frev,crev)	\
+	CGS_CALL(atom_get_data_table,dev,table,size,frev,crev)
+#define cgs_atom_get_cmd_table_revs(dev,table,frev,crev)	\
+	CGS_CALL(atom_get_cmd_table_revs,dev,table,frev,crev)
+#define cgs_atom_exec_cmd_table(dev,table,args)	\
+	CGS_CALL(atom_exec_cmd_table,dev,table,args)
+
+#define cgs_create_pm_request(dev,request)	\
+	CGS_CALL(create_pm_request,dev,request)
+#define cgs_destroy_pm_request(dev,request)	\
+	CGS_CALL(destroy_pm_request,dev,request)
+#define cgs_set_pm_request(dev,request,active)	\
+	CGS_CALL(set_pm_request,dev,request,active)
+#define cgs_pm_request_clock(dev,request,clock,freq)	\
CGS_CALL(pm_request_clock,dev,request,clock,freq) +#define cgs_pm_request_engine(dev,request,engine,powered) \ + CGS_CALL(pm_request_engine,dev,request,engine,powered) +#define cgs_pm_query_clock_limits(dev,clock,limits) \ + CGS_CALL(pm_query_clock_limits,dev,clock,limits) +#define cgs_set_camera_voltages(dev,mask,voltages) \ + CGS_CALL(set_camera_voltages,dev,mask,voltages) +#define cgs_get_firmware_info(dev, type, info) \ + CGS_CALL(get_firmware_info, dev, type, info) +#define cgs_set_powergating_state(dev, block_type, state) \ + CGS_CALL(set_powergating_state, dev, block_type, state) +#define cgs_set_clockgating_state(dev, block_type, state) \ + CGS_CALL(set_clockgating_state, dev, block_type, state) + +#endif /* _CGS_COMMON_H */ diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h new file mode 100644 index 000000000000..488642f08267 --- /dev/null +++ b/drivers/gpu/drm/amd/include/cgs_linux.h @@ -0,0 +1,135 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * + */ +#ifndef _CGS_LINUX_H +#define _CGS_LINUX_H + +#include "cgs_common.h" + +/** + * cgs_import_gpu_mem() - Import dmabuf handle + * @cgs_device: opaque device handle + * @dmabuf_fd: DMABuf file descriptor + * @handle: memory handle (output) + * + * Must be called in the process context that dmabuf_fd belongs to. + * + * Return: 0 on success, -errno otherwise + */ +typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd, + cgs_handle_t *handle); + +/** + * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources + * @private_data: private data provided to cgs_add_irq_source + * @src_id: interrupt source ID + * @type: interrupt type + * @enabled: 0 = disable source, non-0 = enable source + * + * Return: 0 on success, -errno otherwise + */ +typedef int (*cgs_irq_source_set_func_t)(void *private_data, + unsigned src_id, unsigned type, + int enabled); + +/** + * cgs_irq_handler_func() - Interrupt handler callback + * @private_data: private data provided to cgs_add_irq_source + * @src_id: interrupt source ID + * @iv_entry: pointer to raw ih ring entry + * + * This callback runs in interrupt context. 
+ * + * Return: 0 on success, -errno otherwise + */ +typedef int (*cgs_irq_handler_func_t)(void *private_data, + unsigned src_id, const uint32_t *iv_entry); + +/** + * cgs_add_irq_source() - Add an IRQ source + * @cgs_device: opaque device handle + * @src_id: interrupt source ID + * @num_types: number of interrupt types that can be independently enabled + * @set: callback function to enable/disable an interrupt type + * @handler: interrupt handler callback + * @private_data: private data to pass to callback functions + * + * The same IRQ source can be added only once. Adding an IRQ source + * indicates ownership of that IRQ source and all its IRQ types. + * + * Return: 0 on success, -errno otherwise + */ +typedef int (*cgs_add_irq_source_t)(void *cgs_device, unsigned src_id, + unsigned num_types, + cgs_irq_source_set_func_t set, + cgs_irq_handler_func_t handler, + void *private_data); + +/** + * cgs_irq_get() - Request enabling an IRQ source and type + * @cgs_device: opaque device handle + * @src_id: interrupt source ID + * @type: interrupt type + * + * cgs_irq_get and cgs_irq_put calls must be balanced. They count + * "references" to IRQ sources. + * + * Return: 0 on success, -errno otherwise + */ +typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type); + +/** + * cgs_irq_put() - Indicate IRQ source is no longer needed + * @cgs_device: opaque device handle + * @src_id: interrupt source ID + * @type: interrupt type + * + * cgs_irq_get and cgs_irq_put calls must be balanced. They count + * "references" to IRQ sources. Even after cgs_irq_put is called, the + * IRQ handler may still be called if there are more references to + * the IRQ source. + * + * Return: 0 on success, -errno otherwise + */ +typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type); + +struct cgs_os_ops { + cgs_import_gpu_mem_t import_gpu_mem; + + /* IRQ handling */ + cgs_add_irq_source_t add_irq_source; + cgs_irq_get_t irq_get; + cgs_irq_put_t irq_put; +}; + +#define cgs_import_gpu_mem(dev,dmabuf_fd,handle) \ + CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle) +#define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \ + CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \ + private_data) +#define cgs_irq_get(dev,src_id,type) \ + CGS_OS_CALL(irq_get,dev,src_id,type) +#define cgs_irq_put(dev,src_id,type) \ + CGS_OS_CALL(irq_put,dev,src_id,type) + +#endif /* _CGS_LINUX_H */
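Since cgs_irq_get() and cgs_irq_put() are documented above as reference counted, a driver-side IP block would typically claim its source once and then bracket use of the interrupt with balanced get/put calls. A minimal sketch under that assumption; TOY_SRC_ID_GFX and the toy_* names are hypothetical:

/* Hedged sketch of balanced IRQ reference counting against the OS ops
 * just defined. */
#define TOY_SRC_ID_GFX 42

static int toy_irq_set(void *private_data, unsigned src_id,
		       unsigned type, int enabled)
{
	/* flip the enable bit for this source/type in hardware */
	return 0;
}

static int toy_irq_handler(void *private_data, unsigned src_id,
			   const uint32_t *iv_entry)
{
	/* runs in interrupt context; decode iv_entry and ack the IRQ */
	return 0;
}

static int toy_irq_setup(void *cgs_device)
{
	int r;

	/* claim the source once: one type, our set/handler callbacks */
	r = cgs_add_irq_source(cgs_device, TOY_SRC_ID_GFX, 1,
			       toy_irq_set, toy_irq_handler, NULL);
	if (r)
		return r;
	/* take a reference; must be balanced by cgs_irq_put() later */
	return cgs_irq_get(cgs_device, TOY_SRC_ID_GFX, 0);
}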
diff --git a/drivers/gpu/drm/amd/amdgpu/pptable.h b/drivers/gpu/drm/amd/include/pptable.h index 0030f726e68c..ee6978b30b77 --- a/drivers/gpu/drm/amd/amdgpu/pptable.h +++ b/drivers/gpu/drm/amd/include/pptable.h @@ -146,6 +146,9 @@ typedef struct _ATOM_PPLIB_EXTENDEDHEADER #define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE 0x00200000 // Does the driver supports VR HOT GPIO Configurable. #define ATOM_PP_PLATFORM_CAP_TEMP_INVERSION 0x00400000 // Does the driver supports Temp Inversion feature. #define ATOM_PP_PLATFORM_CAP_EVV 0x00800000 +#define ATOM_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL 0x01000000 +#define ATOM_PP_PLATFORM_LOAD_POST_PRODUCTION_FIRMWARE 0x02000000 +#define ATOM_PP_PLATFORM_CAP_DISABLE_USING_ACTUAL_TEMPERATURE_FOR_POWER_CALC 0x04000000 typedef struct _ATOM_PPLIB_POWERPLAYTABLE { @@ -673,7 +676,8 @@ typedef struct _ATOM_PPLIB_POWERTUNE_Table_V1 UCHAR revid; ATOM_PowerTune_Table power_tune_table; USHORT usMaximumPowerDeliveryLimit; - USHORT usReserve[7]; + USHORT usTjMax; + USHORT usReserve[6]; } ATOM_PPLIB_POWERTUNE_Table_V1; #define ATOM_PPM_A_A 1 diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c new file mode 100644 index 000000000000..265d3e2f63cc --- /dev/null +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -0,0 +1,462 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * + */ +#include <linux/kthread.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <drm/drmP.h> +#include "gpu_scheduler.h" + +/* Initialize a given run queue struct */ +static void amd_sched_rq_init(struct amd_sched_rq *rq) +{ + INIT_LIST_HEAD(&rq->entities); + mutex_init(&rq->lock); + rq->current_entity = NULL; +} + +static void amd_sched_rq_add_entity(struct amd_sched_rq *rq, + struct amd_sched_entity *entity) +{ + mutex_lock(&rq->lock); + list_add_tail(&entity->list, &rq->entities); + mutex_unlock(&rq->lock); +} + +static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq, + struct amd_sched_entity *entity) +{ + mutex_lock(&rq->lock); + list_del_init(&entity->list); + if (rq->current_entity == entity) + rq->current_entity = NULL; + mutex_unlock(&rq->lock); +} +
+/** + * Select the next entity from a specified run queue with round robin policy. + * It could return the same entity as the current one if that is the only + * available one in the queue. Return NULL if nothing is available. + */ +static struct amd_sched_entity * +amd_sched_rq_select_entity(struct amd_sched_rq *rq) +{ + struct amd_sched_entity *entity = rq->current_entity; + + if (entity) { + list_for_each_entry_continue(entity, &rq->entities, list) { + if (!kfifo_is_empty(&entity->job_queue)) { + rq->current_entity = entity; + return rq->current_entity; + } + } + } + + list_for_each_entry(entity, &rq->entities, list) { + + if (!kfifo_is_empty(&entity->job_queue)) { + rq->current_entity = entity; + return rq->current_entity; + } + + if (entity == rq->current_entity) + break; + } + + return NULL; +} +
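The subtle point in amd_sched_rq_select_entity() is that the scan resumes just past current_entity, so one busy entity cannot starve the rest. A self-contained miniature of the same round-robin policy, with plain flags standing in for non-empty job queues (all names hypothetical):

#include <stdio.h>

#define N_ENTITIES 3

static int has_jobs[N_ENTITIES] = {1, 0, 1};	/* non-empty job queues */
static int current_entity = -1;			/* index of last pick */

static int toy_rq_select_entity(void)
{
	int i, idx;

	/* start scanning just past the previous pick, wrap around once */
	for (i = 1; i <= N_ENTITIES; i++) {
		idx = (current_entity + i) % N_ENTITIES;
		if (has_jobs[idx]) {
			current_entity = idx;
			return idx;
		}
	}
	return -1;				/* nothing available */
}

int main(void)
{
	int a = toy_rq_select_entity();
	int b = toy_rq_select_entity();
	int c = toy_rq_select_entity();

	printf("%d %d %d\n", a, b, c);	/* 0 2 0: 0 and 2 alternate, 1 is skipped */
	return 0;
}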
+/** + * Note: This function should only be called inside the scheduler main + * function for thread safety; there is no other protection here. + * Return true if the hardware queue can accept more IBs. + * + * For active_hw_rq, there is only one producer (the scheduler thread) and + * one consumer (the ISR). It should be safe to use this function in the + * scheduler main thread to decide whether to continue emitting more IBs. +*/ +static bool is_scheduler_ready(struct amd_gpu_scheduler *sched) +{ + unsigned long flags; + bool has_room; + + spin_lock_irqsave(&sched->queue_lock, flags); + has_room = atomic64_read(&sched->hw_rq_count) < + sched->hw_submission_limit; + spin_unlock_irqrestore(&sched->queue_lock, flags); + + return has_room; +} + +/** + * Select the next entity from the kernel run queue; return NULL if none + * is available. +*/ +static struct amd_sched_entity * +kernel_rq_select_context(struct amd_gpu_scheduler *sched) +{ + struct amd_sched_entity *sched_entity; + struct amd_sched_rq *rq = &sched->kernel_rq; + + mutex_lock(&rq->lock); + sched_entity = amd_sched_rq_select_entity(rq); + mutex_unlock(&rq->lock); + return sched_entity; +} + +/** + * Select the next entity containing real IB submissions +*/ +static struct amd_sched_entity * +select_context(struct amd_gpu_scheduler *sched) +{ + struct amd_sched_entity *wake_entity = NULL; + struct amd_sched_entity *tmp; + struct amd_sched_rq *rq; + + if (!is_scheduler_ready(sched)) + return NULL; + + /* Kernel run queue has higher priority than the normal run queue */ + tmp = kernel_rq_select_context(sched); + if (tmp != NULL) + goto exit; + + rq = &sched->sched_rq; + mutex_lock(&rq->lock); + tmp = amd_sched_rq_select_entity(rq); + mutex_unlock(&rq->lock); +exit: + if (sched->current_entity && (sched->current_entity != tmp)) + wake_entity = sched->current_entity; + sched->current_entity = tmp; + if (wake_entity && wake_entity->need_wakeup) + wake_up(&wake_entity->wait_queue); + return tmp; +} + +/** + * Init a context entity used by the scheduler when submitting to the HW ring. + * + * @sched The pointer to the scheduler + * @entity The pointer to a valid amd_sched_entity + * @rq The run queue this entity belongs to + * @jobs The max number of jobs in the job queue + * + * Return 0 on success, negative error code on failure +*/ +int amd_sched_entity_init(struct amd_gpu_scheduler *sched, + struct amd_sched_entity *entity, + struct amd_sched_rq *rq, + uint32_t jobs) +{ + uint64_t seq_ring = 0; + char name[20]; + + if (!(sched && entity && rq)) + return -EINVAL; + + memset(entity, 0, sizeof(struct amd_sched_entity)); + seq_ring = ((uint64_t)sched->ring_id) << 60; + spin_lock_init(&entity->lock); + entity->belongto_rq = rq; + entity->scheduler = sched; + init_waitqueue_head(&entity->wait_queue); + init_waitqueue_head(&entity->wait_emit); + entity->fence_context = fence_context_alloc(1); + snprintf(name, sizeof(name), "c_entity[%llu]", entity->fence_context); + memcpy(entity->name, name, 20); + entity->need_wakeup = false; + if (kfifo_alloc(&entity->job_queue, + jobs * sizeof(void *), + GFP_KERNEL)) + return -EINVAL; + + spin_lock_init(&entity->queue_lock); + atomic64_set(&entity->last_queued_v_seq, seq_ring); + atomic64_set(&entity->last_signaled_v_seq, seq_ring); + + /* Add the entity to the run queue */ + amd_sched_rq_add_entity(rq, entity); + return 0; +} + +/** + * Query if entity is initialized + * + * @sched Pointer to scheduler instance + * @entity The pointer to a valid scheduler entity + * + * Return true if the entity is initialized, false otherwise +*/ +static bool is_context_entity_initialized(struct amd_gpu_scheduler *sched, + struct amd_sched_entity *entity) +{ + return entity->scheduler == sched && + entity->belongto_rq != NULL; +} + +static bool is_context_entity_idle(struct amd_gpu_scheduler *sched, + struct amd_sched_entity *entity) +{ + /** + * Idle means no pending IBs, and the entity is not + * currently being used. + */ + barrier(); + if ((sched->current_entity != entity) && + kfifo_is_empty(&entity->job_queue)) + return true; + + return false; +} + +/** + * Destroy a context entity + * + * @sched Pointer to scheduler instance + * @entity The pointer to a valid scheduler entity + * + * Return 0 on success, negative error code on failure + */ +int amd_sched_entity_fini(struct amd_gpu_scheduler *sched, + struct amd_sched_entity *entity) +{ + int r = 0; + struct amd_sched_rq *rq = entity->belongto_rq; + + if (!is_context_entity_initialized(sched, entity)) + return 0; + entity->need_wakeup = true; + /** + * The client will not queue more IBs during this fini; consume the + * existing queued IBs + */ + r = wait_event_timeout( + entity->wait_queue, + is_context_entity_idle(sched, entity), + msecs_to_jiffies(AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS) + ) ? 0 : -1; + + if (r) { + if (entity->is_pending) + DRM_INFO("Entity %p is in waiting state during fini, all pending ibs will be canceled.\n", + entity); + } + + amd_sched_rq_remove_entity(rq, entity); + kfifo_free(&entity->job_queue); + return r; +} +
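amd_sched_entity_init() and amd_sched_entity_fini() bracket an entity's lifetime: init allocates the kfifo job queue and enrolls the entity in a run queue, and fini waits (up to the 3000 ms idle timeout) for queued IBs to drain before removing it. A hedged driver-side sketch of that pairing; the toy_* names are hypothetical:

static int toy_ctx_init(struct amd_gpu_scheduler *sched,
			struct amd_sched_entity *entity)
{
	/* 16-slot job queue, scheduled from the normal run queue */
	return amd_sched_entity_init(sched, entity, &sched->sched_rq, 16);
}

static void toy_ctx_fini(struct amd_gpu_scheduler *sched,
			 struct amd_sched_entity *entity)
{
	/* blocks until the entity is idle or the wait times out */
	amd_sched_entity_fini(sched, entity);
}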
+/** + * Submit a normal job to the job queue + * + * @sched The pointer to the scheduler + * @c_entity The pointer to amd_sched_entity + * @data The job data to submit + * @fence Returns the fence created for this job + * + * Return 0 on success; -EINVAL if no fence pointer is given or fence + * creation fails, -ENOMEM if the job cannot be allocated. If the job + * queue is full, the call waits until a slot frees up. +*/ +int amd_sched_push_job(struct amd_gpu_scheduler *sched, + struct amd_sched_entity *c_entity, + void *data, + struct amd_sched_fence **fence) +{ + struct amd_sched_job *job; + + if (!fence) + return -EINVAL; + job = kzalloc(sizeof(struct amd_sched_job), GFP_KERNEL); + if (!job) + return -ENOMEM; + job->sched = sched; + job->s_entity = c_entity; + job->data = data; + *fence = amd_sched_fence_create(c_entity); + if ((*fence) == NULL) { + kfree(job); + return -EINVAL; + } + fence_get(&(*fence)->base); + job->s_fence = *fence; + while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *), + &c_entity->queue_lock) != sizeof(void *)) { + /** + * The current context used up all its IB slots; wait here + * (a GPU-hang check may also be needed) + */ + schedule(); + } + /* first job wakes up the scheduler */ + if ((kfifo_len(&c_entity->job_queue) / sizeof(void *)) == 1) + wake_up_interruptible(&sched->wait_queue); + return 0; +} + +static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) +{ + struct amd_sched_job *sched_job = + container_of(cb, struct amd_sched_job, cb); + struct amd_gpu_scheduler *sched; + unsigned long flags; + + sched = sched_job->sched; + atomic64_set(&sched_job->s_entity->last_signaled_v_seq, + sched_job->s_fence->v_seq); + amd_sched_fence_signal(sched_job->s_fence); + spin_lock_irqsave(&sched->queue_lock, flags); + list_del(&sched_job->list); + atomic64_dec(&sched->hw_rq_count); + spin_unlock_irqrestore(&sched->queue_lock, flags); + + sched->ops->process_job(sched, sched_job); + fence_put(&sched_job->s_fence->base); + kfree(sched_job); + wake_up_interruptible(&sched->wait_queue); +} + +static int amd_sched_main(void *param) +{ + int r; + struct amd_sched_job *job; + struct sched_param sparam = {.sched_priority = 1}; + struct amd_sched_entity *c_entity = NULL; + struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param; + + sched_setscheduler(current, SCHED_FIFO, &sparam); + + while (!kthread_should_stop()) { + struct fence *fence; + + wait_event_interruptible(sched->wait_queue, + is_scheduler_ready(sched) && + (c_entity = select_context(sched))); + r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *)); + if (r != sizeof(void *)) + continue; + r = sched->ops->prepare_job(sched, c_entity, job); + if (!r) { + unsigned long flags; + spin_lock_irqsave(&sched->queue_lock, flags); + list_add_tail(&job->list, &sched->active_hw_rq); + atomic64_inc(&sched->hw_rq_count); + spin_unlock_irqrestore(&sched->queue_lock, flags); + } + mutex_lock(&sched->sched_lock); + fence = sched->ops->run_job(sched, c_entity, job); + if (fence) { + r = fence_add_callback(fence, &job->cb, + amd_sched_process_job); + if (r == -ENOENT) + amd_sched_process_job(fence, &job->cb); + else if (r) + DRM_ERROR("fence add callback failed (%d)\n", r); + fence_put(fence); + } + mutex_unlock(&sched->sched_lock); + } + return 0; +} +
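The main loop above fixes the backend contract: prepare_job runs before a job is accounted against the hardware queue, run_job must hand back the fence that the scheduler's completion callback rides on, and process_job runs once that fence signals. A minimal sketch of ops a driver might plug in; every toy_* name is hypothetical:

static struct fence *toy_submit_to_ring(void *data)
{
	return NULL;	/* hypothetical: kick the ring, return its HW fence */
}

static int toy_prepare_job(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *c_entity,
			   struct amd_sched_job *job)
{
	return 0;	/* e.g. validate and pin the IB's buffers */
}

static struct fence *toy_run_job(struct amd_gpu_scheduler *sched,
				 struct amd_sched_entity *c_entity,
				 struct amd_sched_job *job)
{
	return toy_submit_to_ring(job->data);
}

static void toy_process_job(struct amd_gpu_scheduler *sched,
			    struct amd_sched_job *job)
{
	/* e.g. unpin buffers, free per-job driver state */
}

static struct amd_sched_backend_ops toy_sched_ops = {
	.prepare_job = toy_prepare_job,
	.run_job = toy_run_job,
	.process_job = toy_process_job,
};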
+/** + * Create a gpu scheduler + * + * @device The device context for this scheduler + * @ops The backend operations for this scheduler. + * @ring The scheduler is per ring; this is the ring id. + * @granularity The minimum time unit, in ms, the scheduler will schedule at. + * @preemption Indicates whether this ring supports preemption; 0 means no. + * @hw_submission The max number of jobs in flight on the HW ring. + * + * Return the pointer to the scheduler on success, otherwise NULL +*/ +struct amd_gpu_scheduler *amd_sched_create(void *device, + struct amd_sched_backend_ops *ops, + unsigned ring, + unsigned granularity, + unsigned preemption, + unsigned hw_submission) +{ + struct amd_gpu_scheduler *sched; + char name[20]; + + sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL); + if (!sched) + return NULL; + + sched->device = device; + sched->ops = ops; + sched->granularity = granularity; + sched->ring_id = ring; + sched->preemption = preemption; + sched->hw_submission_limit = hw_submission; + snprintf(name, sizeof(name), "gpu_sched[%d]", ring); + mutex_init(&sched->sched_lock); + spin_lock_init(&sched->queue_lock); + amd_sched_rq_init(&sched->sched_rq); + amd_sched_rq_init(&sched->kernel_rq); + + init_waitqueue_head(&sched->wait_queue); + INIT_LIST_HEAD(&sched->active_hw_rq); + atomic64_set(&sched->hw_rq_count, 0); + /* Each scheduler will run on a separate kernel thread */ + sched->thread = kthread_create(amd_sched_main, sched, name); + if (!IS_ERR(sched->thread)) { + wake_up_process(sched->thread); + return sched; + } + + DRM_ERROR("Failed to create scheduler for id %d.\n", ring); + kfree(sched); + return NULL; +} + +/** + * Destroy a gpu scheduler + * + * @sched The pointer to the scheduler + * + * Return 0 on success. + */ +int amd_sched_destroy(struct amd_gpu_scheduler *sched) +{ + kthread_stop(sched->thread); + kfree(sched); + return 0; +} + +/** + * Get the next queued sequence number + * + * @entity The context entity + * + * Return the next queued sequence number +*/ +uint64_t amd_sched_next_queued_seq(struct amd_sched_entity *c_entity) +{ + return atomic64_read(&c_entity->last_queued_v_seq) + 1; +}
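Putting the pieces together, one scheduler instance is created per ring, and hw_submission bounds how many jobs is_scheduler_ready() lets in flight at once. A hedged sketch of bring-up and teardown, reusing the hypothetical toy_sched_ops above:

static struct amd_gpu_scheduler *toy_ring_sched_create(void *dev,
						       unsigned ring)
{
	/* 1 ms granularity, no preemption, at most 4 jobs on the HW ring */
	return amd_sched_create(dev, &toy_sched_ops, ring, 1, 0, 4);
}

static void toy_ring_sched_destroy(struct amd_gpu_scheduler *sched)
{
	if (sched)
		amd_sched_destroy(sched);	/* stops the kthread, frees sched */
}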
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h new file mode 100644 index 000000000000..ceb5918bfbeb --- /dev/null +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -0,0 +1,162 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _GPU_SCHEDULER_H_ +#define _GPU_SCHEDULER_H_ + +#include <linux/kfifo.h> +#include <linux/fence.h> + +#define AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 + +struct amd_gpu_scheduler; +struct amd_sched_rq; + +/** + * A scheduler entity is a wrapper around a job queue or a group + * of other entities. Entities take turns emitting jobs from their + * job queues to the corresponding hardware ring, based on the + * scheduling policy. +*/ +struct amd_sched_entity { + struct list_head list; + struct amd_sched_rq *belongto_rq; + spinlock_t lock; + /* the virtual_seq is unique per context per ring */ + atomic64_t last_queued_v_seq; + atomic64_t last_signaled_v_seq; + /* the job_queue maintains the jobs submitted by clients */ + struct kfifo job_queue; + spinlock_t queue_lock; + struct amd_gpu_scheduler *scheduler; + wait_queue_head_t wait_queue; + wait_queue_head_t wait_emit; + bool is_pending; + uint64_t fence_context; + char name[20]; + bool need_wakeup; +}; + +/** + * Run queue is a set of entities scheduling command submissions for + * one specific ring. It implements the scheduling policy that selects + * the next entity to emit commands from. +*/ +struct amd_sched_rq { + struct mutex lock; + struct list_head entities; + struct amd_sched_entity *current_entity; +}; + +struct amd_sched_fence { + struct fence base; + struct fence_cb cb; + struct amd_sched_entity *entity; + uint64_t v_seq; + spinlock_t lock; +}; + +struct amd_sched_job { + struct list_head list; + struct fence_cb cb; + struct amd_gpu_scheduler *sched; + struct amd_sched_entity *s_entity; + void *data; + struct amd_sched_fence *s_fence; +}; + +extern const struct fence_ops amd_sched_fence_ops; +static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f) +{ + struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base); + + if (__f->base.ops == &amd_sched_fence_ops) + return __f; + + return NULL; +} + +/** + * Define the backend operations called by the scheduler; + * these functions should be implemented on the driver side +*/ +struct amd_sched_backend_ops { + int (*prepare_job)(struct amd_gpu_scheduler *sched, + struct amd_sched_entity *c_entity, + struct amd_sched_job *job); + struct fence *(*run_job)(struct amd_gpu_scheduler *sched, + struct amd_sched_entity *c_entity, + struct amd_sched_job *job); + void (*process_job)(struct amd_gpu_scheduler *sched, + struct amd_sched_job *job); +}; + +/** + * One scheduler is implemented for each hardware ring +*/ +struct amd_gpu_scheduler { + void *device; + struct task_struct *thread; + struct amd_sched_rq sched_rq; + struct amd_sched_rq kernel_rq; + struct list_head active_hw_rq; + atomic64_t hw_rq_count; + struct amd_sched_backend_ops *ops; + uint32_t ring_id; + uint32_t granularity; /* in ms unit */ + uint32_t preemption; + wait_queue_head_t wait_queue; + struct amd_sched_entity *current_entity; + struct mutex sched_lock; + spinlock_t queue_lock; + uint32_t hw_submission_limit; +}; + +struct amd_gpu_scheduler *amd_sched_create(void *device, + struct amd_sched_backend_ops *ops, + uint32_t ring, + uint32_t granularity, + uint32_t preemption, + uint32_t hw_submission); +int amd_sched_destroy(struct amd_gpu_scheduler *sched); + +int amd_sched_push_job(struct amd_gpu_scheduler *sched, + struct amd_sched_entity *c_entity, + void *data, + struct amd_sched_fence **fence); + +int amd_sched_entity_init(struct amd_gpu_scheduler *sched, + struct amd_sched_entity *entity, + struct amd_sched_rq *rq, + uint32_t jobs); +int amd_sched_entity_fini(struct amd_gpu_scheduler *sched, + struct amd_sched_entity *entity); + +uint64_t amd_sched_next_queued_seq(struct amd_sched_entity *c_entity); + +struct amd_sched_fence *amd_sched_fence_create( + struct amd_sched_entity *s_entity); +void amd_sched_fence_signal(struct amd_sched_fence *fence); + + +#endif diff --git
a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c new file mode 100644 index 000000000000..a4751598c0b4 --- /dev/null +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c @@ -0,0 +1,78 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * + */ +#include <linux/kthread.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <drm/drmP.h> +#include "gpu_scheduler.h" + +struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity) +{ + struct amd_sched_fence *fence = NULL; + fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL); + if (fence == NULL) + return NULL; + fence->v_seq = atomic64_inc_return(&s_entity->last_queued_v_seq); + fence->entity = s_entity; + spin_lock_init(&fence->lock); + fence_init(&fence->base, &amd_sched_fence_ops, + &fence->lock, + s_entity->fence_context, + fence->v_seq); + return fence; +} + +void amd_sched_fence_signal(struct amd_sched_fence *fence) +{ + int ret = fence_signal(&fence->base); + if (!ret) + FENCE_TRACE(&fence->base, "signaled from irq context\n"); + else + FENCE_TRACE(&fence->base, "was already signaled\n"); +} + +static const char *amd_sched_fence_get_driver_name(struct fence *fence) +{ + return "amd_sched"; +} + +static const char *amd_sched_fence_get_timeline_name(struct fence *f) +{ + struct amd_sched_fence *fence = to_amd_sched_fence(f); + return (const char *)fence->entity->name; +} + +static bool amd_sched_fence_enable_signaling(struct fence *f) +{ + return true; +} + +const struct fence_ops amd_sched_fence_ops = { + .get_driver_name = amd_sched_fence_get_driver_name, + .get_timeline_name = amd_sched_fence_get_timeline_name, + .enable_signaling = amd_sched_fence_enable_signaling, + .signaled = NULL, + .wait = fence_default_wait, + .release = NULL, +}; diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index 7838e731b0de..7d03c51abcb9 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c @@ -22,9 +22,9 @@ static /*const*/ struct fb_ops armada_fb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, + .fb_fillrect = drm_fb_helper_cfb_fillrect, + .fb_copyarea = drm_fb_helper_cfb_copyarea, + .fb_imageblit = drm_fb_helper_cfb_imageblit, .fb_pan_display = 
drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, @@ -80,18 +80,12 @@ static int armada_fb_create(struct drm_fb_helper *fbh, if (IS_ERR(dfb)) return PTR_ERR(dfb); - info = framebuffer_alloc(0, dev->dev); - if (!info) { - ret = -ENOMEM; + info = drm_fb_helper_alloc_fbi(fbh); + if (IS_ERR(info)) { + ret = PTR_ERR(info); goto err_fballoc; } - ret = fb_alloc_cmap(&info->cmap, 256, 0); - if (ret) { - ret = -ENOMEM; - goto err_fbcmap; - } - strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id)); info->par = fbh; info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; @@ -101,7 +95,7 @@ static int armada_fb_create(struct drm_fb_helper *fbh, info->screen_size = obj->obj.size; info->screen_base = ptr; fbh->fb = &dfb->fb; - fbh->fbdev = info; + drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth); drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height); @@ -111,8 +105,6 @@ static int armada_fb_create(struct drm_fb_helper *fbh, return 0; - err_fbcmap: - framebuffer_release(info); err_fballoc: dfb->fb.funcs->destroy(&dfb->fb); return ret; @@ -171,6 +163,7 @@ int armada_fbdev_init(struct drm_device *dev) return 0; err_fb_setup: + drm_fb_helper_release_fbi(fbh); drm_fb_helper_fini(fbh); err_fb_helper: priv->fbdev = NULL; @@ -191,14 +184,8 @@ void armada_fbdev_fini(struct drm_device *dev) struct drm_fb_helper *fbh = priv->fbdev; if (fbh) { - struct fb_info *info = fbh->fbdev; - - if (info) { - unregister_framebuffer(info); - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - framebuffer_release(info); - } + drm_fb_helper_unregister_fbi(fbh); + drm_fb_helper_release_fbi(fbh); drm_fb_helper_fini(fbh); diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c index ff68eefae273..f31db28a684b 100644 --- a/drivers/gpu/drm/ast/ast_fb.c +++ b/drivers/gpu/drm/ast/ast_fb.c @@ -125,7 +125,7 @@ static void ast_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct ast_fbdev *afbdev = info->par; - sys_fillrect(info, rect); + drm_fb_helper_sys_fillrect(info, rect); ast_dirty_update(afbdev, rect->dx, rect->dy, rect->width, rect->height); } @@ -134,7 +134,7 @@ static void ast_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct ast_fbdev *afbdev = info->par; - sys_copyarea(info, area); + drm_fb_helper_sys_copyarea(info, area); ast_dirty_update(afbdev, area->dx, area->dy, area->width, area->height); } @@ -143,7 +143,7 @@ static void ast_imageblit(struct fb_info *info, const struct fb_image *image) { struct ast_fbdev *afbdev = info->par; - sys_imageblit(info, image); + drm_fb_helper_sys_imageblit(info, image); ast_dirty_update(afbdev, image->dx, image->dy, image->width, image->height); } @@ -193,7 +193,6 @@ static int astfb_create(struct drm_fb_helper *helper, struct drm_framebuffer *fb; struct fb_info *info; int size, ret; - struct device *device = &dev->pdev->dev; void *sysram; struct drm_gem_object *gobj = NULL; struct ast_bo *bo = NULL; @@ -217,40 +216,28 @@ static int astfb_create(struct drm_fb_helper *helper, if (!sysram) return -ENOMEM; - info = framebuffer_alloc(0, device); - if (!info) { - ret = -ENOMEM; - goto out; + info = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(info)) { + ret = PTR_ERR(info); + goto err_free_vram; } info->par = afbdev; ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj); if (ret) - goto out; + goto err_release_fbi; afbdev->sysram = sysram; afbdev->size = size; fb = &afbdev->afb.base; afbdev->helper.fb = fb; - afbdev->helper.fbdev = info; 
strcpy(info->fix.id, "astdrmfb"); info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &astfb_ops; - ret = fb_alloc_cmap(&info->cmap, 256, 0); - if (ret) { - ret = -ENOMEM; - goto out; - } - - info->apertures = alloc_apertures(1); - if (!info->apertures) { - ret = -ENOMEM; - goto out; - } info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0); info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); @@ -266,7 +253,11 @@ static int astfb_create(struct drm_fb_helper *helper, fb->width, fb->height); return 0; -out: + +err_release_fbi: + drm_fb_helper_release_fbi(helper); +err_free_vram: + vfree(afbdev->sysram); return ret; } @@ -297,15 +288,10 @@ static const struct drm_fb_helper_funcs ast_fb_helper_funcs = { static void ast_fbdev_destroy(struct drm_device *dev, struct ast_fbdev *afbdev) { - struct fb_info *info; struct ast_framebuffer *afb = &afbdev->afb; - if (afbdev->helper.fbdev) { - info = afbdev->helper.fbdev; - unregister_framebuffer(info); - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - framebuffer_release(info); - } + + drm_fb_helper_unregister_fbi(&afbdev->helper); + drm_fb_helper_release_fbi(&afbdev->helper); if (afb->obj) { drm_gem_object_unreference_unlocked(afb->obj); @@ -377,5 +363,5 @@ void ast_fbdev_set_suspend(struct drm_device *dev, int state) if (!ast->fbdev) return; - fb_set_suspend(ast->fbdev->helper.fbdev, state); + drm_fb_helper_set_suspend(&ast->fbdev->helper, state); } diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 035dacc93382..838217f8ce7d 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -571,24 +571,18 @@ ast_dumb_mmap_offset(struct drm_file *file, uint64_t *offset) { struct drm_gem_object *obj; - int ret; struct ast_bo *bo; - mutex_lock(&dev->struct_mutex); obj = drm_gem_object_lookup(dev, file, handle); - if (obj == NULL) { - ret = -ENOENT; - goto out_unlock; - } + if (obj == NULL) + return -ENOENT; bo = gem_to_ast_bo(obj); *offset = ast_bo_mmap_offset(bo); - drm_gem_object_unreference(obj); - ret = 0; -out_unlock: - mutex_unlock(&dev->struct_mutex); - return ret; + drm_gem_object_unreference_unlocked(obj); + + return 0; } diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 6fad1f9648f3..8bc62ec407f9 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -29,6 +29,115 @@ #define ATMEL_HLCDC_LAYER_IRQS_OFFSET 8 +static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9n12_layers[] = { + { + .name = "base", + .formats = &atmel_hlcdc_plane_rgb_formats, + .regs_offset = 0x40, + .id = 0, + .type = ATMEL_HLCDC_BASE_LAYER, + .nconfigs = 5, + .layout = { + .xstride = { 2 }, + .default_color = 3, + .general_config = 4, + }, + }, +}; + +static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9n12 = { + .min_width = 0, + .min_height = 0, + .max_width = 1280, + .max_height = 860, + .nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9n12_layers), + .layers = atmel_hlcdc_at91sam9n12_layers, +}; + +static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = { + { + .name = "base", + .formats = &atmel_hlcdc_plane_rgb_formats, + .regs_offset = 0x40, + .id = 0, + .type = ATMEL_HLCDC_BASE_LAYER, + .nconfigs = 5, + .layout = { + .xstride = { 2 }, + .default_color = 3, + .general_config = 4, + .disc_pos = 5, + .disc_size = 6, + }, + }, + { + .name = "overlay1", + .formats = &atmel_hlcdc_plane_rgb_formats, + .regs_offset = 0x100, + .id 
= 1, + .type = ATMEL_HLCDC_OVERLAY_LAYER, + .nconfigs = 10, + .layout = { + .pos = 2, + .size = 3, + .xstride = { 4 }, + .pstride = { 5 }, + .default_color = 6, + .chroma_key = 7, + .chroma_key_mask = 8, + .general_config = 9, + }, + }, + { + .name = "high-end-overlay", + .formats = &atmel_hlcdc_plane_rgb_and_yuv_formats, + .regs_offset = 0x280, + .id = 2, + .type = ATMEL_HLCDC_OVERLAY_LAYER, + .nconfigs = 17, + .layout = { + .pos = 2, + .size = 3, + .memsize = 4, + .xstride = { 5, 7 }, + .pstride = { 6, 8 }, + .default_color = 9, + .chroma_key = 10, + .chroma_key_mask = 11, + .general_config = 12, + .csc = 14, + }, + }, + { + .name = "cursor", + .formats = &atmel_hlcdc_plane_rgb_formats, + .regs_offset = 0x340, + .id = 3, + .type = ATMEL_HLCDC_CURSOR_LAYER, + .nconfigs = 10, + .max_width = 128, + .max_height = 128, + .layout = { + .pos = 2, + .size = 3, + .xstride = { 4 }, + .default_color = 6, + .chroma_key = 7, + .chroma_key_mask = 8, + .general_config = 9, + }, + }, +}; + +static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9x5 = { + .min_width = 0, + .min_height = 0, + .max_width = 800, + .max_height = 600, + .nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9x5_layers), + .layers = atmel_hlcdc_at91sam9x5_layers, +}; + static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = { { .name = "base", @@ -132,11 +241,105 @@ static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d3 = { .layers = atmel_hlcdc_sama5d3_layers, }; +static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = { + { + .name = "base", + .formats = &atmel_hlcdc_plane_rgb_formats, + .regs_offset = 0x40, + .id = 0, + .type = ATMEL_HLCDC_BASE_LAYER, + .nconfigs = 7, + .layout = { + .xstride = { 2 }, + .default_color = 3, + .general_config = 4, + .disc_pos = 5, + .disc_size = 6, + }, + }, + { + .name = "overlay1", + .formats = &atmel_hlcdc_plane_rgb_formats, + .regs_offset = 0x140, + .id = 1, + .type = ATMEL_HLCDC_OVERLAY_LAYER, + .nconfigs = 10, + .layout = { + .pos = 2, + .size = 3, + .xstride = { 4 }, + .pstride = { 5 }, + .default_color = 6, + .chroma_key = 7, + .chroma_key_mask = 8, + .general_config = 9, + }, + }, + { + .name = "overlay2", + .formats = &atmel_hlcdc_plane_rgb_formats, + .regs_offset = 0x240, + .id = 2, + .type = ATMEL_HLCDC_OVERLAY_LAYER, + .nconfigs = 10, + .layout = { + .pos = 2, + .size = 3, + .xstride = { 4 }, + .pstride = { 5 }, + .default_color = 6, + .chroma_key = 7, + .chroma_key_mask = 8, + .general_config = 9, + }, + }, + { + .name = "high-end-overlay", + .formats = &atmel_hlcdc_plane_rgb_and_yuv_formats, + .regs_offset = 0x340, + .id = 3, + .type = ATMEL_HLCDC_OVERLAY_LAYER, + .nconfigs = 42, + .layout = { + .pos = 2, + .size = 3, + .memsize = 4, + .xstride = { 5, 7 }, + .pstride = { 6, 8 }, + .default_color = 9, + .chroma_key = 10, + .chroma_key_mask = 11, + .general_config = 12, + .csc = 14, + }, + }, +}; + +static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d4 = { + .min_width = 0, + .min_height = 0, + .max_width = 2048, + .max_height = 2048, + .nlayers = ARRAY_SIZE(atmel_hlcdc_sama5d4_layers), + .layers = atmel_hlcdc_sama5d4_layers, +}; static const struct of_device_id atmel_hlcdc_of_match[] = { { + .compatible = "atmel,at91sam9n12-hlcdc", + .data = &atmel_hlcdc_dc_at91sam9n12, + }, + { + .compatible = "atmel,at91sam9x5-hlcdc", + .data = &atmel_hlcdc_dc_at91sam9x5, + }, + { .compatible = "atmel,sama5d3-hlcdc", .data = &atmel_hlcdc_dc_sama5d3, }, + { + .compatible = "atmel,sama5d4-hlcdc", + .data = &atmel_hlcdc_dc_sama5d4, + }, { /* sentinel 
*/ }, }; @@ -485,7 +688,9 @@ static const struct file_operations fops = { }; static struct drm_driver atmel_hlcdc_dc_driver = { - .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET, + .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | + DRIVER_MODESET | DRIVER_PRIME | + DRIVER_ATOMIC, .preclose = atmel_hlcdc_dc_preclose, .lastclose = atmel_hlcdc_dc_lastclose, .irq_handler = atmel_hlcdc_dc_irq_handler, @@ -497,6 +702,15 @@ static struct drm_driver atmel_hlcdc_dc_driver = { .disable_vblank = atmel_hlcdc_dc_disable_vblank, .gem_free_object = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, + .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, + .gem_prime_vmap = drm_gem_cma_prime_vmap, + .gem_prime_vunmap = drm_gem_cma_prime_vunmap, + .gem_prime_mmap = drm_gem_cma_prime_mmap, .dumb_create = drm_gem_cma_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, .dumb_destroy = drm_gem_dumb_destroy, @@ -559,7 +773,7 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int atmel_hlcdc_dc_drm_suspend(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c index 9c4513005310..067e4c144bd6 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c @@ -126,12 +126,16 @@ atmel_hlcdc_rgb_encoder_mode_set(struct drm_encoder *encoder, if (info->num_bus_formats) { switch (info->bus_formats[0]) { + case MEDIA_BUS_FMT_RGB565_1X16: + cfg |= ATMEL_HLCDC_CONNECTOR_RGB565 << 8; + break; case MEDIA_BUS_FMT_RGB666_1X18: cfg |= ATMEL_HLCDC_CONNECTOR_RGB666 << 8; break; case MEDIA_BUS_FMT_RGB888_1X24: cfg |= ATMEL_HLCDC_CONNECTOR_RGB888 << 8; break; + case MEDIA_BUS_FMT_RGB444_1X12: default: break; } diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c index 98837bde2d25..7f1a3604b19f 100644 --- a/drivers/gpu/drm/bochs/bochs_drv.c +++ b/drivers/gpu/drm/bochs/bochs_drv.c @@ -109,7 +109,7 @@ static int bochs_pm_suspend(struct device *dev) if (bochs->fb.initialized) { console_lock(); - fb_set_suspend(bochs->fb.helper.fbdev, 1); + drm_fb_helper_set_suspend(&bochs->fb.helper, 1); console_unlock(); } @@ -126,7 +126,7 @@ static int bochs_pm_resume(struct device *dev) if (bochs->fb.initialized) { console_lock(); - fb_set_suspend(bochs->fb.helper.fbdev, 0); + drm_fb_helper_set_suspend(&bochs->fb.helper, 0); console_unlock(); } diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c index 976d9798dc99..09a0637aab3e 100644 --- a/drivers/gpu/drm/bochs/bochs_fbdev.c +++ b/drivers/gpu/drm/bochs/bochs_fbdev.c @@ -24,9 +24,9 @@ static struct fb_ops bochsfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, - .fb_fillrect = sys_fillrect, - .fb_copyarea = sys_copyarea, - .fb_imageblit = sys_imageblit, + .fb_fillrect = drm_fb_helper_sys_fillrect, + .fb_copyarea = drm_fb_helper_sys_copyarea, + .fb_imageblit = drm_fb_helper_sys_imageblit, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, @@ -56,11 +56,9 @@ 
static int bochsfb_create(struct drm_fb_helper *helper, { struct bochs_device *bochs = container_of(helper, struct bochs_device, fb.helper); - struct drm_device *dev = bochs->dev; struct fb_info *info; struct drm_framebuffer *fb; struct drm_mode_fb_cmd2 mode_cmd; - struct device *device = &dev->pdev->dev; struct drm_gem_object *gobj = NULL; struct bochs_bo *bo = NULL; int size, ret; @@ -106,22 +104,23 @@ static int bochsfb_create(struct drm_fb_helper *helper, ttm_bo_unreserve(&bo->bo); /* init fb device */ - info = framebuffer_alloc(0, device); - if (info == NULL) - return -ENOMEM; + info = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(info)) + return PTR_ERR(info); info->par = &bochs->fb.helper; ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj); - if (ret) + if (ret) { + drm_fb_helper_release_fbi(helper); return ret; + } bochs->fb.size = size; /* setup helper */ fb = &bochs->fb.gfb.base; bochs->fb.helper.fb = fb; - bochs->fb.helper.fbdev = info; strcpy(info->fix.id, "bochsdrmfb"); @@ -139,30 +138,17 @@ static int bochsfb_create(struct drm_fb_helper *helper, info->fix.smem_start = 0; info->fix.smem_len = size; - ret = fb_alloc_cmap(&info->cmap, 256, 0); - if (ret) { - DRM_ERROR("%s: can't allocate color map\n", info->fix.id); - return -ENOMEM; - } - return 0; } static int bochs_fbdev_destroy(struct bochs_device *bochs) { struct bochs_framebuffer *gfb = &bochs->fb.gfb; - struct fb_info *info; DRM_DEBUG_DRIVER("\n"); - if (bochs->fb.helper.fbdev) { - info = bochs->fb.helper.fbdev; - - unregister_framebuffer(info); - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - framebuffer_release(info); - } + drm_fb_helper_unregister_fbi(&bochs->fb.helper); + drm_fb_helper_release_fbi(&bochs->fb.helper); if (gfb->obj) { drm_gem_object_unreference_unlocked(gfb->obj); diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c index 66286ff518d4..f69e6bf9bb0e 100644 --- a/drivers/gpu/drm/bochs/bochs_mm.c +++ b/drivers/gpu/drm/bochs/bochs_mm.c @@ -454,25 +454,17 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev, uint32_t handle, uint64_t *offset) { struct drm_gem_object *obj; - int ret; struct bochs_bo *bo; - mutex_lock(&dev->struct_mutex); obj = drm_gem_object_lookup(dev, file, handle); - if (obj == NULL) { - ret = -ENOENT; - goto out_unlock; - } + if (obj == NULL) + return -ENOENT; bo = gem_to_bochs_bo(obj); *offset = bochs_bo_mmap_offset(bo); - drm_gem_object_unreference(obj); - ret = 0; -out_unlock: - mutex_unlock(&dev->struct_mutex); - return ret; - + drm_gem_object_unreference_unlocked(obj); + return 0; } /* ---------------------------------------------------------------------- */ diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index acef3223772c..2de52a53a803 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -1,24 +1,32 @@ +config DRM_BRIDGE + def_bool y + depends on DRM + help + Bridge registration and lookup framework. + +menu "Display Interface Bridges" + depends on DRM && DRM_BRIDGE + config DRM_DW_HDMI tristate - depends on DRM select DRM_KMS_HELPER -config DRM_PTN3460 - tristate "PTN3460 DP/LVDS bridge" - depends on DRM +config DRM_NXP_PTN3460 + tristate "NXP PTN3460 DP/LVDS bridge" depends on OF select DRM_KMS_HELPER select DRM_PANEL ---help--- - ptn3460 eDP-LVDS bridge chip driver. + NXP PTN3460 eDP-LVDS bridge chip driver. 
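The armada, ast, bochs and cirrus hunks above all apply the same conversion, and the drm_fb_helper additions are what make it possible: drm_fb_helper_alloc_fbi() centralizes the fb_info, cmap and aperture allocation, and the drm_fb_helper_sys_*/cfb_* wrappers replace direct sys_*/cfb_* calls. A hedged composite sketch of the before/after pattern; the toy_* names are hypothetical:

static struct fb_ops toy_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = drm_fb_helper_sys_fillrect,	/* was sys_fillrect */
	.fb_copyarea = drm_fb_helper_sys_copyarea,	/* was sys_copyarea */
	.fb_imageblit = drm_fb_helper_sys_imageblit,	/* was sys_imageblit */
};

static int toy_fbdev_create(struct drm_fb_helper *helper)
{
	struct fb_info *info;

	/* replaces framebuffer_alloc() + fb_alloc_cmap() + alloc_apertures()
	 * and the helper->fbdev assignment */
	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info))
		return PTR_ERR(info);

	info->fbops = &toy_fb_ops;
	/* ... fill fix/var, set screen_base/screen_size ... */
	return 0;
}

static void toy_fbdev_destroy(struct drm_fb_helper *helper)
{
	/* replaces unregister_framebuffer()/fb_dealloc_cmap()/
	 * framebuffer_release() */
	drm_fb_helper_unregister_fbi(helper);
	drm_fb_helper_release_fbi(helper);
}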
-config DRM_PS8622 +config DRM_PARADE_PS8622 tristate "Parade eDP/LVDS bridge" - depends on DRM depends on OF select DRM_PANEL select DRM_KMS_HELPER select BACKLIGHT_LCD_SUPPORT select BACKLIGHT_CLASS_DEVICE ---help--- - parade eDP-LVDS bridge chip driver. + Parade eDP-LVDS bridge chip driver. + +endmenu diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 8dfebd984370..e2eef1c2f4c3 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -1,5 +1,5 @@ ccflags-y := -Iinclude/drm -obj-$(CONFIG_DRM_PS8622) += ps8622.o -obj-$(CONFIG_DRM_PTN3460) += ptn3460.o obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o +obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o +obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c index 1b1bf2384815..1b1bf2384815 100644 --- a/drivers/gpu/drm/bridge/ptn3460.c +++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c diff --git a/drivers/gpu/drm/bridge/ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c index 1a6607beb29f..1a6607beb29f 100644 --- a/drivers/gpu/drm/bridge/ps8622.c +++ b/drivers/gpu/drm/bridge/parade-ps8622.c diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c index b9140032962d..b1619e29a564 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -92,7 +92,7 @@ static int cirrus_pm_suspend(struct device *dev) if (cdev->mode_info.gfbdev) { console_lock(); - fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 1); + drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 1); console_unlock(); } @@ -109,7 +109,7 @@ static int cirrus_pm_resume(struct device *dev) if (cdev->mode_info.gfbdev) { console_lock(); - fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 0); + drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 0); console_unlock(); } diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c index 13ddf1c4bb8e..589103bcc06c 100644 --- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c +++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c @@ -98,7 +98,7 @@ static void cirrus_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct cirrus_fbdev *afbdev = info->par; - sys_fillrect(info, rect); + drm_fb_helper_sys_fillrect(info, rect); cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width, rect->height); } @@ -107,7 +107,7 @@ static void cirrus_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct cirrus_fbdev *afbdev = info->par; - sys_copyarea(info, area); + drm_fb_helper_sys_copyarea(info, area); cirrus_dirty_update(afbdev, area->dx, area->dy, area->width, area->height); } @@ -116,7 +116,7 @@ static void cirrus_imageblit(struct fb_info *info, const struct fb_image *image) { struct cirrus_fbdev *afbdev = info->par; - sys_imageblit(info, image); + drm_fb_helper_sys_imageblit(info, image); cirrus_dirty_update(afbdev, image->dx, image->dy, image->width, image->height); } @@ -165,12 +165,10 @@ static int cirrusfb_create(struct drm_fb_helper *helper, { struct cirrus_fbdev *gfbdev = container_of(helper, struct cirrus_fbdev, helper); - struct drm_device *dev = gfbdev->helper.dev; struct cirrus_device *cdev = gfbdev->helper.dev->dev_private; struct fb_info *info; struct drm_framebuffer *fb; struct drm_mode_fb_cmd2 mode_cmd; - struct device *device = &dev->pdev->dev; void *sysram; struct drm_gem_object *gobj = NULL; struct cirrus_bo *bo = NULL; @@ -195,9 +193,9 @@ static int cirrusfb_create(struct drm_fb_helper 
*helper, if (!sysram) return -ENOMEM; - info = framebuffer_alloc(0, device); - if (info == NULL) - return -ENOMEM; + info = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(info)) + return PTR_ERR(info); info->par = gfbdev; @@ -216,11 +214,9 @@ static int cirrusfb_create(struct drm_fb_helper *helper, /* setup helper */ gfbdev->helper.fb = fb; - gfbdev->helper.fbdev = info; strcpy(info->fix.id, "cirrusdrmfb"); - info->flags = FBINFO_DEFAULT; info->fbops = &cirrusfb_ops; @@ -229,11 +225,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper, sizes->fb_height); /* setup aperture base/size for vesafb takeover */ - info->apertures = alloc_apertures(1); - if (!info->apertures) { - ret = -ENOMEM; - goto out_iounmap; - } info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base; info->apertures->ranges[0].size = cdev->mc.vram_size; @@ -246,13 +237,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper, info->fix.mmio_start = 0; info->fix.mmio_len = 0; - ret = fb_alloc_cmap(&info->cmap, 256, 0); - if (ret) { - DRM_ERROR("%s: can't allocate color map\n", info->fix.id); - ret = -ENOMEM; - goto out_iounmap; - } - DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start); DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len); @@ -260,24 +244,15 @@ static int cirrusfb_create(struct drm_fb_helper *helper, DRM_INFO(" pitch is %d\n", fb->pitches[0]); return 0; -out_iounmap: - return ret; } static int cirrus_fbdev_destroy(struct drm_device *dev, struct cirrus_fbdev *gfbdev) { - struct fb_info *info; struct cirrus_framebuffer *gfb = &gfbdev->gfb; - if (gfbdev->helper.fbdev) { - info = gfbdev->helper.fbdev; - - unregister_framebuffer(info); - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - framebuffer_release(info); - } + drm_fb_helper_unregister_fbi(&gfbdev->helper); + drm_fb_helper_release_fbi(&gfbdev->helper); if (gfb->obj) { drm_gem_object_unreference_unlocked(gfb->obj); diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c index e4b976658087..055fd86ba717 100644 --- a/drivers/gpu/drm/cirrus/cirrus_main.c +++ b/drivers/gpu/drm/cirrus/cirrus_main.c @@ -293,25 +293,18 @@ cirrus_dumb_mmap_offset(struct drm_file *file, uint64_t *offset) { struct drm_gem_object *obj; - int ret; struct cirrus_bo *bo; - mutex_lock(&dev->struct_mutex); obj = drm_gem_object_lookup(dev, file, handle); - if (obj == NULL) { - ret = -ENOENT; - goto out_unlock; - } + if (obj == NULL) + return -ENOENT; bo = gem_to_cirrus_bo(obj); *offset = cirrus_bo_mmap_offset(bo); - drm_gem_object_unreference(obj); - ret = 0; -out_unlock: - mutex_unlock(&dev->struct_mutex); - return ret; + drm_gem_object_unreference_unlocked(obj); + return 0; } bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height, diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 3efd91c0c6cb..1066e4b658cf 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -153,9 +153,15 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) if (!connector) continue; - WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); - - connector->funcs->atomic_destroy_state(connector, + /* + * FIXME: Async commits can race with connector unplugging and + * there's currently nothing that prevents cleaning up state for
As long as the callback doesn't look at + * the connector we'll be fine though, so make sure that's the + * case by setting all connector pointers to NULL. + */ + state->connector_states[i]->connector = NULL; + connector->funcs->atomic_destroy_state(NULL, state->connector_states[i]); state->connectors[i] = NULL; state->connector_states[i] = NULL; @@ -1224,6 +1230,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state) } } + if (ret == 0) + ww_acquire_done(&state->acquire_ctx->ww_ctx); + return ret; } EXPORT_SYMBOL(drm_atomic_check_only); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index cf27b6b605d8..d432348837a5 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -307,7 +307,7 @@ mode_fixup(struct drm_atomic_state *state) encoder->base.id, encoder->name); return ret; } - } else { + } else if (funcs->mode_fixup) { ret = funcs->mode_fixup(encoder, &crtc_state->mode, &crtc_state->adjusted_mode); if (!ret) { @@ -966,7 +966,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, continue; old_crtc_state->enable = true; - old_crtc_state->last_vblank_count = drm_vblank_count(dev, i); + old_crtc_state->last_vblank_count = drm_crtc_vblank_count(crtc); } for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { @@ -975,7 +975,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, ret = wait_event_timeout(dev->vblank[i].queue, old_crtc_state->last_vblank_count != - drm_vblank_count(dev, i), + drm_crtc_vblank_count(crtc), msecs_to_jiffies(50)); drm_crtc_vblank_put(crtc); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index ca077657604e..33d877c65ced 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1151,7 +1151,7 @@ EXPORT_SYMBOL(drm_encoder_cleanup); int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, const struct drm_plane_funcs *funcs, - const uint32_t *formats, uint32_t format_count, + const uint32_t *formats, unsigned int format_count, enum drm_plane_type type) { struct drm_mode_config *config = &dev->mode_config; @@ -1225,7 +1225,7 @@ EXPORT_SYMBOL(drm_universal_plane_init); int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, const struct drm_plane_funcs *funcs, - const uint32_t *formats, uint32_t format_count, + const uint32_t *formats, unsigned int format_count, bool is_primary) { enum drm_plane_type type; @@ -5273,9 +5273,11 @@ void drm_mode_config_reset(struct drm_device *dev) if (encoder->funcs->reset) encoder->funcs->reset(encoder); + mutex_lock(&dev->mode_config.mutex); drm_for_each_connector(connector, dev) if (connector->funcs->reset) connector->funcs->reset(connector); + mutex_unlock(&dev->mode_config.mutex); } EXPORT_SYMBOL(drm_mode_config_reset); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index b0487c9f018c..e23df5fd3836 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -873,9 +873,10 @@ static void drm_dp_destroy_port(struct kref *kref) from an EDID retrieval */ if (port->connector) { mutex_lock(&mgr->destroy_connector_lock); - list_add(&port->connector->destroy_list, &mgr->destroy_connector_list); + list_add(&port->next, &mgr->destroy_connector_list); mutex_unlock(&mgr->destroy_connector_lock); schedule_work(&mgr->destroy_connector_work); + return; } drm_dp_port_teardown_pdt(port, port->pdt); @@ -2631,6 +2632,16 @@ void 
drm_dp_mst_dump_topology(struct seq_file *m, seq_printf(m, "%02x ", buf[i]); seq_printf(m, "\n"); + /* dump the standard OUI branch header */ + ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE); + seq_printf(m, "branch oui: "); + for (i = 0; i < 0x3; i++) + seq_printf(m, "%02x", buf[i]); + seq_printf(m, " devid: "); + for (i = 0x3; i < 0x8; i++) + seq_printf(m, "%c", buf[i]); + seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]); + seq_printf(m, "\n"); bret = dump_dp_payload_table(mgr, buf); if (bret == true) { seq_printf(m, "payload table: "); @@ -2659,7 +2670,7 @@ static void drm_dp_tx_work(struct work_struct *work) static void drm_dp_destroy_connector_work(struct work_struct *work) { struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); - struct drm_connector *connector; + struct drm_dp_mst_port *port; /* * Not a regular list traverse as we have to drop the destroy @@ -2668,15 +2679,21 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) */ for (;;) { mutex_lock(&mgr->destroy_connector_lock); - connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list); - if (!connector) { + port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next); + if (!port) { mutex_unlock(&mgr->destroy_connector_lock); break; } - list_del(&connector->destroy_list); + list_del(&port->next); mutex_unlock(&mgr->destroy_connector_lock); - mgr->cbs->destroy_connector(mgr, connector); + mgr->cbs->destroy_connector(mgr, port->connector); + + drm_dp_port_teardown_pdt(port, port->pdt); + + if (!port->input && port->vcpi.vcpi > 0) + drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); + kfree(port); } } diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index e6e05bb75a77..05bb7311ac5d 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3802,7 +3802,7 @@ int drm_add_modes_noedid(struct drm_connector *connector, struct drm_display_mode *mode; struct drm_device *dev = connector->dev; - count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); + count = ARRAY_SIZE(drm_dmt_modes); if (hdisplay < 0) hdisplay = 0; if (vdisplay < 0) diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index f01dc25df2dc..c19a62561183 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -222,9 +222,9 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show); static struct fb_ops drm_fbdev_cma_ops = { .owner = THIS_MODULE, - .fb_fillrect = sys_fillrect, - .fb_copyarea = sys_copyarea, - .fb_imageblit = sys_imageblit, + .fb_fillrect = drm_fb_helper_sys_fillrect, + .fb_copyarea = drm_fb_helper_sys_copyarea, + .fb_imageblit = drm_fb_helper_sys_imageblit, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_blank = drm_fb_helper_blank, @@ -263,10 +263,9 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper, if (IS_ERR(obj)) return -ENOMEM; - fbi = framebuffer_alloc(0, dev->dev); - if (!fbi) { - dev_err(dev->dev, "Failed to allocate framebuffer info.\n"); - ret = -ENOMEM; + fbi = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(fbi)) { + ret = PTR_ERR(fbi); goto err_drm_gem_cma_free_object; } @@ -274,23 +273,16 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper, if (IS_ERR(fbdev_cma->fb)) { dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n"); ret = 
PTR_ERR(fbdev_cma->fb); - goto err_framebuffer_release; + goto err_fb_info_destroy; } fb = &fbdev_cma->fb->fb; helper->fb = fb; - helper->fbdev = fbi; fbi->par = helper; fbi->flags = FBINFO_FLAG_DEFAULT; fbi->fbops = &drm_fbdev_cma_ops; - ret = fb_alloc_cmap(&fbi->cmap, 256, 0); - if (ret) { - dev_err(dev->dev, "Failed to allocate color map.\n"); - goto err_drm_fb_cma_destroy; - } - drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); @@ -305,11 +297,8 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper, return 0; -err_drm_fb_cma_destroy: - drm_framebuffer_unregister_private(fb); - drm_fb_cma_destroy(fb); -err_framebuffer_release: - framebuffer_release(fbi); +err_fb_info_destroy: + drm_fb_helper_release_fbi(helper); err_drm_gem_cma_free_object: drm_gem_cma_free_object(&obj->base); return ret; @@ -385,20 +374,8 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init); */ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma) { - if (fbdev_cma->fb_helper.fbdev) { - struct fb_info *info; - int ret; - - info = fbdev_cma->fb_helper.fbdev; - ret = unregister_framebuffer(info); - if (ret < 0) - DRM_DEBUG_KMS("failed unregister_framebuffer()\n"); - - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - - framebuffer_release(info); - } + drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper); + drm_fb_helper_release_fbi(&fbdev_cma->fb_helper); if (fbdev_cma->fb) { drm_framebuffer_unregister_private(&fbdev_cma->fb->fb); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 73f90f7e2f74..418d299f3b12 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -56,8 +56,8 @@ static LIST_HEAD(kernel_fb_helper_list); * Teardown is done with drm_fb_helper_fini(). * * At runtime drivers should restore the fbdev console by calling - * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They - * should also notify the fb helper code from updates to the output + * drm_fb_helper_restore_fbdev_mode_unlocked() from their ->lastclose callback. + * They should also notify the fb helper code from updates to the output * configuration by calling drm_fb_helper_hotplug_event(). For easier * integration with the output polling code in drm_crtc_helper.c the modeset * code provides a ->output_poll_changed callback. @@ -168,11 +168,14 @@ static void remove_from_modeset(struct drm_mode_set *set, } set->num_connectors--; - /* because i915 is pissy about this.. + /* * TODO maybe need to makes sure we set it back to !=NULL somewhere? */ - if (set->num_connectors == 0) + if (set->num_connectors == 0) { set->fb = NULL; + drm_mode_destroy(connector->dev, set->mode); + set->mode = NULL; + } } int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, @@ -354,21 +357,6 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper) } return error; } -/** - * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration - * @fb_helper: fbcon to restore - * - * This should be called from driver's drm ->lastclose callback - * when implementing an fbcon on top of kms using this helper. This ensures that - * the user isn't greeted with a black screen when e.g. X dies. - * - * Use this variant if you need to bypass locking (panic), or already - * hold all modeset locks. 
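In driver terms, the documented pattern above reduces to a one-line ->lastclose hook. A minimal sketch, assuming a hypothetical foo driver (the foo_* names are illustrative, not part of this series):

	/* Hypothetical ->lastclose hook restoring the fbdev console. */
	static void foo_lastclose(struct drm_device *dev)
	{
		struct foo_device *foo = dev->dev_private;	/* illustrative */

		/* Takes the modeset locks itself, so plain process context only. */
		drm_fb_helper_restore_fbdev_mode_unlocked(&foo->fbdev_helper);
	}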
Otherwise use drm_fb_helper_restore_fbdev_mode_unlocked() - */ -static bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper) -{ - return restore_fbdev_mode(fb_helper); -} /** * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration @@ -398,42 +386,6 @@ bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) } EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked); -/* - * restore fbcon display for all kms driver's using this helper, used for sysrq - * and panic handling. - */ -static bool drm_fb_helper_force_kernel_mode(void) -{ - bool ret, error = false; - struct drm_fb_helper *helper; - - if (list_empty(&kernel_fb_helper_list)) - return false; - - list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { - struct drm_device *dev = helper->dev; - - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) - continue; - - /* - * NOTE: Use trylock mode to avoid deadlocks and sleeping in - * panic context. - */ - if (__drm_modeset_lock_all(dev, true) != 0) { - error = true; - continue; - } - - ret = drm_fb_helper_restore_fbdev_mode(helper); - if (ret) - error = true; - - drm_modeset_unlock_all(dev); - } - return error; -} - static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper) { struct drm_device *dev = fb_helper->dev; @@ -459,6 +411,33 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper) } #ifdef CONFIG_MAGIC_SYSRQ +/* + * restore fbcon display for all kms driver's using this helper, used for sysrq + * and panic handling. + */ +static bool drm_fb_helper_force_kernel_mode(void) +{ + bool ret, error = false; + struct drm_fb_helper *helper; + + if (list_empty(&kernel_fb_helper_list)) + return false; + + list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { + struct drm_device *dev = helper->dev; + + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + continue; + + drm_modeset_lock_all(dev); + ret = restore_fbdev_mode(helper); + if (ret) + error = true; + drm_modeset_unlock_all(dev); + } + return error; +} + static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) { bool ret; @@ -491,14 +470,6 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) int i, j; /* - * fbdev->blank can be called from irq context in case of a panic. - * Since we already have our own special panic handler which will - * restore the fbdev console mode completely, just bail out early. - */ - if (oops_in_progress) - return; - - /* * For each CRTC in this fb, turn the connectors on/off. */ drm_modeset_lock_all(dev); @@ -531,6 +502,9 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) */ int drm_fb_helper_blank(int blank, struct fb_info *info) { + if (oops_in_progress) + return -EBUSY; + switch (blank) { /* Display: On; HSync: On, VSync: On */ case FB_BLANK_UNBLANK: @@ -654,6 +628,86 @@ out_free: } EXPORT_SYMBOL(drm_fb_helper_init); +/** + * drm_fb_helper_alloc_fbi - allocate fb_info and some of its members + * @fb_helper: driver-allocated fbdev helper + * + * A helper to alloc fb_info and the members cmap and apertures. Called + * by the driver within the fb_probe fb_helper callback function. 
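To see how the new allocator slots into a driver, here is a minimal fb_probe-style sketch (hypothetical foo_* names, error handling trimmed). Note that the error path frees everything with a single drm_fb_helper_release_fbi() call instead of the hand-rolled cmap/framebuffer cleanup removed from cirrus and the CMA helper above:

	/* Hypothetical fb_probe callback using drm_fb_helper_alloc_fbi(). */
	static int foo_fbdev_create(struct drm_fb_helper *helper,
				    struct drm_fb_helper_surface_size *sizes)
	{
		struct fb_info *info;
		int ret;

		info = drm_fb_helper_alloc_fbi(helper);	/* cmap + apertures included */
		if (IS_ERR(info))
			return PTR_ERR(info);

		info->par = helper;
		info->fbops = &foo_fb_ops;			/* hypothetical fb_ops */
		ret = foo_setup_framebuffer(helper, sizes, info);	/* illustrative */
		if (ret)
			goto err_release;
		return 0;

	err_release:
		drm_fb_helper_release_fbi(helper);
		return ret;
	}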
+ * + * RETURNS: + * fb_info pointer if things went okay, pointer containing error code + * otherwise + */ +struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper) +{ + struct device *dev = fb_helper->dev->dev; + struct fb_info *info; + int ret; + + info = framebuffer_alloc(0, dev); + if (!info) + return ERR_PTR(-ENOMEM); + + ret = fb_alloc_cmap(&info->cmap, 256, 0); + if (ret) + goto err_release; + + info->apertures = alloc_apertures(1); + if (!info->apertures) { + ret = -ENOMEM; + goto err_free_cmap; + } + + fb_helper->fbdev = info; + + return info; + +err_free_cmap: + fb_dealloc_cmap(&info->cmap); +err_release: + framebuffer_release(info); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(drm_fb_helper_alloc_fbi); + +/** + * drm_fb_helper_unregister_fbi - unregister fb_info framebuffer device + * @fb_helper: driver-allocated fbdev helper + * + * A wrapper around unregister_framebuffer, to release the fb_info + * framebuffer device + */ +void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper) +{ + if (fb_helper && fb_helper->fbdev) + unregister_framebuffer(fb_helper->fbdev); +} +EXPORT_SYMBOL(drm_fb_helper_unregister_fbi); + +/** + * drm_fb_helper_release_fbi - dealloc fb_info and its members + * @fb_helper: driver-allocated fbdev helper + * + * A helper to free memory taken by fb_info and the members cmap and + * apertures + */ +void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper) +{ + if (fb_helper) { + struct fb_info *info = fb_helper->fbdev; + + if (info) { + if (info->cmap.len) + fb_dealloc_cmap(&info->cmap); + framebuffer_release(info); + } + + fb_helper->fbdev = NULL; + } +} +EXPORT_SYMBOL(drm_fb_helper_release_fbi); + void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) { if (!list_empty(&fb_helper->kernel_fb_list)) { @@ -668,6 +722,149 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) } EXPORT_SYMBOL(drm_fb_helper_fini); +/** + * drm_fb_helper_unlink_fbi - wrapper around unlink_framebuffer + * @fb_helper: driver-allocated fbdev helper + * + * A wrapper around unlink_framebuffer implemented by fbdev core + */ +void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper) +{ + if (fb_helper && fb_helper->fbdev) + unlink_framebuffer(fb_helper->fbdev); +} +EXPORT_SYMBOL(drm_fb_helper_unlink_fbi); + +/** + * drm_fb_helper_sys_read - wrapper around fb_sys_read + * @info: fb_info struct pointer + * @buf: userspace buffer to read from framebuffer memory + * @count: number of bytes to read from framebuffer memory + * @ppos: read offset within framebuffer memory + * + * A wrapper around fb_sys_read implemented by fbdev core + */ +ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, + size_t count, loff_t *ppos) +{ + return fb_sys_read(info, buf, count, ppos); +} +EXPORT_SYMBOL(drm_fb_helper_sys_read); + +/** + * drm_fb_helper_sys_write - wrapper around fb_sys_write + * @info: fb_info struct pointer + * @buf: userspace buffer to write to framebuffer memory + * @count: number of bytes to write to framebuffer memory + * @ppos: write offset within framebuffer memory + * + * A wrapper around fb_sys_write implemented by fbdev core + */ +ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos) +{ + return fb_sys_write(info, buf, count, ppos); +} +EXPORT_SYMBOL(drm_fb_helper_sys_write); + +/** + * drm_fb_helper_sys_fillrect - wrapper around sys_fillrect + * @info: fbdev registered by the helper + * @rect: info about rectangle to fill + * + * A wrapper around sys_fillrect implemented 
by fbdev core + */ +void drm_fb_helper_sys_fillrect(struct fb_info *info, + const struct fb_fillrect *rect) +{ + sys_fillrect(info, rect); +} +EXPORT_SYMBOL(drm_fb_helper_sys_fillrect); + +/** + * drm_fb_helper_sys_copyarea - wrapper around sys_copyarea + * @info: fbdev registered by the helper + * @area: info about area to copy + * + * A wrapper around sys_copyarea implemented by fbdev core + */ +void drm_fb_helper_sys_copyarea(struct fb_info *info, + const struct fb_copyarea *area) +{ + sys_copyarea(info, area); +} +EXPORT_SYMBOL(drm_fb_helper_sys_copyarea); + +/** + * drm_fb_helper_sys_imageblit - wrapper around sys_imageblit + * @info: fbdev registered by the helper + * @image: info about image to blit + * + * A wrapper around sys_imageblit implemented by fbdev core + */ +void drm_fb_helper_sys_imageblit(struct fb_info *info, + const struct fb_image *image) +{ + sys_imageblit(info, image); +} +EXPORT_SYMBOL(drm_fb_helper_sys_imageblit); + +/** + * drm_fb_helper_cfb_fillrect - wrapper around cfb_fillrect + * @info: fbdev registered by the helper + * @rect: info about rectangle to fill + * + * A wrapper around cfb_imageblit implemented by fbdev core + */ +void drm_fb_helper_cfb_fillrect(struct fb_info *info, + const struct fb_fillrect *rect) +{ + cfb_fillrect(info, rect); +} +EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect); + +/** + * drm_fb_helper_cfb_copyarea - wrapper around cfb_copyarea + * @info: fbdev registered by the helper + * @area: info about area to copy + * + * A wrapper around cfb_copyarea implemented by fbdev core + */ +void drm_fb_helper_cfb_copyarea(struct fb_info *info, + const struct fb_copyarea *area) +{ + cfb_copyarea(info, area); +} +EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea); + +/** + * drm_fb_helper_cfb_imageblit - wrapper around cfb_imageblit + * @info: fbdev registered by the helper + * @image: info about image to blit + * + * A wrapper around cfb_imageblit implemented by fbdev core + */ +void drm_fb_helper_cfb_imageblit(struct fb_info *info, + const struct fb_image *image) +{ + cfb_imageblit(info, image); +} +EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit); + +/** + * drm_fb_helper_set_suspend - wrapper around fb_set_suspend + * @fb_helper: driver-allocated fbdev helper + * @state: desired state, zero to resume, non-zero to suspend + * + * A wrapper around fb_set_suspend implemented by fbdev core + */ +void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state) +{ + if (fb_helper && fb_helper->fbdev) + fb_set_suspend(fb_helper->fbdev, state); +} +EXPORT_SYMBOL(drm_fb_helper_set_suspend); + static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, u16 regno, struct fb_info *info) { @@ -755,9 +952,10 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) int i, j, rc = 0; int start; - if (__drm_modeset_lock_all(dev, !!oops_in_progress)) { + if (oops_in_progress) return -EBUSY; - } + + drm_modeset_lock_all(dev); if (!drm_fb_helper_is_bound(fb_helper)) { drm_modeset_unlock_all(dev); return -EBUSY; @@ -906,6 +1104,9 @@ int drm_fb_helper_set_par(struct fb_info *info) struct drm_fb_helper *fb_helper = info->par; struct fb_var_screeninfo *var = &info->var; + if (oops_in_progress) + return -EBUSY; + if (var->pixclock != 0) { DRM_ERROR("PIXEL CLOCK SET\n"); return -EINVAL; @@ -931,9 +1132,10 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, int ret = 0; int i; - if (__drm_modeset_lock_all(dev, !!oops_in_progress)) { + if (oops_in_progress) return -EBUSY; - } + + drm_modeset_lock_all(dev); if 
(!drm_fb_helper_is_bound(fb_helper)) { drm_modeset_unlock_all(dev); return -EBUSY; diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 27a4228b4343..3c2d4abd71c5 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -766,7 +766,7 @@ drm_gem_object_free(struct kref *kref) struct drm_gem_object *obj = (struct drm_gem_object *) kref; struct drm_device *dev = obj->dev; - BUG_ON(!mutex_is_locked(&dev->struct_mutex)); + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); if (dev->driver->gem_free_object != NULL) dev->driver->gem_free_object(obj); diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 9edad11dca98..86cc793cdf79 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -289,20 +289,15 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, { struct drm_gem_object *gem_obj; - mutex_lock(&drm->struct_mutex); - gem_obj = drm_gem_object_lookup(drm, file_priv, handle); if (!gem_obj) { dev_err(drm->dev, "failed to lookup GEM object\n"); - mutex_unlock(&drm->struct_mutex); return -EINVAL; } *offset = drm_vma_node_offset_addr(&gem_obj->vma_node); - drm_gem_object_unreference(gem_obj); - - mutex_unlock(&drm->struct_mutex); + drm_gem_object_unreference_unlocked(gem_obj); return 0; } diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index b1d303fa2327..9a860ca1e9d7 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -480,7 +480,7 @@ static int drm_version(struct drm_device *dev, void *data, * indicated permissions. If so, returns zero. Otherwise returns an * error code suitable for ioctl return. */ -static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) +int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) { /* ROOT_ONLY is only for CAP_SYS_ADMIN */ if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN))) @@ -508,6 +508,7 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) return 0; } +EXPORT_SYMBOL(drm_ioctl_permit); #define DRM_IOCTL_DEF(ioctl, _func, _flags) \ [DRM_IOCTL_NR(ioctl)] = { \ diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index ee14324522ce..22d207e211e7 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -43,8 +43,8 @@ #include <linux/export.h> /* Access macro for slots in vblank timestamp ringbuffer. */ -#define vblanktimestamp(dev, crtc, count) \ - ((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE]) +#define vblanktimestamp(dev, pipe, count) \ + ((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE]) /* Retry timestamp calculation up to 3 times to satisfy * drm_timestamp_precision before giving up. @@ -57,7 +57,7 @@ #define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000 static bool -drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, +drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, struct timeval *tvblank, unsigned flags); static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ @@ -107,7 +107,7 @@ static void store_vblank(struct drm_device *dev, int crtc, /** * drm_update_vblank_count - update the master vblank counter * @dev: DRM device - * @crtc: counter to update + * @pipe: counter to update * * Call back into the driver to update the appropriate vblank counter * (specified by @crtc). 
Deal with wraparound, if it occurred, and @@ -120,9 +120,9 @@ static void store_vblank(struct drm_device *dev, int crtc, * Note: caller must hold dev->vbl_lock since this reads & writes * device vblank fields. */ -static void drm_update_vblank_count(struct drm_device *dev, int crtc) +static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; u32 cur_vblank, diff; bool rc; struct timeval t_vblank; @@ -140,21 +140,21 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) * corresponding vblank timestamp. */ do { - cur_vblank = dev->driver->get_vblank_counter(dev, crtc); - rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0); - } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc)); + cur_vblank = dev->driver->get_vblank_counter(dev, pipe); + rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, 0); + } while (cur_vblank != dev->driver->get_vblank_counter(dev, pipe)); /* Deal with counter wrap */ diff = cur_vblank - vblank->last; if (cur_vblank < vblank->last) { diff += dev->max_vblank_count + 1; - DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n", - crtc, vblank->last, cur_vblank, diff); + DRM_DEBUG("last_vblank[%u]=0x%x, cur_vblank=0x%x => diff=0x%x\n", + pipe, vblank->last, cur_vblank, diff); } - DRM_DEBUG("updating vblank count on crtc %d, missed %d\n", - crtc, diff); + DRM_DEBUG("updating vblank count on crtc %u, missed %d\n", + pipe, diff); if (diff == 0) return; @@ -167,7 +167,7 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) if (!rc) t_vblank = (struct timeval) {0, 0}; - store_vblank(dev, crtc, diff, &t_vblank); + store_vblank(dev, pipe, diff, &t_vblank); } /* @@ -176,9 +176,9 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) * are preserved, even if there are any spurious vblank irq's after * disable. */ -static void vblank_disable_and_save(struct drm_device *dev, int crtc) +static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; unsigned long irqflags; u32 vblcount; s64 diff_ns; @@ -206,8 +206,8 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) * vblank interrupt is disabled. */ if (!vblank->enabled && - drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0)) { - drm_update_vblank_count(dev, crtc); + drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0)) { + drm_update_vblank_count(dev, pipe); spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); return; } @@ -218,7 +218,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) * hardware potentially runtime suspended. */ if (vblank->enabled) { - dev->driver->disable_vblank(dev, crtc); + dev->driver->disable_vblank(dev, pipe); vblank->enabled = false; } @@ -235,9 +235,9 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) * delayed gpu counter increment. 
*/ do { - vblank->last = dev->driver->get_vblank_counter(dev, crtc); - vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0); - } while (vblank->last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc); + vblank->last = dev->driver->get_vblank_counter(dev, pipe); + vblrc = drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0); + } while (vblank->last != dev->driver->get_vblank_counter(dev, pipe) && (--count) && vblrc); if (!count) vblrc = 0; @@ -247,7 +247,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) */ vblcount = vblank->count; diff_ns = timeval_to_ns(&tvblank) - - timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); + timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount)); /* If there is at least 1 msec difference between the last stored * timestamp and tvblank, then we are currently executing our @@ -262,7 +262,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) * hope for the best. */ if (vblrc && (abs64(diff_ns) > 1000000)) - store_vblank(dev, crtc, 1, &tvblank); + store_vblank(dev, pipe, 1, &tvblank); spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); } @@ -271,16 +271,16 @@ static void vblank_disable_fn(unsigned long arg) { struct drm_vblank_crtc *vblank = (void *)arg; struct drm_device *dev = vblank->dev; + unsigned int pipe = vblank->pipe; unsigned long irqflags; - int crtc = vblank->crtc; if (!dev->vblank_disable_allowed) return; spin_lock_irqsave(&dev->vbl_lock, irqflags); if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) { - DRM_DEBUG("disabling vblank on crtc %d\n", crtc); - vblank_disable_and_save(dev, crtc); + DRM_DEBUG("disabling vblank on crtc %u\n", pipe); + vblank_disable_and_save(dev, pipe); } spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } @@ -293,14 +293,14 @@ static void vblank_disable_fn(unsigned long arg) */ void drm_vblank_cleanup(struct drm_device *dev) { - int crtc; + unsigned int pipe; /* Bail if the driver didn't call drm_vblank_init() */ if (dev->num_crtcs == 0) return; - for (crtc = 0; crtc < dev->num_crtcs; crtc++) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + for (pipe = 0; pipe < dev->num_crtcs; pipe++) { + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; WARN_ON(vblank->enabled && drm_core_check_feature(dev, DRIVER_MODESET)); @@ -316,17 +316,18 @@ EXPORT_SYMBOL(drm_vblank_cleanup); /** * drm_vblank_init - initialize vblank support - * @dev: drm_device - * @num_crtcs: number of crtcs supported by @dev + * @dev: DRM device + * @num_crtcs: number of CRTCs supported by @dev * * This function initializes vblank support for @num_crtcs display pipelines. * * Returns: * Zero on success or a negative error code on failure. 
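A usage sketch of the init-time setup and the get/put refcounting that the following hunks rework (hypothetical foo_* driver code):

	/* Hypothetical driver flow around the vblank machinery. */
	static int foo_driver_load(struct drm_device *dev)
	{
		/* One drm_vblank_crtc is allocated per pipe; 2 is arbitrary. */
		return drm_vblank_init(dev, 2);
	}

	static void foo_sample_frame_counter(struct drm_crtc *crtc)
	{
		if (drm_crtc_vblank_get(crtc))	/* 0->1 re-enables the irq */
			return;
		/* ... use drm_crtc_vblank_count(crtc) while the ref is held ... */
		drm_crtc_vblank_put(crtc);	/* last ref arms the disable timer */
	}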
*/ -int drm_vblank_init(struct drm_device *dev, int num_crtcs) +int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs) { - int i, ret = -ENOMEM; + int ret = -ENOMEM; + unsigned int i; spin_lock_init(&dev->vbl_lock); spin_lock_init(&dev->vblank_time_lock); @@ -341,7 +342,7 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs) struct drm_vblank_crtc *vblank = &dev->vblank[i]; vblank->dev = dev; - vblank->crtc = i; + vblank->pipe = i; init_waitqueue_head(&vblank->queue); setup_timer(&vblank->disable_timer, vblank_disable_fn, (unsigned long)vblank); @@ -624,17 +625,17 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc, if (mode->flags & DRM_MODE_FLAG_INTERLACE) framedur_ns /= 2; } else - DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n", + DRM_ERROR("crtc %u: Can't calculate constants, dotclock = 0!\n", crtc->base.id); crtc->pixeldur_ns = pixeldur_ns; crtc->linedur_ns = linedur_ns; crtc->framedur_ns = framedur_ns; - DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n", + DRM_DEBUG("crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n", crtc->base.id, mode->crtc_htotal, mode->crtc_vtotal, mode->crtc_vdisplay); - DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n", + DRM_DEBUG("crtc %u: clock %d kHz framedur %d linedur %d, pixeldur %d\n", crtc->base.id, dotclock, framedur_ns, linedur_ns, pixeldur_ns); } @@ -643,7 +644,7 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants); /** * drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper * @dev: DRM device - * @crtc: Which CRTC's vblank timestamp to retrieve + * @pipe: index of CRTC whose vblank timestamp to retrieve * @max_error: Desired maximum allowable error in timestamps (nanosecs) * On return contains true maximum error of timestamp * @vblank_time: Pointer to struct timeval which should receive the timestamp @@ -686,7 +687,8 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants); * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval. * */ -int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, +int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, + unsigned int pipe, int *max_error, struct timeval *vblank_time, unsigned flags, @@ -700,8 +702,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns; bool invbl; - if (crtc < 0 || crtc >= dev->num_crtcs) { - DRM_ERROR("Invalid crtc %d\n", crtc); + if (pipe >= dev->num_crtcs) { + DRM_ERROR("Invalid crtc %u\n", pipe); return -EINVAL; } @@ -720,7 +722,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, * Happens during initial modesetting of a crtc. */ if (framedur_ns == 0) { - DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc); + DRM_DEBUG("crtc %u: Noop due to uninitialized mode.\n", pipe); return -EAGAIN; } @@ -736,13 +738,13 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, * Get vertical and horizontal scanout position vpos, hpos, * and bounding timestamps stime, etime, pre/post query. */ - vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos, + vbl_status = dev->driver->get_scanout_position(dev, pipe, flags, &vpos, &hpos, &stime, &etime); /* Return as no-op if scanout query unsupported or failed. 
*/ if (!(vbl_status & DRM_SCANOUTPOS_VALID)) { - DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n", - crtc, vbl_status); + DRM_DEBUG("crtc %u : scanoutpos query failed [%d].\n", + pipe, vbl_status); return -EIO; } @@ -756,8 +758,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, /* Noisy system timing? */ if (i == DRM_TIMESTAMP_MAXRETRIES) { - DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n", - crtc, duration_ns/1000, *max_error/1000, i); + DRM_DEBUG("crtc %u: Noisy timestamp %d us > %d us [%d reps].\n", + pipe, duration_ns/1000, *max_error/1000, i); } /* Return upper bound of timestamp precision error. */ @@ -790,8 +792,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, etime = ktime_sub_ns(etime, delta_ns); *vblank_time = ktime_to_timeval(etime); - DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", - crtc, (int)vbl_status, hpos, vpos, + DRM_DEBUG("crtc %u : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", + pipe, (int)vbl_status, hpos, vpos, (long)tv_etime.tv_sec, (long)tv_etime.tv_usec, (long)vblank_time->tv_sec, (long)vblank_time->tv_usec, duration_ns/1000, i); @@ -816,7 +818,7 @@ static struct timeval get_drm_timestamp(void) * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent * vblank interval * @dev: DRM device - * @crtc: which CRTC's vblank timestamp to retrieve + * @pipe: index of CRTC whose vblank timestamp to retrieve * @tvblank: Pointer to target struct timeval which should receive the timestamp * @flags: Flags to pass to driver: * 0 = Default, @@ -833,7 +835,7 @@ static struct timeval get_drm_timestamp(void) * True if timestamp is considered to be very precise, false otherwise. */ static bool -drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, +drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, struct timeval *tvblank, unsigned flags) { int ret; @@ -843,7 +845,7 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, /* Query driver if possible and precision timestamping enabled. */ if (dev->driver->get_vblank_timestamp && (max_error > 0)) { - ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error, + ret = dev->driver->get_vblank_timestamp(dev, pipe, &max_error, tvblank, flags); if (ret > 0) return true; @@ -860,7 +862,7 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, /** * drm_vblank_count - retrieve "cooked" vblank counter value * @dev: DRM device - * @crtc: which counter to retrieve + * @pipe: index of CRTC for which to retrieve the counter * * Fetches the "cooked" vblank count value that represents the number of * vblank events since the system was booted, including lost events due to @@ -871,12 +873,13 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, * Returns: * The software vblank counter. */ -u32 drm_vblank_count(struct drm_device *dev, int crtc) +u32 drm_vblank_count(struct drm_device *dev, int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; - if (WARN_ON(crtc >= dev->num_crtcs)) + if (WARN_ON(pipe >= dev->num_crtcs)) return 0; + return vblank->count; } EXPORT_SYMBOL(drm_vblank_count); @@ -901,11 +904,10 @@ u32 drm_crtc_vblank_count(struct drm_crtc *crtc) EXPORT_SYMBOL(drm_crtc_vblank_count); /** - * drm_vblank_count_and_time - retrieve "cooked" vblank counter value - * and the system timestamp corresponding to that vblank counter value. 
- * + * drm_vblank_count_and_time - retrieve "cooked" vblank counter value and the + * system timestamp corresponding to that vblank counter value. * @dev: DRM device - * @crtc: which counter to retrieve + * @pipe: index of CRTC whose counter to retrieve * @vblanktime: Pointer to struct timeval to receive the vblank timestamp. * * Fetches the "cooked" vblank count value that represents the number of @@ -913,13 +915,13 @@ EXPORT_SYMBOL(drm_crtc_vblank_count); * modesetting activity. Returns corresponding system timestamp of the time * of the vblank interval that corresponds to the current vblank counter value. */ -u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, +u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, struct timeval *vblanktime) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; u32 cur_vblank; - if (WARN_ON(crtc >= dev->num_crtcs)) + if (WARN_ON(pipe >= dev->num_crtcs)) return 0; /* @@ -930,7 +932,7 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, do { cur_vblank = vblank->count; smp_rmb(); - *vblanktime = vblanktimestamp(dev, crtc, cur_vblank); + *vblanktime = vblanktimestamp(dev, pipe, cur_vblank); smp_rmb(); } while (cur_vblank != vblank->count); @@ -957,7 +959,7 @@ static void send_vblank_event(struct drm_device *dev, /** * drm_send_vblank_event - helper to send vblank event after pageflip * @dev: DRM device - * @crtc: CRTC in question + * @pipe: CRTC index * @e: the event to send * * Updates sequence # and timestamp on event, and sends it to userspace. @@ -965,20 +967,20 @@ static void send_vblank_event(struct drm_device *dev, * * This is the legacy version of drm_crtc_send_vblank_event(). */ -void drm_send_vblank_event(struct drm_device *dev, int crtc, - struct drm_pending_vblank_event *e) +void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe, + struct drm_pending_vblank_event *e) { struct timeval now; unsigned int seq; - if (crtc >= 0) { - seq = drm_vblank_count_and_time(dev, crtc, &now); + if (dev->num_crtcs > 0) { + seq = drm_vblank_count_and_time(dev, pipe, &now); } else { seq = 0; now = get_drm_timestamp(); } - e->pipe = crtc; + e->pipe = pipe; send_vblank_event(dev, e, seq, &now); } EXPORT_SYMBOL(drm_send_vblank_event); @@ -1003,11 +1005,14 @@ EXPORT_SYMBOL(drm_crtc_send_vblank_event); /** * drm_vblank_enable - enable the vblank interrupt on a CRTC * @dev: DRM device - * @crtc: CRTC in question + * @pipe: CRTC index + * + * Returns: + * Zero on success or a negative error code on failure. */ -static int drm_vblank_enable(struct drm_device *dev, int crtc) +static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; int ret = 0; assert_spin_locked(&dev->vbl_lock); @@ -1022,13 +1027,13 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc) * timestamps. Filtercode in drm_handle_vblank() will * prevent double-accounting of same vblank interval. 
*/ - ret = dev->driver->enable_vblank(dev, crtc); - DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); + ret = dev->driver->enable_vblank(dev, pipe); + DRM_DEBUG("enabling vblank on crtc %u, ret: %d\n", pipe, ret); if (ret) atomic_dec(&vblank->refcount); else { vblank->enabled = true; - drm_update_vblank_count(dev, crtc); + drm_update_vblank_count(dev, pipe); } } @@ -1040,7 +1045,7 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc) /** * drm_vblank_get - get a reference count on vblank events * @dev: DRM device - * @crtc: which CRTC to own + * @pipe: index of CRTC to own * * Acquire a reference count on vblank events to avoid having them disabled * while in use. @@ -1048,24 +1053,24 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc) * This is the legacy version of drm_crtc_vblank_get(). * * Returns: - * Zero on success, nonzero on failure. + * Zero on success or a negative error code on failure. */ -int drm_vblank_get(struct drm_device *dev, int crtc) +int drm_vblank_get(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; unsigned long irqflags; int ret = 0; if (!dev->num_crtcs) return -EINVAL; - if (WARN_ON(crtc >= dev->num_crtcs)) + if (WARN_ON(pipe >= dev->num_crtcs)) return -EINVAL; spin_lock_irqsave(&dev->vbl_lock, irqflags); /* Going from 0->1 means we have to enable interrupts again */ if (atomic_add_return(1, &vblank->refcount) == 1) { - ret = drm_vblank_enable(dev, crtc); + ret = drm_vblank_enable(dev, pipe); } else { if (!vblank->enabled) { atomic_dec(&vblank->refcount); @@ -1088,7 +1093,7 @@ EXPORT_SYMBOL(drm_vblank_get); * This is the native kms version of drm_vblank_get(). * * Returns: - * Zero on success, nonzero on failure. + * Zero on success or a negative error code on failure. */ int drm_crtc_vblank_get(struct drm_crtc *crtc) { @@ -1097,23 +1102,23 @@ int drm_crtc_vblank_get(struct drm_crtc *crtc) EXPORT_SYMBOL(drm_crtc_vblank_get); /** - * drm_vblank_put - give up ownership of vblank events + * drm_vblank_put - release ownership of vblank events * @dev: DRM device - * @crtc: which counter to give up + * @pipe: index of CRTC to release * * Release ownership of a given vblank counter, turning off interrupts * if possible. Disable interrupts after drm_vblank_offdelay milliseconds. * * This is the legacy version of drm_crtc_vblank_put(). */ -void drm_vblank_put(struct drm_device *dev, int crtc) +void drm_vblank_put(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; - if (WARN_ON(atomic_read(&vblank->refcount) == 0)) + if (WARN_ON(pipe >= dev->num_crtcs)) return; - if (WARN_ON(crtc >= dev->num_crtcs)) + if (WARN_ON(atomic_read(&vblank->refcount) == 0)) return; /* Last user schedules interrupt disable */ @@ -1147,30 +1152,34 @@ EXPORT_SYMBOL(drm_crtc_vblank_put); /** * drm_wait_one_vblank - wait for one vblank * @dev: DRM device - * @crtc: crtc index + * @pipe: CRTC index * * This waits for one vblank to pass on @crtc, using the irq driver interfaces. * It is a failure to call this when the vblank irq for @crtc is disabled, e.g. * due to lack of driver support or because the crtc is off. 
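For example (hypothetical driver code), a commit path that must not touch registers until the new scanout address has latched could do:

	/* Hypothetical: let one frame scan out before the follow-up write. */
	static void foo_plane_commit(struct drm_crtc *crtc)
	{
		/* Only valid while the crtc is on and its vblank irq can fire. */
		drm_crtc_wait_one_vblank(crtc);
		/* ... now safe to write the next set of registers ... */
	}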
*/ -void drm_wait_one_vblank(struct drm_device *dev, int crtc) +void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe) { + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; int ret; u32 last; - ret = drm_vblank_get(dev, crtc); - if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", crtc, ret)) + if (WARN_ON(pipe >= dev->num_crtcs)) return; - last = drm_vblank_count(dev, crtc); + ret = drm_vblank_get(dev, pipe); + if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", pipe, ret)) + return; - ret = wait_event_timeout(dev->vblank[crtc].queue, - last != drm_vblank_count(dev, crtc), + last = drm_vblank_count(dev, pipe); + + ret = wait_event_timeout(vblank->queue, + last != drm_vblank_count(dev, pipe), msecs_to_jiffies(100)); - WARN(ret == 0, "vblank wait timed out on crtc %i\n", crtc); + WARN(ret == 0, "vblank wait timed out on crtc %i\n", pipe); - drm_vblank_put(dev, crtc); + drm_vblank_put(dev, pipe); } EXPORT_SYMBOL(drm_wait_one_vblank); @@ -1191,7 +1200,7 @@ EXPORT_SYMBOL(drm_crtc_wait_one_vblank); /** * drm_vblank_off - disable vblank events on a CRTC * @dev: DRM device - * @crtc: CRTC in question + * @pipe: CRTC index * * Drivers can use this function to shut down the vblank interrupt handling when * disabling a crtc. This function ensures that the latest vblank frame count is @@ -1202,21 +1211,21 @@ EXPORT_SYMBOL(drm_crtc_wait_one_vblank); * * This is the legacy version of drm_crtc_vblank_off(). */ -void drm_vblank_off(struct drm_device *dev, int crtc) +void drm_vblank_off(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; struct drm_pending_vblank_event *e, *t; struct timeval now; unsigned long irqflags; unsigned int seq; - if (WARN_ON(crtc >= dev->num_crtcs)) + if (WARN_ON(pipe >= dev->num_crtcs)) return; spin_lock_irqsave(&dev->event_lock, irqflags); spin_lock(&dev->vbl_lock); - vblank_disable_and_save(dev, crtc); + vblank_disable_and_save(dev, pipe); wake_up(&vblank->queue); /* @@ -1230,16 +1239,16 @@ void drm_vblank_off(struct drm_device *dev, int crtc) spin_unlock(&dev->vbl_lock); /* Send any queued vblank events, lest the natives grow disquiet */ - seq = drm_vblank_count_and_time(dev, crtc, &now); + seq = drm_vblank_count_and_time(dev, pipe, &now); list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { - if (e->pipe != crtc) + if (e->pipe != pipe) continue; DRM_DEBUG("Sending premature vblank event on disable: \ wanted %d, current %d\n", e->event.sequence, seq); list_del(&e->base.link); - drm_vblank_put(dev, e->pipe); + drm_vblank_put(dev, pipe); send_vblank_event(dev, e, seq, &now); } spin_unlock_irqrestore(&dev->event_lock, irqflags); @@ -1300,7 +1309,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_reset); /** * drm_vblank_on - enable vblank events on a CRTC * @dev: DRM device - * @crtc: CRTC in question + * @pipe: CRTC index * * This functions restores the vblank interrupt state captured with * drm_vblank_off() again. Note that calls to drm_vblank_on() and @@ -1309,12 +1318,12 @@ EXPORT_SYMBOL(drm_crtc_vblank_reset); * * This is the legacy version of drm_crtc_vblank_on(). 
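In a KMS driver these two calls bracket the CRTC's power cycle; a minimal sketch with hypothetical foo_* hooks:

	/* Hypothetical CRTC hooks pairing vblank off/on around a modeset. */
	static void foo_crtc_disable(struct drm_crtc *crtc)
	{
		drm_crtc_vblank_off(crtc);	/* fail new gets, flush pending events */
		/* ... power down the pipe ... */
	}

	static void foo_crtc_enable(struct drm_crtc *crtc)
	{
		/* ... program the mode, power up the pipe ... */
		drm_crtc_vblank_on(crtc);	/* drm_vblank_get() works again */
	}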
*/ -void drm_vblank_on(struct drm_device *dev, int crtc) +void drm_vblank_on(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; unsigned long irqflags; - if (WARN_ON(crtc >= dev->num_crtcs)) + if (WARN_ON(pipe >= dev->num_crtcs)) return; spin_lock_irqsave(&dev->vbl_lock, irqflags); @@ -1332,7 +1341,7 @@ void drm_vblank_on(struct drm_device *dev, int crtc) * vblank counter value before and after a modeset */ vblank->last = - (dev->driver->get_vblank_counter(dev, crtc) - 1) & + (dev->driver->get_vblank_counter(dev, pipe) - 1) & dev->max_vblank_count; /* * re-enable interrupts if there are users left, or the @@ -1340,7 +1349,7 @@ void drm_vblank_on(struct drm_device *dev, int crtc) */ if (atomic_read(&vblank->refcount) != 0 || (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0)) - WARN_ON(drm_vblank_enable(dev, crtc)); + WARN_ON(drm_vblank_enable(dev, pipe)); spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } EXPORT_SYMBOL(drm_vblank_on); @@ -1365,7 +1374,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_on); /** * drm_vblank_pre_modeset - account for vblanks across mode sets * @dev: DRM device - * @crtc: CRTC in question + * @pipe: CRTC index * * Account for vblank events across mode setting events, which will likely * reset the hardware frame counter. @@ -1385,15 +1394,15 @@ EXPORT_SYMBOL(drm_crtc_vblank_on); * Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc * again. */ -void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) +void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; /* vblank is not initialized (IRQ not installed ?), or has been freed */ if (!dev->num_crtcs) return; - if (WARN_ON(crtc >= dev->num_crtcs)) + if (WARN_ON(pipe >= dev->num_crtcs)) return; /* @@ -1405,7 +1414,7 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) */ if (!vblank->inmodeset) { vblank->inmodeset = 0x1; - if (drm_vblank_get(dev, crtc) == 0) + if (drm_vblank_get(dev, pipe) == 0) vblank->inmodeset |= 0x2; } } @@ -1414,27 +1423,30 @@ EXPORT_SYMBOL(drm_vblank_pre_modeset); /** * drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes * @dev: DRM device - * @crtc: CRTC in question + * @pipe: CRTC index * * This function again drops the temporary vblank reference acquired in * drm_vblank_pre_modeset. 
*/ -void drm_vblank_post_modeset(struct drm_device *dev, int crtc) +void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; unsigned long irqflags; /* vblank is not initialized (IRQ not installed ?), or has been freed */ if (!dev->num_crtcs) return; + if (WARN_ON(pipe >= dev->num_crtcs)) + return; + if (vblank->inmodeset) { spin_lock_irqsave(&dev->vbl_lock, irqflags); dev->vblank_disable_allowed = true; spin_unlock_irqrestore(&dev->vbl_lock, irqflags); if (vblank->inmodeset & 0x2) - drm_vblank_put(dev, crtc); + drm_vblank_put(dev, pipe); vblank->inmodeset = 0; } @@ -1456,7 +1468,7 @@ int drm_modeset_ctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_modeset_ctl *modeset = data; - unsigned int crtc; + unsigned int pipe; /* If drm_vblank_init() hasn't been called yet, just no-op */ if (!dev->num_crtcs) @@ -1466,16 +1478,16 @@ int drm_modeset_ctl(struct drm_device *dev, void *data, if (drm_core_check_feature(dev, DRIVER_MODESET)) return 0; - crtc = modeset->crtc; - if (crtc >= dev->num_crtcs) + pipe = modeset->crtc; + if (pipe >= dev->num_crtcs) return -EINVAL; switch (modeset->cmd) { case _DRM_PRE_MODESET: - drm_vblank_pre_modeset(dev, crtc); + drm_vblank_pre_modeset(dev, pipe); break; case _DRM_POST_MODESET: - drm_vblank_post_modeset(dev, crtc); + drm_vblank_post_modeset(dev, pipe); break; default: return -EINVAL; @@ -1484,7 +1496,7 @@ int drm_modeset_ctl(struct drm_device *dev, void *data, return 0; } -static int drm_queue_vblank_event(struct drm_device *dev, int pipe, +static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe, union drm_wait_vblank *vblwait, struct drm_file *file_priv) { @@ -1538,7 +1550,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, vblwait->reply.sequence = vblwait->request.sequence; } - DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n", + DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n", vblwait->request.sequence, seq, pipe); trace_drm_vblank_event_queued(current->pid, pipe, @@ -1587,7 +1599,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_vblank_crtc *vblank; union drm_wait_vblank *vblwait = data; int ret; - unsigned int flags, seq, crtc, high_crtc; + unsigned int flags, seq, pipe, high_pipe; if (!dev->irq_enabled) return -EINVAL; @@ -1606,22 +1618,22 @@ int drm_wait_vblank(struct drm_device *dev, void *data, } flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; - high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK); - if (high_crtc) - crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT; + high_pipe = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK); + if (high_pipe) + pipe = high_pipe >> _DRM_VBLANK_HIGH_CRTC_SHIFT; else - crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0; - if (crtc >= dev->num_crtcs) + pipe = flags & _DRM_VBLANK_SECONDARY ? 
1 : 0; + if (pipe >= dev->num_crtcs) return -EINVAL; - vblank = &dev->vblank[crtc]; + vblank = &dev->vblank[pipe]; - ret = drm_vblank_get(dev, crtc); + ret = drm_vblank_get(dev, pipe); if (ret) { DRM_DEBUG("failed to acquire vblank counter, %d\n", ret); return ret; } - seq = drm_vblank_count(dev, crtc); + seq = drm_vblank_count(dev, pipe); switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { case _DRM_VBLANK_RELATIVE: @@ -1638,7 +1650,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, /* must hold on to the vblank ref until the event fires * drm_vblank_put will be called asynchronously */ - return drm_queue_vblank_event(dev, crtc, vblwait, file_priv); + return drm_queue_vblank_event(dev, pipe, vblwait, file_priv); } if ((flags & _DRM_VBLANK_NEXTONMISS) && @@ -1646,11 +1658,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data, vblwait->request.sequence = seq + 1; } - DRM_DEBUG("waiting on vblank count %d, crtc %d\n", - vblwait->request.sequence, crtc); + DRM_DEBUG("waiting on vblank count %d, crtc %u\n", + vblwait->request.sequence, pipe); vblank->last_wait = vblwait->request.sequence; DRM_WAIT_ON(ret, vblank->queue, 3 * HZ, - (((drm_vblank_count(dev, crtc) - + (((drm_vblank_count(dev, pipe) - vblwait->request.sequence) <= (1 << 23)) || !vblank->enabled || !dev->irq_enabled)); @@ -1658,7 +1670,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, if (ret != -EINTR) { struct timeval now; - vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now); + vblwait->reply.sequence = drm_vblank_count_and_time(dev, pipe, &now); vblwait->reply.tval_sec = now.tv_sec; vblwait->reply.tval_usec = now.tv_usec; @@ -1669,11 +1681,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data, } done: - drm_vblank_put(dev, crtc); + drm_vblank_put(dev, pipe); return ret; } -static void drm_handle_vblank_events(struct drm_device *dev, int crtc) +static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe) { struct drm_pending_vblank_event *e, *t; struct timeval now; @@ -1681,10 +1693,10 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc) assert_spin_locked(&dev->event_lock); - seq = drm_vblank_count_and_time(dev, crtc, &now); + seq = drm_vblank_count_and_time(dev, pipe, &now); list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { - if (e->pipe != crtc) + if (e->pipe != pipe) continue; if ((seq - e->event.sequence) > (1<<23)) continue; @@ -1693,26 +1705,26 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc) e->event.sequence, seq); list_del(&e->base.link); - drm_vblank_put(dev, e->pipe); + drm_vblank_put(dev, pipe); send_vblank_event(dev, e, seq, &now); } - trace_drm_vblank_event(crtc, seq); + trace_drm_vblank_event(pipe, seq); } /** * drm_handle_vblank - handle a vblank event * @dev: DRM device - * @crtc: where this event occurred + * @pipe: index of CRTC where this event occurred * * Drivers should call this routine in their vblank interrupt handlers to * update the vblank counter and send any signals that may be pending. * * This is the legacy version of drm_crtc_handle_vblank(). 
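The exynos conversions further down show this call site in context; generically, the pattern in a driver's interrupt handler looks like the following sketch (foo_* names hypothetical, with struct foo_crtc assumed to embed a struct drm_crtc as "base"):

	/* Hypothetical vblank interrupt handler feeding the DRM core. */
	static irqreturn_t foo_irq_handler(int irq, void *arg)
	{
		struct foo_crtc *foo = arg;		/* illustrative driver state */

		if (foo_ack_vblank_irq(foo))		/* hypothetical register ack */
			drm_crtc_handle_vblank(&foo->base);

		return IRQ_HANDLED;
	}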
*/ -bool drm_handle_vblank(struct drm_device *dev, int crtc) +bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; u32 vblcount; s64 diff_ns; struct timeval tvblank; @@ -1721,7 +1733,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc) if (WARN_ON_ONCE(!dev->num_crtcs)) return false; - if (WARN_ON(crtc >= dev->num_crtcs)) + if (WARN_ON(pipe >= dev->num_crtcs)) return false; spin_lock_irqsave(&dev->event_lock, irqflags); @@ -1745,11 +1757,11 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc) /* Get current timestamp and count. */ vblcount = vblank->count; - drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ); + drm_get_last_vbltimestamp(dev, pipe, &tvblank, DRM_CALLED_FROM_VBLIRQ); /* Compute time difference to timestamp of last vblank */ diff_ns = timeval_to_ns(&tvblank) - - timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); + timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount)); /* Update vblank timestamp and count if at least * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds @@ -1761,15 +1773,15 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc) * ignore those for accounting. */ if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) - store_vblank(dev, crtc, 1, &tvblank); + store_vblank(dev, pipe, 1, &tvblank); else - DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", - crtc, (int) diff_ns); + DRM_DEBUG("crtc %u: Redundant vblirq ignored. diff_ns = %d\n", + pipe, (int) diff_ns); spin_unlock(&dev->vblank_time_lock); wake_up(&vblank->queue); - drm_handle_vblank_events(dev, crtc); + drm_handle_vblank_events(dev, pipe); spin_unlock_irqrestore(&dev->event_lock, irqflags); diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c index 744dfbc6a329..fba321ca4344 100644 --- a/drivers/gpu/drm/drm_modeset_lock.c +++ b/drivers/gpu/drm/drm_modeset_lock.c @@ -55,41 +55,27 @@ * drm_modeset_acquire_fini(&ctx); */ - /** - * __drm_modeset_lock_all - internal helper to grab all modeset locks - * @dev: DRM device - * @trylock: trylock mode for atomic contexts - * - * This is a special version of drm_modeset_lock_all() which can also be used in - * atomic contexts. Then @trylock must be set to true. + * drm_modeset_lock_all - take all modeset locks + * @dev: drm device * - * Returns: - * 0 on success or negative error code on failure. + * This function takes all modeset locks, suitable where a more fine-grained + * scheme isn't (yet) implemented. Locks must be dropped with + * drm_modeset_unlock_all. */ -int __drm_modeset_lock_all(struct drm_device *dev, - bool trylock) +void drm_modeset_lock_all(struct drm_device *dev) { struct drm_mode_config *config = &dev->mode_config; struct drm_modeset_acquire_ctx *ctx; int ret; - ctx = kzalloc(sizeof(*ctx), - trylock ? 
GFP_ATOMIC : GFP_KERNEL); - if (!ctx) - return -ENOMEM; + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (WARN_ON(!ctx)) + return; - if (trylock) { - if (!mutex_trylock(&config->mutex)) { - ret = -EBUSY; - goto out; - } - } else { - mutex_lock(&config->mutex); - } + mutex_lock(&config->mutex); drm_modeset_acquire_init(ctx, 0); - ctx->trylock_only = trylock; retry: ret = drm_modeset_lock(&config->connection_mutex, ctx); @@ -108,7 +94,7 @@ retry: drm_warn_on_modeset_not_all_locked(dev); - return 0; + return; fail: if (ret == -EDEADLK) { @@ -116,23 +102,7 @@ fail: goto retry; } -out: kfree(ctx); - return ret; -} -EXPORT_SYMBOL(__drm_modeset_lock_all); - -/** - * drm_modeset_lock_all - take all modeset locks - * @dev: drm device - * - * This function takes all modeset locks, suitable where a more fine-grained - * scheme isn't (yet) implemented. Locks must be dropped with - * drm_modeset_unlock_all. - */ -void drm_modeset_lock_all(struct drm_device *dev) -{ - WARN_ON(__drm_modeset_lock_all(dev, false) != 0); } EXPORT_SYMBOL(drm_modeset_lock_all); diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 43003c4ad80b..df0b61a60501 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -56,7 +56,7 @@ config DRM_EXYNOS_DSI config DRM_EXYNOS_DP bool "EXYNOS DRM DP driver support" - depends on DRM_EXYNOS && (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) + depends on DRM_EXYNOS && (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) default DRM_EXYNOS select DRM_PANEL help diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile index 7de0b1084fcd..02aecfed6354 100644 --- a/drivers/gpu/drm/exynos/Makefile +++ b/drivers/gpu/drm/exynos/Makefile @@ -3,10 +3,9 @@ # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos -exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o \ - exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \ - exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \ - exynos_drm_plane.o exynos_drm_dmabuf.o +exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \ + exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \ + exynos_drm_plane.o exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 8b1225f245fc..484e312e0a22 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -152,15 +152,15 @@ static void decon_commit(struct exynos_drm_crtc *crtc) #define OFFSIZE(x) (((x) & 0x3fff) << 14) #define PAGEWIDTH(x) ((x) & 0x3fff) -static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win) +static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, + struct drm_framebuffer *fb) { - struct exynos_drm_plane *plane = &ctx->planes[win]; unsigned long val; val = readl(ctx->addr + DECON_WINCONx(win)); val &= ~WINCONx_BPPMODE_MASK; - switch (plane->pixel_format) { + switch (fb->pixel_format) { case DRM_FORMAT_XRGB1555: val |= WINCONx_BPPMODE_16BPP_I1555; val |= WINCONx_HAWSWP_F; @@ -186,7 +186,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win) return; } - DRM_DEBUG_KMS("bpp = %u\n", plane->bpp); + DRM_DEBUG_KMS("bpp = %u\n", fb->bits_per_pixel); /* * In case of exynos, setting dma-burst to 16Word causes permanent * tearing for very small buffers, e.g. cursor buffer. Burst Mode * switching which is based on plane size is not recommended as * plane size varies a lot towards the end of the screen and rapid * movement causes unstable DMA which results into iommu crash/tear.
*/ - if (plane->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) { + if (fb->width < MIN_FB_WIDTH_FOR_16WORD_BURST) { val &= ~WINCONx_BURSTLEN_MASK; val |= WINCONx_BURSTLEN_8WORD; } @@ -219,17 +219,16 @@ static void decon_shadow_protect_win(struct decon_context *ctx, int win, writel(val, ctx->addr + DECON_SHADOWCON); } -static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) +static void decon_update_plane(struct exynos_drm_crtc *crtc, + struct exynos_drm_plane *plane) { struct decon_context *ctx = crtc->ctx; - struct exynos_drm_plane *plane; + struct drm_plane_state *state = plane->base.state; + unsigned int win = plane->zpos; + unsigned int bpp = state->fb->bits_per_pixel >> 3; + unsigned int pitch = state->fb->pitches[0]; u32 val; - if (win < 0 || win >= WINDOWS_NR) - return; - - plane = &ctx->planes[win]; - if (ctx->suspended) return; @@ -238,8 +237,8 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) val = COORDINATE_X(plane->crtc_x) | COORDINATE_Y(plane->crtc_y); writel(val, ctx->addr + DECON_VIDOSDxA(win)); - val = COORDINATE_X(plane->crtc_x + plane->crtc_width - 1) | - COORDINATE_Y(plane->crtc_y + plane->crtc_height - 1); + val = COORDINATE_X(plane->crtc_x + plane->crtc_w - 1) | + COORDINATE_Y(plane->crtc_y + plane->crtc_h - 1); writel(val, ctx->addr + DECON_VIDOSDxB(win)); val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | @@ -252,14 +251,14 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) writel(plane->dma_addr[0], ctx->addr + DECON_VIDW0xADD0B0(win)); - val = plane->dma_addr[0] + plane->pitch * plane->crtc_height; + val = plane->dma_addr[0] + pitch * plane->crtc_h; writel(val, ctx->addr + DECON_VIDW0xADD1B0(win)); - val = OFFSIZE(plane->pitch - plane->crtc_width * (plane->bpp >> 3)) - | PAGEWIDTH(plane->crtc_width * (plane->bpp >> 3)); + val = OFFSIZE(pitch - plane->crtc_w * bpp) + | PAGEWIDTH(plane->crtc_w * bpp); writel(val, ctx->addr + DECON_VIDW0xADD2(win)); - decon_win_set_pixfmt(ctx, win); + decon_win_set_pixfmt(ctx, win, state->fb); /* window enable */ val = readl(ctx->addr + DECON_WINCONx(win)); @@ -277,17 +276,13 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) atomic_set(&ctx->win_updated, 1); } -static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win) +static void decon_disable_plane(struct exynos_drm_crtc *crtc, + struct exynos_drm_plane *plane) { struct decon_context *ctx = crtc->ctx; - struct exynos_drm_plane *plane; + unsigned int win = plane->zpos; u32 val; - if (win < 0 || win >= WINDOWS_NR) - return; - - plane = &ctx->planes[win]; - if (ctx->suspended) return; @@ -378,7 +373,7 @@ static void decon_disable(struct exynos_drm_crtc *crtc) * a destroyed buffer later. 
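The decon_update_plane() conversion above reads everything from the plane's atomic state instead of fields cached on exynos_drm_plane. A sketch of the derived values, assuming the 4.3-era struct drm_framebuffer (which still carries bits_per_pixel directly); example_scanout_geometry is hypothetical:

static void example_scanout_geometry(struct exynos_drm_plane *plane)
{
        struct drm_plane_state *state = plane->base.state;
        unsigned int bpp = state->fb->bits_per_pixel >> 3; /* bytes/pixel */
        unsigned int pitch = state->fb->pitches[0];        /* bytes/line */

        /* End of the scanout window: base + one full pitch per line. */
        dma_addr_t end = plane->dma_addr[0] + pitch * plane->crtc_h;

        /* DECON programs the visible bytes per line (PAGEWIDTH) and the
         * invisible remainder of each pitch (OFFSIZE) separately. */
        unsigned int pagewidth = plane->crtc_w * bpp;
        unsigned int offsize = pitch - pagewidth;

        (void)end; (void)pagewidth; (void)offsize;
}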
*/ for (i = 0; i < WINDOWS_NR; i++) - decon_win_disable(crtc, i); + decon_disable_plane(crtc, &ctx->planes[i]); decon_swreset(ctx); @@ -407,7 +402,7 @@ void decon_te_irq_handler(struct exynos_drm_crtc *crtc) writel(val, ctx->addr + DECON_TRIGCON); } - drm_handle_vblank(ctx->drm_dev, ctx->pipe); + drm_crtc_handle_vblank(&ctx->crtc->base); } static void decon_clear_channels(struct exynos_drm_crtc *crtc) @@ -460,10 +455,9 @@ static struct exynos_drm_crtc_ops decon_crtc_ops = { .enable_vblank = decon_enable_vblank, .disable_vblank = decon_disable_vblank, .commit = decon_commit, - .win_commit = decon_win_commit, - .win_disable = decon_win_disable, + .update_plane = decon_update_plane, + .disable_plane = decon_disable_plane, .te_handler = decon_te_irq_handler, - .clear_channels = decon_clear_channels, }; static int decon_bind(struct device *dev, struct device *master, void *data) @@ -497,7 +491,9 @@ static int decon_bind(struct device *dev, struct device *master, void *data) goto err; } - ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, dev); + decon_clear_channels(ctx->crtc); + + ret = drm_iommu_attach_device(drm_dev, dev); if (ret) goto err; @@ -514,8 +510,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data) decon_disable(ctx->crtc); /* detach this sub driver from iommu mapping if supported. */ - if (is_drm_iommu_supported(ctx->drm_dev)) - drm_iommu_detach_device(ctx->drm_dev, ctx->dev); + drm_iommu_detach_device(ctx->drm_dev, ctx->dev); } static const struct component_ops decon_component_ops = { @@ -533,7 +528,7 @@ static irqreturn_t decon_vsync_irq_handler(int irq, void *dev_id) val = readl(ctx->addr + DECON_VIDINTCON1); if (val & VIDINTCON1_INTFRMPEND) { - drm_handle_vblank(ctx->drm_dev, ctx->pipe); + drm_crtc_handle_vblank(&ctx->crtc->base); /* clear */ writel(VIDINTCON1_INTFRMPEND, ctx->addr + DECON_VIDINTCON1); @@ -553,7 +548,7 @@ static irqreturn_t decon_lcd_sys_irq_handler(int irq, void *dev_id) val = readl(ctx->addr + DECON_VIDINTCON1); if (val & VIDINTCON1_INTFRMDONEPEND) { - exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); + exynos_drm_crtc_finish_pageflip(ctx->crtc); /* clear */ writel(VIDINTCON1_INTFRMDONEPEND, diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index 362532afd1a5..07926547c94f 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -61,7 +61,7 @@ struct decon_context { atomic_t wait_vsync_event; struct exynos_drm_panel_info panel; - struct exynos_drm_display *display; + struct drm_encoder *encoder; }; static const struct of_device_id decon_driver_dt_match[] = { @@ -126,7 +126,9 @@ static int decon_ctx_initialize(struct decon_context *ctx, ctx->drm_dev = drm_dev; ctx->pipe = priv->pipe++; - ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, ctx->dev); + decon_clear_channels(ctx->crtc); + + ret = drm_iommu_attach_device(drm_dev, ctx->dev); if (ret) priv->pipe--; @@ -136,8 +138,7 @@ static int decon_ctx_initialize(struct decon_context *ctx, static void decon_ctx_remove(struct decon_context *ctx) { /* detach this sub driver from iommu mapping if supported. 
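Both interrupt paths above switch from the (device, pipe) vblank API to the CRTC-based one, so the driver no longer needs a pipe index here. A minimal sketch, assuming the CRTC pointer is registered as the IRQ cookie (example_vsync_irq is hypothetical):

#include <linux/interrupt.h>
#include <drm/drmP.h>

static irqreturn_t example_vsync_irq(int irq, void *dev_id)
{
        struct drm_crtc *crtc = dev_id;

        /* Replaces drm_handle_vblank(dev, pipe): the pipe is derived
         * from the CRTC itself. */
        drm_crtc_handle_vblank(crtc);

        return IRQ_HANDLED;
}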
*/ - if (is_drm_iommu_supported(ctx->drm_dev)) - drm_iommu_detach_device(ctx->drm_dev, ctx->dev); + drm_iommu_detach_device(ctx->drm_dev, ctx->dev); } static u32 decon_calc_clkdiv(struct decon_context *ctx, @@ -271,16 +272,16 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc) } } -static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win) +static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, + struct drm_framebuffer *fb) { - struct exynos_drm_plane *plane = &ctx->planes[win]; unsigned long val; int padding; val = readl(ctx->regs + WINCON(win)); val &= ~WINCONx_BPPMODE_MASK; - switch (plane->pixel_format) { + switch (fb->pixel_format) { case DRM_FORMAT_RGB565: val |= WINCONx_BPPMODE_16BPP_565; val |= WINCONx_BURSTLEN_16WORD; @@ -329,7 +330,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win) break; } - DRM_DEBUG_KMS("bpp = %d\n", plane->bpp); + DRM_DEBUG_KMS("bpp = %d\n", fb->bits_per_pixel); /* * In case of exynos, setting dma-burst to 16Word causes permanent @@ -339,8 +340,8 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win) * movement causes unstable DMA which results into iommu crash/tear. */ - padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width; - if (plane->fb_width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) { + padding = (fb->pitches[0] / (fb->bits_per_pixel >> 3)) - fb->width; + if (fb->width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) { val &= ~WINCONx_BURSTLEN_MASK; val |= WINCONx_BURSTLEN_8WORD; } @@ -382,23 +383,19 @@ static void decon_shadow_protect_win(struct decon_context *ctx, writel(val, ctx->regs + SHADOWCON); } -static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) +static void decon_update_plane(struct exynos_drm_crtc *crtc, + struct exynos_drm_plane *plane) { struct decon_context *ctx = crtc->ctx; struct drm_display_mode *mode = &crtc->base.state->adjusted_mode; - struct exynos_drm_plane *plane; + struct drm_plane_state *state = plane->base.state; int padding; unsigned long val, alpha; unsigned int last_x; unsigned int last_y; - - if (ctx->suspended) - return; - - if (win < 0 || win >= WINDOWS_NR) - return; - - plane = &ctx->planes[win]; + unsigned int win = plane->zpos; + unsigned int bpp = state->fb->bits_per_pixel >> 3; + unsigned int pitch = state->fb->pitches[0]; if (ctx->suspended) return; @@ -420,11 +417,11 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) val = (unsigned long)plane->dma_addr[0]; writel(val, ctx->regs + VIDW_BUF_START(win)); - padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width; + padding = (pitch / bpp) - state->fb->width; /* buffer size */ - writel(plane->fb_width + padding, ctx->regs + VIDW_WHOLE_X(win)); - writel(plane->fb_height, ctx->regs + VIDW_WHOLE_Y(win)); + writel(state->fb->width + padding, ctx->regs + VIDW_WHOLE_X(win)); + writel(state->fb->height, ctx->regs + VIDW_WHOLE_Y(win)); /* offset from the start of the buffer to read */ writel(plane->src_x, ctx->regs + VIDW_OFFSET_X(win)); @@ -433,25 +430,25 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) DRM_DEBUG_KMS("start addr = 0x%lx\n", (unsigned long)val); DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", - plane->crtc_width, plane->crtc_height); + plane->crtc_w, plane->crtc_h); /* * OSD position. * In case the window layout goes of LCD layout, DECON fails. 
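The pixfmt helper above now derives the per-line padding from the framebuffer itself. The burst-length fallback both DECON variants share reduces to the check below (MIN_FB_WIDTH_FOR_16WORD_BURST is the driver's own constant; example_needs_8word_burst is hypothetical):

static bool example_needs_8word_burst(const struct drm_framebuffer *fb)
{
        unsigned int cpp = fb->bits_per_pixel >> 3;
        /* pitch may exceed width * cpp; the excess is line padding */
        unsigned int padding = fb->pitches[0] / cpp - fb->width;

        /* 16-word bursts on very narrow buffers can overrun a line and
         * wedge the IOMMU, so fall back to 8-word bursts. */
        return fb->width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST;
}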
*/ - if ((plane->crtc_x + plane->crtc_width) > mode->hdisplay) - plane->crtc_x = mode->hdisplay - plane->crtc_width; - if ((plane->crtc_y + plane->crtc_height) > mode->vdisplay) - plane->crtc_y = mode->vdisplay - plane->crtc_height; + if ((plane->crtc_x + plane->crtc_w) > mode->hdisplay) + plane->crtc_x = mode->hdisplay - plane->crtc_w; + if ((plane->crtc_y + plane->crtc_h) > mode->vdisplay) + plane->crtc_y = mode->vdisplay - plane->crtc_h; val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) | VIDOSDxA_TOPLEFT_Y(plane->crtc_y); writel(val, ctx->regs + VIDOSD_A(win)); - last_x = plane->crtc_x + plane->crtc_width; + last_x = plane->crtc_x + plane->crtc_w; if (last_x) last_x--; - last_y = plane->crtc_y + plane->crtc_height; + last_y = plane->crtc_y + plane->crtc_h; if (last_y) last_y--; @@ -475,7 +472,7 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) writel(alpha, ctx->regs + VIDOSD_D(win)); - decon_win_set_pixfmt(ctx, win); + decon_win_set_pixfmt(ctx, win, state->fb); /* hardware window 0 doesn't support color key. */ if (win != 0) @@ -495,17 +492,13 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) writel(val, ctx->regs + DECON_UPDATE); } -static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win) +static void decon_disable_plane(struct exynos_drm_crtc *crtc, + struct exynos_drm_plane *plane) { struct decon_context *ctx = crtc->ctx; - struct exynos_drm_plane *plane; + unsigned int win = plane->zpos; u32 val; - if (win < 0 || win >= WINDOWS_NR) - return; - - plane = &ctx->planes[win]; - if (ctx->suspended) return; @@ -601,7 +594,7 @@ static void decon_disable(struct exynos_drm_crtc *crtc) * a destroyed buffer later. */ for (i = 0; i < WINDOWS_NR; i++) - decon_win_disable(crtc, i); + decon_disable_plane(crtc, &ctx->planes[i]); clk_disable_unprepare(ctx->vclk); clk_disable_unprepare(ctx->eclk); @@ -621,9 +614,8 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = { .enable_vblank = decon_enable_vblank, .disable_vblank = decon_disable_vblank, .wait_for_vblank = decon_wait_for_vblank, - .win_commit = decon_win_commit, - .win_disable = decon_win_disable, - .clear_channels = decon_clear_channels, + .update_plane = decon_update_plane, + .disable_plane = decon_disable_plane, }; @@ -643,8 +635,8 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id) goto out; if (!ctx->i80_if) { - drm_handle_vblank(ctx->drm_dev, ctx->pipe); - exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); + drm_crtc_handle_vblank(&ctx->crtc->base); + exynos_drm_crtc_finish_pageflip(ctx->crtc); /* set wait vsync event to zero and wake up queue. 
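The OSD programming above first clamps the window into the active area, then converts to the controller's inclusive last-pixel coordinates. In isolation (example_osd_coords is hypothetical):

static void example_osd_coords(struct exynos_drm_plane *p,
                               const struct drm_display_mode *mode)
{
        unsigned int last_x, last_y;

        /* DECON fails if a window extends past the LCD layout. */
        if (p->crtc_x + p->crtc_w > mode->hdisplay)
                p->crtc_x = mode->hdisplay - p->crtc_w;
        if (p->crtc_y + p->crtc_h > mode->vdisplay)
                p->crtc_y = mode->vdisplay - p->crtc_h;

        /* The bottom-right register is inclusive, hence the -1
         * (guarded against zero-sized windows). */
        last_x = p->crtc_x + p->crtc_w;
        if (last_x)
                last_x--;
        last_y = p->crtc_y + p->crtc_h;
        if (last_y)
                last_y--;
}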
*/ if (atomic_read(&ctx->wait_vsync_event)) { @@ -689,8 +681,8 @@ static int decon_bind(struct device *dev, struct device *master, void *data) return PTR_ERR(ctx->crtc); } - if (ctx->display) - exynos_drm_create_enc_conn(drm_dev, ctx->display); + if (ctx->encoder) + exynos_dpi_bind(drm_dev, ctx->encoder); return 0; @@ -703,8 +695,8 @@ static void decon_unbind(struct device *dev, struct device *master, decon_disable(ctx->crtc); - if (ctx->display) - exynos_dpi_remove(ctx->display); + if (ctx->encoder) + exynos_dpi_remove(ctx->encoder); decon_ctx_remove(ctx); } @@ -789,9 +781,9 @@ static int decon_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ctx); - ctx->display = exynos_dpi_probe(dev); - if (IS_ERR(ctx->display)) { - ret = PTR_ERR(ctx->display); + ctx->encoder = exynos_dpi_probe(dev); + if (IS_ERR(ctx->encoder)) { + ret = PTR_ERR(ctx->encoder); goto err_iounmap; } diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c index 172b8002a2c8..d66ade0efac8 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.c +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c @@ -32,19 +32,20 @@ #include <drm/drm_panel.h> #include "exynos_dp_core.h" +#include "exynos_drm_crtc.h" #define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \ connector) static inline struct exynos_drm_crtc *dp_to_crtc(struct exynos_dp_device *dp) { - return to_exynos_crtc(dp->encoder->crtc); + return to_exynos_crtc(dp->encoder.crtc); } -static inline struct exynos_dp_device * -display_to_dp(struct exynos_drm_display *d) +static inline struct exynos_dp_device *encoder_to_dp( + struct drm_encoder *e) { - return container_of(d, struct exynos_dp_device, display); + return container_of(e, struct exynos_dp_device, encoder); } struct bridge_init { @@ -795,9 +796,6 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp) /* Configure video slave mode */ exynos_dp_enable_video_master(dp, 0); - /* Enable video */ - exynos_dp_start_video(dp); - timeout_loop = 0; for (;;) { @@ -891,9 +889,9 @@ static void exynos_dp_hotplug(struct work_struct *work) drm_helper_hpd_irq_event(dp->drm_dev); } -static void exynos_dp_commit(struct exynos_drm_display *display) +static void exynos_dp_commit(struct drm_encoder *encoder) { - struct exynos_dp_device *dp = display_to_dp(display); + struct exynos_dp_device *dp = encoder_to_dp(encoder); int ret; /* Keep the panel disabled while we configure video */ @@ -938,6 +936,9 @@ static void exynos_dp_commit(struct exynos_drm_display *display) if (drm_panel_enable(dp->panel)) DRM_ERROR("failed to enable the panel\n"); } + + /* Enable video */ + exynos_dp_start_video(dp); } static enum drm_connector_status exynos_dp_detect( @@ -994,7 +995,7 @@ static struct drm_encoder *exynos_dp_best_encoder( { struct exynos_dp_device *dp = ctx_from_connector(connector); - return dp->encoder; + return &dp->encoder; } static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = { @@ -1019,15 +1020,12 @@ static int exynos_drm_attach_lcd_bridge(struct exynos_dp_device *dp, return 0; } -static int exynos_dp_create_connector(struct exynos_drm_display *display, - struct drm_encoder *encoder) +static int exynos_dp_create_connector(struct drm_encoder *encoder) { - struct exynos_dp_device *dp = display_to_dp(display); + struct exynos_dp_device *dp = encoder_to_dp(encoder); struct drm_connector *connector = &dp->connector; int ret; - dp->encoder = encoder; - /* Pre-empt DP connector creation if there's a bridge */ if (dp->bridge) { ret = 
exynos_drm_attach_lcd_bridge(dp, encoder); @@ -1054,20 +1052,22 @@ static int exynos_dp_create_connector(struct exynos_drm_display *display, return ret; } -static void exynos_dp_phy_init(struct exynos_dp_device *dp) +static bool exynos_dp_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { - if (dp->phy) - phy_power_on(dp->phy); + return true; } -static void exynos_dp_phy_exit(struct exynos_dp_device *dp) +static void exynos_dp_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { - if (dp->phy) - phy_power_off(dp->phy); } -static void exynos_dp_poweron(struct exynos_dp_device *dp) +static void exynos_dp_enable(struct drm_encoder *encoder) { + struct exynos_dp_device *dp = encoder_to_dp(encoder); struct exynos_drm_crtc *crtc = dp_to_crtc(dp); if (dp->dpms_mode == DRM_MODE_DPMS_ON) @@ -1084,14 +1084,17 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp) crtc->ops->clock_enable(dp_to_crtc(dp), true); clk_prepare_enable(dp->clock); - exynos_dp_phy_init(dp); + phy_power_on(dp->phy); exynos_dp_init_dp(dp); enable_irq(dp->irq); - exynos_dp_commit(&dp->display); + exynos_dp_commit(&dp->encoder); + + dp->dpms_mode = DRM_MODE_DPMS_ON; } -static void exynos_dp_poweroff(struct exynos_dp_device *dp) +static void exynos_dp_disable(struct drm_encoder *encoder) { + struct exynos_dp_device *dp = encoder_to_dp(encoder); struct exynos_drm_crtc *crtc = dp_to_crtc(dp); if (dp->dpms_mode != DRM_MODE_DPMS_ON) @@ -1106,7 +1109,7 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp) disable_irq(dp->irq); flush_work(&dp->hotplug_work); - exynos_dp_phy_exit(dp); + phy_power_off(dp->phy); clk_disable_unprepare(dp->clock); if (crtc->ops->clock_enable) @@ -1116,31 +1119,19 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp) if (drm_panel_unprepare(dp->panel)) DRM_ERROR("failed to turnoff the panel\n"); } -} - -static void exynos_dp_dpms(struct exynos_drm_display *display, int mode) -{ - struct exynos_dp_device *dp = display_to_dp(display); - switch (mode) { - case DRM_MODE_DPMS_ON: - exynos_dp_poweron(dp); - break; - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - case DRM_MODE_DPMS_OFF: - exynos_dp_poweroff(dp); - break; - default: - break; - } - dp->dpms_mode = mode; + dp->dpms_mode = DRM_MODE_DPMS_OFF; } -static struct exynos_drm_display_ops exynos_dp_display_ops = { - .create_connector = exynos_dp_create_connector, - .dpms = exynos_dp_dpms, - .commit = exynos_dp_commit, +static struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = { + .mode_fixup = exynos_dp_mode_fixup, + .mode_set = exynos_dp_mode_set, + .enable = exynos_dp_enable, + .disable = exynos_dp_disable, +}; + +static struct drm_encoder_funcs exynos_dp_encoder_funcs = { + .destroy = drm_encoder_cleanup, }; static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev) @@ -1219,9 +1210,10 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data) struct exynos_dp_device *dp = dev_get_drvdata(dev); struct platform_device *pdev = to_platform_device(dev); struct drm_device *drm_dev = data; + struct drm_encoder *encoder = &dp->encoder; struct resource *res; unsigned int irq_flags; - int ret = 0; + int pipe, ret = 0; dp->dev = &pdev->dev; dp->dpms_mode = DRM_MODE_DPMS_OFF; @@ -1297,7 +1289,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data) INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug); - 
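encoder_to_dp() above is the conversion's core idiom: each output driver now embeds its drm_encoder and upcasts with container_of(), retiring the exynos_drm_display indirection. Schematically (example_output is a hypothetical stand-in):

#include <linux/kernel.h>
#include <drm/drm_crtc.h>

struct example_output {
        struct drm_encoder encoder;     /* embedded, not a pointer */
        /* ...driver state... */
};

static inline struct example_output *
encoder_to_example(struct drm_encoder *e)
{
        return container_of(e, struct example_output, encoder);
}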
exynos_dp_phy_init(dp); + phy_power_on(dp->phy); exynos_dp_init_dp(dp); @@ -1311,7 +1303,28 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data) dp->drm_dev = drm_dev; - return exynos_drm_create_enc_conn(drm_dev, &dp->display); + pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev, + EXYNOS_DISPLAY_TYPE_LCD); + if (pipe < 0) + return pipe; + + encoder->possible_crtcs = 1 << pipe; + + DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); + + drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + + drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs); + + ret = exynos_dp_create_connector(encoder); + if (ret) { + DRM_ERROR("failed to create connector ret = %d\n", ret); + drm_encoder_cleanup(encoder); + return ret; + } + + return 0; } static void exynos_dp_unbind(struct device *dev, struct device *master, @@ -1319,7 +1332,7 @@ static void exynos_dp_unbind(struct device *dev, struct device *master, { struct exynos_dp_device *dp = dev_get_drvdata(dev); - exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF); + exynos_dp_disable(&dp->encoder); } static const struct component_ops exynos_dp_ops = { @@ -1338,8 +1351,6 @@ static int exynos_dp_probe(struct platform_device *pdev) if (!dp) return -ENOMEM; - dp->display.type = EXYNOS_DISPLAY_TYPE_LCD; - dp->display.ops = &exynos_dp_display_ops; platform_set_drvdata(pdev, dp); panel_node = of_parse_phandle(dev->of_node, "panel", 0); @@ -1377,7 +1388,7 @@ static int exynos_dp_suspend(struct device *dev) { struct exynos_dp_device *dp = dev_get_drvdata(dev); - exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF); + exynos_dp_disable(&dp->encoder); return 0; } @@ -1385,7 +1396,7 @@ static int exynos_dp_resume(struct device *dev) { struct exynos_dp_device *dp = dev_get_drvdata(dev); - exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_ON); + exynos_dp_enable(&dp->encoder); return 0; } #endif diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h index a4e799679669..e413b6f7b0e7 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.h +++ b/drivers/gpu/drm/exynos/exynos_dp_core.h @@ -147,11 +147,10 @@ struct link_train { }; struct exynos_dp_device { - struct exynos_drm_display display; + struct drm_encoder encoder; struct device *dev; struct drm_device *drm_dev; struct drm_connector connector; - struct drm_encoder *encoder; struct drm_panel *panel; struct drm_bridge *bridge; struct clk *clock; diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c deleted file mode 100644 index 24994ba10e28..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_buf.c +++ /dev/null @@ -1,186 +0,0 @@ -/* exynos_drm_buf.c - * - * Copyright (c) 2011 Samsung Electronics Co., Ltd. - * Author: Inki Dae <inki.dae@samsung.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
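The bind path above now registers the encoder directly. A sketch of that shared sequence, per the 4.3-era API (drm_encoder_init() did not yet take a name argument); example_bind_encoder is hypothetical:

static int example_bind_encoder(struct drm_device *drm_dev,
                                struct drm_encoder *encoder,
                                const struct drm_encoder_funcs *funcs,
                                const struct drm_encoder_helper_funcs *helper,
                                int pipe)
{
        /* Restrict the encoder to the CRTC found for its output type. */
        encoder->possible_crtcs = 1 << pipe;

        drm_encoder_init(drm_dev, encoder, funcs, DRM_MODE_ENCODER_TMDS);
        drm_encoder_helper_add(encoder, helper);

        return 0;
}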
- */ - -#include <drm/drmP.h> -#include <drm/exynos_drm.h> - -#include "exynos_drm_drv.h" -#include "exynos_drm_gem.h" -#include "exynos_drm_buf.h" -#include "exynos_drm_iommu.h" - -static int lowlevel_buffer_allocate(struct drm_device *dev, - unsigned int flags, struct exynos_drm_gem_buf *buf) -{ - int ret = 0; - enum dma_attr attr; - unsigned int nr_pages; - - if (buf->dma_addr) { - DRM_DEBUG_KMS("already allocated.\n"); - return 0; - } - - init_dma_attrs(&buf->dma_attrs); - - /* - * if EXYNOS_BO_CONTIG, fully physically contiguous memory - * region will be allocated else physically contiguous - * as possible. - */ - if (!(flags & EXYNOS_BO_NONCONTIG)) - dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs); - - /* - * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping - * else cachable mapping. - */ - if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE)) - attr = DMA_ATTR_WRITE_COMBINE; - else - attr = DMA_ATTR_NON_CONSISTENT; - - dma_set_attr(attr, &buf->dma_attrs); - dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs); - - nr_pages = buf->size >> PAGE_SHIFT; - - if (!is_drm_iommu_supported(dev)) { - dma_addr_t start_addr; - unsigned int i = 0; - - buf->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); - if (!buf->pages) { - DRM_ERROR("failed to allocate pages.\n"); - return -ENOMEM; - } - - buf->cookie = dma_alloc_attrs(dev->dev, - buf->size, - &buf->dma_addr, GFP_KERNEL, - &buf->dma_attrs); - if (!buf->cookie) { - DRM_ERROR("failed to allocate buffer.\n"); - ret = -ENOMEM; - goto err_free; - } - - start_addr = buf->dma_addr; - while (i < nr_pages) { - buf->pages[i] = phys_to_page(start_addr); - start_addr += PAGE_SIZE; - i++; - } - } else { - - buf->pages = dma_alloc_attrs(dev->dev, buf->size, - &buf->dma_addr, GFP_KERNEL, - &buf->dma_attrs); - if (!buf->pages) { - DRM_ERROR("failed to allocate buffer.\n"); - return -ENOMEM; - } - } - - buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages); - if (IS_ERR(buf->sgt)) { - DRM_ERROR("failed to get sg table.\n"); - ret = PTR_ERR(buf->sgt); - goto err_free_attrs; - } - - DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", - (unsigned long)buf->dma_addr, - buf->size); - - return ret; - -err_free_attrs: - dma_free_attrs(dev->dev, buf->size, buf->pages, - (dma_addr_t)buf->dma_addr, &buf->dma_attrs); - buf->dma_addr = (dma_addr_t)NULL; -err_free: - if (!is_drm_iommu_supported(dev)) - drm_free_large(buf->pages); - - return ret; -} - -static void lowlevel_buffer_deallocate(struct drm_device *dev, - unsigned int flags, struct exynos_drm_gem_buf *buf) -{ - if (!buf->dma_addr) { - DRM_DEBUG_KMS("dma_addr is invalid.\n"); - return; - } - - DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", - (unsigned long)buf->dma_addr, - buf->size); - - sg_free_table(buf->sgt); - - kfree(buf->sgt); - buf->sgt = NULL; - - if (!is_drm_iommu_supported(dev)) { - dma_free_attrs(dev->dev, buf->size, buf->cookie, - (dma_addr_t)buf->dma_addr, &buf->dma_attrs); - drm_free_large(buf->pages); - } else - dma_free_attrs(dev->dev, buf->size, buf->pages, - (dma_addr_t)buf->dma_addr, &buf->dma_attrs); - - buf->dma_addr = (dma_addr_t)NULL; -} - -struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev, - unsigned int size) -{ - struct exynos_drm_gem_buf *buffer; - - DRM_DEBUG_KMS("desired size = 0x%x\n", size); - - buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); - if (!buffer) - return NULL; - - buffer->size = size; - return buffer; -} - -void exynos_drm_fini_buf(struct drm_device *dev, - struct exynos_drm_gem_buf *buffer) -{ - 
kfree(buffer); - buffer = NULL; -} - -int exynos_drm_alloc_buf(struct drm_device *dev, - struct exynos_drm_gem_buf *buf, unsigned int flags) -{ - - /* - * allocate memory region and set the memory information - * to vaddr and dma_addr of a buffer object. - */ - if (lowlevel_buffer_allocate(dev, flags, buf) < 0) - return -ENOMEM; - - return 0; -} - -void exynos_drm_free_buf(struct drm_device *dev, - unsigned int flags, struct exynos_drm_gem_buf *buffer) -{ - - lowlevel_buffer_deallocate(dev, flags, buffer); -} diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h deleted file mode 100644 index a6412f19673c..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_buf.h +++ /dev/null @@ -1,33 +0,0 @@ -/* exynos_drm_buf.h - * - * Copyright (c) 2011 Samsung Electronics Co., Ltd. - * Author: Inki Dae <inki.dae@samsung.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#ifndef _EXYNOS_DRM_BUF_H_ -#define _EXYNOS_DRM_BUF_H_ - -/* create and initialize buffer object. */ -struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev, - unsigned int size); - -/* destroy buffer object. */ -void exynos_drm_fini_buf(struct drm_device *dev, - struct exynos_drm_gem_buf *buffer); - -/* allocate physical memory region and setup sgt. */ -int exynos_drm_alloc_buf(struct drm_device *dev, - struct exynos_drm_gem_buf *buf, - unsigned int flags); - -/* release physical memory region, and sgt. */ -void exynos_drm_free_buf(struct drm_device *dev, - unsigned int flags, - struct exynos_drm_gem_buf *buffer); - -#endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c index 4c9f972eaa07..c68a6a2a9b57 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_core.c +++ b/drivers/gpu/drm/exynos/exynos_drm_core.c @@ -15,46 +15,10 @@ #include <drm/drmP.h> #include "exynos_drm_drv.h" #include "exynos_drm_crtc.h" -#include "exynos_drm_encoder.h" #include "exynos_drm_fbdev.h" static LIST_HEAD(exynos_drm_subdrv_list); -int exynos_drm_create_enc_conn(struct drm_device *dev, - struct exynos_drm_display *display) -{ - struct drm_encoder *encoder; - int ret; - unsigned long possible_crtcs = 0; - - ret = exynos_drm_crtc_get_pipe_from_type(dev, display->type); - if (ret < 0) - return ret; - - possible_crtcs |= 1 << ret; - - /* create and initialize a encoder for this sub driver. 
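The deleted exynos_drm_buf.c boiled down to one dma_alloc_attrs() call parameterized by the GEM flags (its logic presumably folds into exynos_drm_gem.c, which this hunk does not show). The allocation core in the 4.3-era dma_attrs API, as a sketch (example_alloc is hypothetical):

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
#include <drm/exynos_drm.h>

static void *example_alloc(struct device *dev, size_t size,
                           dma_addr_t *dma_addr, unsigned int flags,
                           struct dma_attrs *attrs)
{
        init_dma_attrs(attrs);

        /* EXYNOS_BO_CONTIG requests physically contiguous memory. */
        if (!(flags & EXYNOS_BO_NONCONTIG))
                dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs);

        /* Write-combined unless explicitly cachable; no kernel map. */
        if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
                dma_set_attr(DMA_ATTR_WRITE_COMBINE, attrs);
        else
                dma_set_attr(DMA_ATTR_NON_CONSISTENT, attrs);
        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);

        return dma_alloc_attrs(dev, size, dma_addr, GFP_KERNEL, attrs);
}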
*/ - encoder = exynos_drm_encoder_create(dev, display, possible_crtcs); - if (!encoder) { - DRM_ERROR("failed to create encoder\n"); - return -EFAULT; - } - - display->encoder = encoder; - - ret = display->ops->create_connector(display, encoder); - if (ret) { - DRM_ERROR("failed to create connector ret = %d\n", ret); - goto err_destroy_encoder; - } - - return 0; - -err_destroy_encoder: - encoder->funcs->destroy(encoder); - return ret; -} - int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv) { if (!subdrv) diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 1610757230a5..c47899738eb4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -19,7 +19,6 @@ #include "exynos_drm_crtc.h" #include "exynos_drm_drv.h" -#include "exynos_drm_encoder.h" #include "exynos_drm_plane.h" static void exynos_drm_crtc_enable(struct drm_crtc *crtc) @@ -177,7 +176,7 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe) return -EPERM; if (exynos_crtc->ops->enable_vblank) - exynos_crtc->ops->enable_vblank(exynos_crtc); + return exynos_crtc->ops->enable_vblank(exynos_crtc); return 0; } @@ -195,24 +194,22 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe) exynos_crtc->ops->disable_vblank(exynos_crtc); } -void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe) +void exynos_drm_crtc_finish_pageflip(struct exynos_drm_crtc *exynos_crtc) { - struct exynos_drm_private *dev_priv = dev->dev_private; - struct drm_crtc *drm_crtc = dev_priv->crtc[pipe]; - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc); + struct drm_crtc *crtc = &exynos_crtc->base; unsigned long flags; - spin_lock_irqsave(&dev->event_lock, flags); + spin_lock_irqsave(&crtc->dev->event_lock, flags); if (exynos_crtc->event) { - drm_send_vblank_event(dev, -1, exynos_crtc->event); - drm_vblank_put(dev, pipe); + drm_crtc_send_vblank_event(crtc, exynos_crtc->event); + drm_crtc_vblank_put(crtc); wake_up(&exynos_crtc->pending_flip_queue); } exynos_crtc->event = NULL; - spin_unlock_irqrestore(&dev->event_lock, flags); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); } void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb) @@ -239,7 +236,7 @@ void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb) } int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, - unsigned int out_type) + enum exynos_drm_output_type out_type) { struct drm_crtc *crtc; diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h index 0f3aa70818e3..9e7027d6c2f6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h @@ -25,12 +25,12 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, void *context); int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe); void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe); -void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe); +void exynos_drm_crtc_finish_pageflip(struct exynos_drm_crtc *exynos_crtc); void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb); /* This function gets pipe value to crtc device matched with out_type. 
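The drm_crtc_vblank_put() in the reworked finish_pageflip above balances a reference taken when the flip was queued. A sketch of that producing side, assuming the usual flip flow (example_queue_flip is hypothetical, not from this diff):

static int example_queue_flip(struct exynos_drm_crtc *exynos_crtc,
                              struct drm_pending_vblank_event *event)
{
        struct drm_crtc *crtc = &exynos_crtc->base;
        int ret;

        /* Hold a vblank reference until the flip completes; released
         * by drm_crtc_vblank_put() in finish_pageflip. */
        ret = drm_crtc_vblank_get(crtc);
        if (ret)
                return ret;

        exynos_crtc->event = event;     /* completed from the IRQ path */
        return 0;
}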
*/ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, - unsigned int out_type); + enum exynos_drm_output_type out_type); /* * This function calls the crtc device(manager)'s te_handler() callback diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c deleted file mode 100644 index cd485c091b30..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c +++ /dev/null @@ -1,286 +0,0 @@ -/* exynos_drm_dmabuf.c - * - * Copyright (c) 2012 Samsung Electronics Co., Ltd. - * Author: Inki Dae <inki.dae@samsung.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include <drm/drmP.h> -#include <drm/exynos_drm.h> -#include "exynos_drm_dmabuf.h" -#include "exynos_drm_drv.h" -#include "exynos_drm_gem.h" - -#include <linux/dma-buf.h> - -struct exynos_drm_dmabuf_attachment { - struct sg_table sgt; - enum dma_data_direction dir; - bool is_mapped; -}; - -static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf) -{ - return to_exynos_gem_obj(buf->priv); -} - -static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf, - struct device *dev, - struct dma_buf_attachment *attach) -{ - struct exynos_drm_dmabuf_attachment *exynos_attach; - - exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL); - if (!exynos_attach) - return -ENOMEM; - - exynos_attach->dir = DMA_NONE; - attach->priv = exynos_attach; - - return 0; -} - -static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf, - struct dma_buf_attachment *attach) -{ - struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv; - struct sg_table *sgt; - - if (!exynos_attach) - return; - - sgt = &exynos_attach->sgt; - - if (exynos_attach->dir != DMA_NONE) - dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, - exynos_attach->dir); - - sg_free_table(sgt); - kfree(exynos_attach); - attach->priv = NULL; -} - -static struct sg_table * - exynos_gem_map_dma_buf(struct dma_buf_attachment *attach, - enum dma_data_direction dir) -{ - struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv; - struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf); - struct drm_device *dev = gem_obj->base.dev; - struct exynos_drm_gem_buf *buf; - struct scatterlist *rd, *wr; - struct sg_table *sgt = NULL; - unsigned int i; - int nents, ret; - - /* just return current sgt if already requested. 
*/ - if (exynos_attach->dir == dir && exynos_attach->is_mapped) - return &exynos_attach->sgt; - - buf = gem_obj->buffer; - if (!buf) { - DRM_ERROR("buffer is null.\n"); - return ERR_PTR(-ENOMEM); - } - - sgt = &exynos_attach->sgt; - - ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL); - if (ret) { - DRM_ERROR("failed to alloc sgt.\n"); - return ERR_PTR(-ENOMEM); - } - - mutex_lock(&dev->struct_mutex); - - rd = buf->sgt->sgl; - wr = sgt->sgl; - for (i = 0; i < sgt->orig_nents; ++i) { - sg_set_page(wr, sg_page(rd), rd->length, rd->offset); - rd = sg_next(rd); - wr = sg_next(wr); - } - - if (dir != DMA_NONE) { - nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir); - if (!nents) { - DRM_ERROR("failed to map sgl with iommu.\n"); - sg_free_table(sgt); - sgt = ERR_PTR(-EIO); - goto err_unlock; - } - } - - exynos_attach->is_mapped = true; - exynos_attach->dir = dir; - attach->priv = exynos_attach; - - DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size); - -err_unlock: - mutex_unlock(&dev->struct_mutex); - return sgt; -} - -static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach, - struct sg_table *sgt, - enum dma_data_direction dir) -{ - /* Nothing to do. */ -} - -static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, - unsigned long page_num) -{ - /* TODO */ - - return NULL; -} - -static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, - unsigned long page_num, - void *addr) -{ - /* TODO */ -} - -static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf, - unsigned long page_num) -{ - /* TODO */ - - return NULL; -} - -static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf, - unsigned long page_num, void *addr) -{ - /* TODO */ -} - -static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf, - struct vm_area_struct *vma) -{ - return -ENOTTY; -} - -static struct dma_buf_ops exynos_dmabuf_ops = { - .attach = exynos_gem_attach_dma_buf, - .detach = exynos_gem_detach_dma_buf, - .map_dma_buf = exynos_gem_map_dma_buf, - .unmap_dma_buf = exynos_gem_unmap_dma_buf, - .kmap = exynos_gem_dmabuf_kmap, - .kmap_atomic = exynos_gem_dmabuf_kmap_atomic, - .kunmap = exynos_gem_dmabuf_kunmap, - .kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic, - .mmap = exynos_gem_dmabuf_mmap, - .release = drm_gem_dmabuf_release, -}; - -struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev, - struct drm_gem_object *obj, int flags) -{ - struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); - DEFINE_DMA_BUF_EXPORT_INFO(exp_info); - - exp_info.ops = &exynos_dmabuf_ops; - exp_info.size = exynos_gem_obj->base.size; - exp_info.flags = flags; - exp_info.priv = obj; - - return dma_buf_export(&exp_info); -} - -struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, - struct dma_buf *dma_buf) -{ - struct dma_buf_attachment *attach; - struct sg_table *sgt; - struct scatterlist *sgl; - struct exynos_drm_gem_obj *exynos_gem_obj; - struct exynos_drm_gem_buf *buffer; - int ret; - - /* is this one of own objects? */ - if (dma_buf->ops == &exynos_dmabuf_ops) { - struct drm_gem_object *obj; - - obj = dma_buf->priv; - - /* is it from our device? */ - if (obj->dev == drm_dev) { - /* - * Importing dmabuf exported from out own gem increases - * refcount on gem itself instead of f_count of dmabuf. 
- */ - drm_gem_object_reference(obj); - return obj; - } - } - - attach = dma_buf_attach(dma_buf, drm_dev->dev); - if (IS_ERR(attach)) - return ERR_PTR(-EINVAL); - - get_dma_buf(dma_buf); - - sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); - if (IS_ERR(sgt)) { - ret = PTR_ERR(sgt); - goto err_buf_detach; - } - - buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); - if (!buffer) { - ret = -ENOMEM; - goto err_unmap_attach; - } - - exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size); - if (!exynos_gem_obj) { - ret = -ENOMEM; - goto err_free_buffer; - } - - sgl = sgt->sgl; - - buffer->size = dma_buf->size; - buffer->dma_addr = sg_dma_address(sgl); - - if (sgt->nents == 1) { - /* always physically continuous memory if sgt->nents is 1. */ - exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; - } else { - /* - * this case could be CONTIG or NONCONTIG type but for now - * sets NONCONTIG. - * TODO. we have to find a way that exporter can notify - * the type of its own buffer to importer. - */ - exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG; - } - - exynos_gem_obj->buffer = buffer; - buffer->sgt = sgt; - exynos_gem_obj->base.import_attach = attach; - - DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr, - buffer->size); - - return &exynos_gem_obj->base; - -err_free_buffer: - kfree(buffer); - buffer = NULL; -err_unmap_attach: - dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); -err_buf_detach: - dma_buf_detach(dma_buf, attach); - dma_buf_put(dma_buf); - - return ERR_PTR(ret); -} diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h deleted file mode 100644 index 886de9ff484d..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h +++ /dev/null @@ -1,20 +0,0 @@ -/* exynos_drm_dmabuf.h - * - * Copyright (c) 2012 Samsung Electronics Co., Ltd. - * Author: Inki Dae <inki.dae@samsung.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
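With the bespoke dma-buf code deleted, the driver switches to the generic PRIME helpers and only supplies GEM-specific callbacks, as the exynos_drm_drv.c hunk later in this diff shows. A sketch of that wiring, with fields unrelated to PRIME omitted:

static struct drm_driver example_driver = {
        /* generic PRIME plumbing */
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        .gem_prime_export       = drm_gem_prime_export,
        .gem_prime_import       = drm_gem_prime_import,
        /* driver hooks: translate GEM objects to/from sg_tables */
        .gem_prime_get_sg_table = exynos_drm_gem_prime_get_sg_table,
        .gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
        .gem_prime_vmap         = exynos_drm_gem_prime_vmap,
        .gem_prime_vunmap       = exynos_drm_gem_prime_vunmap,
};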
- */ - -#ifndef _EXYNOS_DRM_DMABUF_H_ -#define _EXYNOS_DRM_DMABUF_H_ - -struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev, - struct drm_gem_object *obj, int flags); - -struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, - struct dma_buf *dma_buf); -#endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c index 7cb6595c1894..c748b8790de3 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c @@ -20,26 +20,24 @@ #include <video/of_videomode.h> #include <video/videomode.h> -#include "exynos_drm_drv.h" +#include "exynos_drm_crtc.h" struct exynos_dpi { - struct exynos_drm_display display; + struct drm_encoder encoder; struct device *dev; struct device_node *panel_node; struct drm_panel *panel; struct drm_connector connector; - struct drm_encoder *encoder; struct videomode *vm; - int dpms_mode; }; #define connector_to_dpi(c) container_of(c, struct exynos_dpi, connector) -static inline struct exynos_dpi *display_to_dpi(struct exynos_drm_display *d) +static inline struct exynos_dpi *encoder_to_dpi(struct drm_encoder *e) { - return container_of(d, struct exynos_dpi, display); + return container_of(e, struct exynos_dpi, encoder); } static enum drm_connector_status @@ -99,7 +97,7 @@ exynos_dpi_best_encoder(struct drm_connector *connector) { struct exynos_dpi *ctx = connector_to_dpi(connector); - return ctx->encoder; + return &ctx->encoder; } static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = { @@ -107,15 +105,12 @@ static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = { .best_encoder = exynos_dpi_best_encoder, }; -static int exynos_dpi_create_connector(struct exynos_drm_display *display, - struct drm_encoder *encoder) +static int exynos_dpi_create_connector(struct drm_encoder *encoder) { - struct exynos_dpi *ctx = display_to_dpi(display); + struct exynos_dpi *ctx = encoder_to_dpi(encoder); struct drm_connector *connector = &ctx->connector; int ret; - ctx->encoder = encoder; - connector->polled = DRM_CONNECTOR_POLL_HPD; ret = drm_connector_init(encoder->dev, connector, @@ -133,46 +128,48 @@ static int exynos_dpi_create_connector(struct exynos_drm_display *display, return 0; } -static void exynos_dpi_poweron(struct exynos_dpi *ctx) +static bool exynos_dpi_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void exynos_dpi_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { +} + +static void exynos_dpi_enable(struct drm_encoder *encoder) +{ + struct exynos_dpi *ctx = encoder_to_dpi(encoder); + if (ctx->panel) { drm_panel_prepare(ctx->panel); drm_panel_enable(ctx->panel); } } -static void exynos_dpi_poweroff(struct exynos_dpi *ctx) +static void exynos_dpi_disable(struct drm_encoder *encoder) { + struct exynos_dpi *ctx = encoder_to_dpi(encoder); + if (ctx->panel) { drm_panel_disable(ctx->panel); drm_panel_unprepare(ctx->panel); } } -static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode) -{ - struct exynos_dpi *ctx = display_to_dpi(display); - - switch (mode) { - case DRM_MODE_DPMS_ON: - if (ctx->dpms_mode != DRM_MODE_DPMS_ON) - exynos_dpi_poweron(ctx); - break; - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - case DRM_MODE_DPMS_OFF: - if (ctx->dpms_mode == DRM_MODE_DPMS_ON) - exynos_dpi_poweroff(ctx); - break; - default: - break; - } 
- ctx->dpms_mode = mode; -} +static struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = { + .mode_fixup = exynos_dpi_mode_fixup, + .mode_set = exynos_dpi_mode_set, + .enable = exynos_dpi_enable, + .disable = exynos_dpi_disable, +}; -static struct exynos_drm_display_ops exynos_dpi_display_ops = { - .create_connector = exynos_dpi_create_connector, - .dpms = exynos_dpi_dpms +static struct drm_encoder_funcs exynos_dpi_encoder_funcs = { + .destroy = drm_encoder_cleanup, }; /* of_* functions will be removed after merge of of_graph patches */ @@ -299,7 +296,34 @@ static int exynos_dpi_parse_dt(struct exynos_dpi *ctx) return 0; } -struct exynos_drm_display *exynos_dpi_probe(struct device *dev) +int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder) +{ + int ret; + + ret = exynos_drm_crtc_get_pipe_from_type(dev, EXYNOS_DISPLAY_TYPE_LCD); + if (ret < 0) + return ret; + + encoder->possible_crtcs = 1 << ret; + + DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); + + drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + + drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs); + + ret = exynos_dpi_create_connector(encoder); + if (ret) { + DRM_ERROR("failed to create connector ret = %d\n", ret); + drm_encoder_cleanup(encoder); + return ret; + } + + return 0; +} + +struct drm_encoder *exynos_dpi_probe(struct device *dev) { struct exynos_dpi *ctx; int ret; @@ -308,10 +332,7 @@ struct exynos_drm_display *exynos_dpi_probe(struct device *dev) if (!ctx) return ERR_PTR(-ENOMEM); - ctx->display.type = EXYNOS_DISPLAY_TYPE_LCD; - ctx->display.ops = &exynos_dpi_display_ops; ctx->dev = dev; - ctx->dpms_mode = DRM_MODE_DPMS_OFF; ret = exynos_dpi_parse_dt(ctx); if (ret < 0) { @@ -325,14 +346,14 @@ struct exynos_drm_display *exynos_dpi_probe(struct device *dev) return ERR_PTR(-EPROBE_DEFER); } - return &ctx->display; + return &ctx->encoder; } -int exynos_dpi_remove(struct exynos_drm_display *display) +int exynos_dpi_remove(struct drm_encoder *encoder) { - struct exynos_dpi *ctx = display_to_dpi(display); + struct exynos_dpi *ctx = encoder_to_dpi(encoder); - exynos_dpi_dpms(&ctx->display, DRM_MODE_DPMS_OFF); + exynos_dpi_disable(&ctx->encoder); if (ctx->panel) drm_panel_detach(ctx->panel); diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 63a68c60a353..fa5194caf259 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -21,13 +21,11 @@ #include "exynos_drm_drv.h" #include "exynos_drm_crtc.h" -#include "exynos_drm_encoder.h" #include "exynos_drm_fbdev.h" #include "exynos_drm_fb.h" #include "exynos_drm_gem.h" #include "exynos_drm_plane.h" #include "exynos_drm_vidi.h" -#include "exynos_drm_dmabuf.h" #include "exynos_drm_g2d.h" #include "exynos_drm_ipp.h" #include "exynos_drm_iommu.h" @@ -41,7 +39,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) { struct exynos_drm_private *private; - int ret; + struct drm_encoder *encoder; + unsigned int clone_mask; + int cnt, ret; private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL); if (!private) @@ -67,7 +67,13 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) exynos_drm_mode_config_init(dev); /* setup possible_clones. 
*/ - exynos_drm_encoder_setup(dev); + cnt = 0; + clone_mask = 0; + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) + clone_mask |= (1 << (cnt++)); + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) + encoder->possible_clones = clone_mask; platform_set_drvdata(dev->platformdev, dev); @@ -297,8 +303,12 @@ static struct drm_driver exynos_drm_driver = { .dumb_destroy = drm_gem_dumb_destroy, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_export = exynos_dmabuf_prime_export, - .gem_prime_import = exynos_dmabuf_prime_import, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_get_sg_table = exynos_drm_gem_prime_get_sg_table, + .gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table, + .gem_prime_vmap = exynos_drm_gem_prime_vmap, + .gem_prime_vunmap = exynos_drm_gem_prime_vunmap, .ioctls = exynos_ioctls, .num_ioctls = ARRAY_SIZE(exynos_ioctls), .fops = &exynos_drm_driver_fops, @@ -345,9 +355,6 @@ static struct platform_driver exynos_drm_platform_driver; * because connector requires pipe number of its crtc during initialization. */ static struct platform_driver *const exynos_drm_kms_drivers[] = { -#ifdef CONFIG_DRM_EXYNOS_VIDI - &vidi_driver, -#endif #ifdef CONFIG_DRM_EXYNOS_FIMD &fimd_driver, #endif @@ -370,6 +377,9 @@ static struct platform_driver *const exynos_drm_kms_drivers[] = { &mixer_driver, &hdmi_driver, #endif +#ifdef CONFIG_DRM_EXYNOS_VIDI + &vidi_driver, +#endif }; static struct platform_driver *const exynos_drm_non_kms_drivers[] = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index dd00f160c1e5..6b8a30f23473 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -44,23 +44,14 @@ enum exynos_drm_output_type { * - the unit is screen coordinates. * @src_y: offset y on a framebuffer to be displayed. * - the unit is screen coordinates. - * @src_width: width of a partial image to be displayed from framebuffer. - * @src_height: height of a partial image to be displayed from framebuffer. - * @fb_width: width of a framebuffer. - * @fb_height: height of a framebuffer. + * @src_w: width of a partial image to be displayed from framebuffer. + * @src_h: height of a partial image to be displayed from framebuffer. * @crtc_x: offset x on hardware screen. * @crtc_y: offset y on hardware screen. - * @crtc_width: window width to be displayed (hardware screen). - * @crtc_height: window height to be displayed (hardware screen). - * @mode_width: width of screen mode. - * @mode_height: height of screen mode. + * @crtc_w: window width to be displayed (hardware screen). + * @crtc_h: window height to be displayed (hardware screen). * @h_ratio: horizontal scaling ratio, 16.16 fixed point * @v_ratio: vertical scaling ratio, 16.16 fixed point - * @refresh: refresh rate. - * @scan_flag: interlace or progressive way. - * (it could be DRM_MODE_FLAG_*) - * @bpp: pixel size.(in bit) - * @pixel_format: fourcc pixel format of this overlay * @dma_addr: array of bus(accessed by dma) address to the memory region * allocated for a overlay. * @zpos: order of overlay layer(z position). 
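The open-coded possible_clones setup above gives every encoder a mask covering all registered encoders, one bit per encoder in list order. In standalone form (example_setup_possible_clones is hypothetical):

static void example_setup_possible_clones(struct drm_device *dev)
{
        struct drm_encoder *encoder;
        unsigned int clone_mask = 0;
        int cnt = 0;

        /* One bit per registered encoder... */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
                clone_mask |= 1 << cnt++;

        /* ...and every encoder may clone any of them. */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
                encoder->possible_clones = clone_mask;
}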
@@ -73,76 +64,19 @@ struct exynos_drm_plane { struct drm_plane base; unsigned int src_x; unsigned int src_y; - unsigned int src_width; - unsigned int src_height; - unsigned int fb_width; - unsigned int fb_height; + unsigned int src_w; + unsigned int src_h; unsigned int crtc_x; unsigned int crtc_y; - unsigned int crtc_width; - unsigned int crtc_height; - unsigned int mode_width; - unsigned int mode_height; + unsigned int crtc_w; + unsigned int crtc_h; unsigned int h_ratio; unsigned int v_ratio; - unsigned int refresh; - unsigned int scan_flag; - unsigned int bpp; - unsigned int pitch; - uint32_t pixel_format; dma_addr_t dma_addr[MAX_FB_BUFFER]; unsigned int zpos; }; /* - * Exynos DRM Display Structure. - * - this structure is common to analog tv, digital tv and lcd panel. - * - * @create_connector: initialize and register a new connector - * @remove: cleans up the display for removal - * @mode_fixup: fix mode data comparing to hw specific display mode. - * @mode_set: convert drm_display_mode to hw specific display mode and - * would be called by encoder->mode_set(). - * @check_mode: check if mode is valid or not. - * @dpms: display device on or off. - * @commit: apply changes to hw - */ -struct exynos_drm_display; -struct exynos_drm_display_ops { - int (*create_connector)(struct exynos_drm_display *display, - struct drm_encoder *encoder); - void (*remove)(struct exynos_drm_display *display); - void (*mode_fixup)(struct exynos_drm_display *display, - struct drm_connector *connector, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - void (*mode_set)(struct exynos_drm_display *display, - struct drm_display_mode *mode); - int (*check_mode)(struct exynos_drm_display *display, - struct drm_display_mode *mode); - void (*dpms)(struct exynos_drm_display *display, int mode); - void (*commit)(struct exynos_drm_display *display); -}; - -/* - * Exynos drm display structure, maps 1:1 with an encoder/connector - * - * @list: the list entry for this manager - * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI. - * @encoder: encoder object this display maps to - * @connector: connector object this display maps to - * @ops: pointer to callbacks for exynos drm specific functionality - * @ctx: A pointer to the display's implementation specific context - */ -struct exynos_drm_display { - struct list_head list; - enum exynos_drm_output_type type; - struct drm_encoder *encoder; - struct drm_connector *connector; - struct exynos_drm_display_ops *ops; -}; - -/* * Exynos drm crtc ops * * @enable: enable the device @@ -153,8 +87,8 @@ struct exynos_drm_display { * @disable_vblank: specific driver callback for disabling vblank interrupt. * @wait_for_vblank: wait for vblank interrupt to make sure that * hardware overlay is updated. - * @win_commit: apply hardware specific overlay data to registers. - * @win_disable: disable hardware specific overlay. + * @update_plane: apply hardware specific overlay data to registers. + * @disable_plane: disable hardware specific overlay. * @te_handler: trigger to transfer video image at the tearing effect * synchronization signal if there is a page flip request. 
* @clock_enable: optional function enabling/disabling display domain clock, @@ -173,11 +107,12 @@ struct exynos_drm_crtc_ops { int (*enable_vblank)(struct exynos_drm_crtc *crtc); void (*disable_vblank)(struct exynos_drm_crtc *crtc); void (*wait_for_vblank)(struct exynos_drm_crtc *crtc); - void (*win_commit)(struct exynos_drm_crtc *crtc, unsigned int zpos); - void (*win_disable)(struct exynos_drm_crtc *crtc, unsigned int zpos); + void (*update_plane)(struct exynos_drm_crtc *crtc, + struct exynos_drm_plane *plane); + void (*disable_plane)(struct exynos_drm_crtc *crtc, + struct exynos_drm_plane *plane); void (*te_handler)(struct exynos_drm_crtc *crtc); void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable); - void (*clear_channels)(struct exynos_drm_crtc *crtc); }; /* @@ -285,20 +220,23 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file); void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file); #ifdef CONFIG_DRM_EXYNOS_DPI -struct exynos_drm_display * exynos_dpi_probe(struct device *dev); -int exynos_dpi_remove(struct exynos_drm_display *display); +struct drm_encoder *exynos_dpi_probe(struct device *dev); +int exynos_dpi_remove(struct drm_encoder *encoder); +int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder); #else -static inline struct exynos_drm_display * +static inline struct drm_encoder * exynos_dpi_probe(struct device *dev) { return NULL; } -static inline int exynos_dpi_remove(struct exynos_drm_display *display) +static inline int exynos_dpi_remove(struct drm_encoder *encoder) +{ + return 0; +} +static inline int exynos_dpi_bind(struct drm_device *dev, + struct drm_encoder *encoder) { return 0; } #endif -/* This function creates a encoder and a connector, and initializes them. 
*/ -int exynos_drm_create_enc_conn(struct drm_device *dev, - struct exynos_drm_display *display); extern struct platform_driver fimd_driver; extern struct platform_driver exynos5433_decon_driver; diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 0e58b36cb8c2..12b03b364703 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -259,7 +259,7 @@ struct exynos_dsi_driver_data { }; struct exynos_dsi { - struct exynos_drm_display display; + struct drm_encoder encoder; struct mipi_dsi_host dsi_host; struct drm_connector connector; struct device_node *panel_node; @@ -295,9 +295,9 @@ struct exynos_dsi { #define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host) #define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector) -static inline struct exynos_dsi *display_to_dsi(struct exynos_drm_display *d) +static inline struct exynos_dsi *encoder_to_dsi(struct drm_encoder *e) { - return container_of(d, struct exynos_dsi, display); + return container_of(e, struct exynos_dsi, encoder); } enum reg_idx { @@ -1272,7 +1272,7 @@ static irqreturn_t exynos_dsi_irq(int irq, void *dev_id) static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id) { struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id; - struct drm_encoder *encoder = dsi->display.encoder; + struct drm_encoder *encoder = &dsi->encoder; if (dsi->state & DSIM_STATE_VIDOUT_AVAILABLE) exynos_drm_crtc_te_handler(encoder->crtc); @@ -1518,16 +1518,17 @@ static void exynos_dsi_poweroff(struct exynos_dsi *dsi) dev_err(dsi->dev, "cannot disable regulators %d\n", ret); } -static int exynos_dsi_enable(struct exynos_dsi *dsi) +static void exynos_dsi_enable(struct drm_encoder *encoder) { + struct exynos_dsi *dsi = encoder_to_dsi(encoder); int ret; if (dsi->state & DSIM_STATE_ENABLED) - return 0; + return; ret = exynos_dsi_poweron(dsi); if (ret < 0) - return ret; + return; dsi->state |= DSIM_STATE_ENABLED; @@ -1535,7 +1536,7 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi) if (ret < 0) { dsi->state &= ~DSIM_STATE_ENABLED; exynos_dsi_poweroff(dsi); - return ret; + return; } exynos_dsi_set_display_mode(dsi); @@ -1547,16 +1548,16 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi) exynos_dsi_set_display_enable(dsi, false); drm_panel_unprepare(dsi->panel); exynos_dsi_poweroff(dsi); - return ret; + return; } dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE; - - return 0; } -static void exynos_dsi_disable(struct exynos_dsi *dsi) +static void exynos_dsi_disable(struct drm_encoder *encoder) { + struct exynos_dsi *dsi = encoder_to_dsi(encoder); + if (!(dsi->state & DSIM_STATE_ENABLED)) return; @@ -1571,26 +1572,6 @@ static void exynos_dsi_disable(struct exynos_dsi *dsi) exynos_dsi_poweroff(dsi); } -static void exynos_dsi_dpms(struct exynos_drm_display *display, int mode) -{ - struct exynos_dsi *dsi = display_to_dsi(display); - - if (dsi->panel) { - switch (mode) { - case DRM_MODE_DPMS_ON: - exynos_dsi_enable(dsi); - break; - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - case DRM_MODE_DPMS_OFF: - exynos_dsi_disable(dsi); - break; - default: - break; - } - } -} - static enum drm_connector_status exynos_dsi_detect(struct drm_connector *connector, bool force) { @@ -1601,10 +1582,10 @@ exynos_dsi_detect(struct drm_connector *connector, bool force) if (dsi->panel) drm_panel_attach(dsi->panel, &dsi->connector); } else if (!dsi->panel_node) { - struct exynos_drm_display *display; + struct drm_encoder *encoder; - display = 
platform_get_drvdata(to_platform_device(dsi->dev)); - exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF); + encoder = platform_get_drvdata(to_platform_device(dsi->dev)); + exynos_dsi_disable(encoder); drm_panel_detach(dsi->panel); dsi->panel = NULL; } @@ -1647,7 +1628,7 @@ exynos_dsi_best_encoder(struct drm_connector *connector) { struct exynos_dsi *dsi = connector_to_dsi(connector); - return dsi->display.encoder; + return &dsi->encoder; } static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = { @@ -1655,10 +1636,9 @@ static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = { .best_encoder = exynos_dsi_best_encoder, }; -static int exynos_dsi_create_connector(struct exynos_drm_display *display, - struct drm_encoder *encoder) +static int exynos_dsi_create_connector(struct drm_encoder *encoder) { - struct exynos_dsi *dsi = display_to_dsi(display); + struct exynos_dsi *dsi = encoder_to_dsi(encoder); struct drm_connector *connector = &dsi->connector; int ret; @@ -1679,26 +1659,40 @@ static int exynos_dsi_create_connector(struct exynos_drm_display *display, return 0; } -static void exynos_dsi_mode_set(struct exynos_drm_display *display, - struct drm_display_mode *mode) +static bool exynos_dsi_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { - struct exynos_dsi *dsi = display_to_dsi(display); - struct videomode *vm = &dsi->vm; + return true; +} - vm->hactive = mode->hdisplay; - vm->vactive = mode->vdisplay; - vm->vfront_porch = mode->vsync_start - mode->vdisplay; - vm->vback_porch = mode->vtotal - mode->vsync_end; - vm->vsync_len = mode->vsync_end - mode->vsync_start; - vm->hfront_porch = mode->hsync_start - mode->hdisplay; - vm->hback_porch = mode->htotal - mode->hsync_end; - vm->hsync_len = mode->hsync_end - mode->hsync_start; +static void exynos_dsi_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct exynos_dsi *dsi = encoder_to_dsi(encoder); + struct videomode *vm = &dsi->vm; + struct drm_display_mode *m = adjusted_mode; + + vm->hactive = m->hdisplay; + vm->vactive = m->vdisplay; + vm->vfront_porch = m->vsync_start - m->vdisplay; + vm->vback_porch = m->vtotal - m->vsync_end; + vm->vsync_len = m->vsync_end - m->vsync_start; + vm->hfront_porch = m->hsync_start - m->hdisplay; + vm->hback_porch = m->htotal - m->hsync_end; + vm->hsync_len = m->hsync_end - m->hsync_start; } -static struct exynos_drm_display_ops exynos_dsi_display_ops = { - .create_connector = exynos_dsi_create_connector, +static struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = { + .mode_fixup = exynos_dsi_mode_fixup, .mode_set = exynos_dsi_mode_set, - .dpms = exynos_dsi_dpms + .enable = exynos_dsi_enable, + .disable = exynos_dsi_disable, +}; + +static struct drm_encoder_funcs exynos_dsi_encoder_funcs = { + .destroy = drm_encoder_cleanup, }; MODULE_DEVICE_TABLE(of, exynos_dsi_of_match); @@ -1821,22 +1815,35 @@ end: static int exynos_dsi_bind(struct device *dev, struct device *master, void *data) { - struct exynos_drm_display *display = dev_get_drvdata(dev); - struct exynos_dsi *dsi = display_to_dsi(display); + struct drm_encoder *encoder = dev_get_drvdata(dev); + struct exynos_dsi *dsi = encoder_to_dsi(encoder); struct drm_device *drm_dev = data; struct drm_bridge *bridge; int ret; - ret = exynos_drm_create_enc_conn(drm_dev, display); + ret = exynos_drm_crtc_get_pipe_from_type(drm_dev, + EXYNOS_DISPLAY_TYPE_LCD); + if (ret < 0) + return 
ret; + + encoder->possible_crtcs = 1 << ret; + + DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); + + drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + + drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs); + + ret = exynos_dsi_create_connector(encoder); if (ret) { - DRM_ERROR("Encoder create [%d] failed with %d\n", - display->type, ret); + DRM_ERROR("failed to create connector ret = %d\n", ret); + drm_encoder_cleanup(encoder); return ret; } bridge = of_drm_find_bridge(dsi->bridge_node); if (bridge) { - display->encoder->bridge = bridge; drm_bridge_attach(drm_dev, bridge); } @@ -1846,10 +1853,10 @@ static int exynos_dsi_bind(struct device *dev, struct device *master, static void exynos_dsi_unbind(struct device *dev, struct device *master, void *data) { - struct exynos_drm_display *display = dev_get_drvdata(dev); - struct exynos_dsi *dsi = display_to_dsi(display); + struct drm_encoder *encoder = dev_get_drvdata(dev); + struct exynos_dsi *dsi = encoder_to_dsi(encoder); - exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF); + exynos_dsi_disable(encoder); mipi_dsi_host_unregister(&dsi->dsi_host); } @@ -1870,9 +1877,6 @@ static int exynos_dsi_probe(struct platform_device *pdev) if (!dsi) return -ENOMEM; - dsi->display.type = EXYNOS_DISPLAY_TYPE_LCD; - dsi->display.ops = &exynos_dsi_display_ops; - /* To be checked as invalid one */ dsi->te_gpio = -ENOENT; @@ -1948,7 +1952,7 @@ static int exynos_dsi_probe(struct platform_device *pdev) return ret; } - platform_set_drvdata(pdev, &dsi->display); + platform_set_drvdata(pdev, &dsi->encoder); return component_add(dev, &exynos_dsi_component_ops); } diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c deleted file mode 100644 index 7b89fd520e45..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c +++ /dev/null @@ -1,174 +0,0 @@ -/* exynos_drm_encoder.c - * - * Copyright (c) 2011 Samsung Electronics Co., Ltd. - * Authors: - * Inki Dae <inki.dae@samsung.com> - * Joonyoung Shim <jy0922.shim@samsung.com> - * Seung-Woo Kim <sw0312.kim@samsung.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include <drm/drmP.h> -#include <drm/drm_crtc_helper.h> - -#include "exynos_drm_drv.h" -#include "exynos_drm_encoder.h" - -#define to_exynos_encoder(x) container_of(x, struct exynos_drm_encoder,\ - drm_encoder) - -/* - * exynos specific encoder structure. - * - * @drm_encoder: encoder object. 
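Note: the pattern this series applies everywhere is to embed struct drm_encoder directly in the driver context and recover the context with container_of() (see encoder_to_dsi() above), replacing the exynos_drm_display indirection whose last user, exynos_drm_encoder.c, is deleted below. A self-contained sketch of the embed-and-recover idiom, with stand-in types rather than the real DRM structures:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_encoder { int dummy; };            /* stand-in, not the real struct */
struct dsi_ctx { int state; struct drm_encoder encoder; };

static struct dsi_ctx *encoder_to_ctx(struct drm_encoder *e)
{
	return container_of(e, struct dsi_ctx, encoder);
}

int main(void)
{
	struct dsi_ctx ctx = { .state = 42 };
	/* one pointer subtraction; no separate allocation or back-pointer */
	printf("%d\n", encoder_to_ctx(&ctx.encoder)->state);   /* prints 42 */
	return 0;
}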
- * @display: the display structure that maps to this encoder - */ -struct exynos_drm_encoder { - struct drm_encoder drm_encoder; - struct exynos_drm_display *display; -}; - -static bool -exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct drm_device *dev = encoder->dev; - struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); - struct exynos_drm_display *display = exynos_encoder->display; - struct drm_connector *connector; - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (connector->encoder != encoder) - continue; - - if (display->ops->mode_fixup) - display->ops->mode_fixup(display, connector, mode, - adjusted_mode); - } - - return true; -} - -static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); - struct exynos_drm_display *display = exynos_encoder->display; - - if (display->ops->mode_set) - display->ops->mode_set(display, adjusted_mode); -} - -static void exynos_drm_encoder_enable(struct drm_encoder *encoder) -{ - struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); - struct exynos_drm_display *display = exynos_encoder->display; - - if (display->ops->dpms) - display->ops->dpms(display, DRM_MODE_DPMS_ON); - - if (display->ops->commit) - display->ops->commit(display); -} - -static void exynos_drm_encoder_disable(struct drm_encoder *encoder) -{ - struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); - struct exynos_drm_display *display = exynos_encoder->display; - - if (display->ops->dpms) - display->ops->dpms(display, DRM_MODE_DPMS_OFF); -} - -static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = { - .mode_fixup = exynos_drm_encoder_mode_fixup, - .mode_set = exynos_drm_encoder_mode_set, - .enable = exynos_drm_encoder_enable, - .disable = exynos_drm_encoder_disable, -}; - -static void exynos_drm_encoder_destroy(struct drm_encoder *encoder) -{ - struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); - - drm_encoder_cleanup(encoder); - kfree(exynos_encoder); -} - -static struct drm_encoder_funcs exynos_encoder_funcs = { - .destroy = exynos_drm_encoder_destroy, -}; - -static unsigned int exynos_drm_encoder_clones(struct drm_encoder *encoder) -{ - struct drm_encoder *clone; - struct drm_device *dev = encoder->dev; - struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); - struct exynos_drm_display *display = exynos_encoder->display; - unsigned int clone_mask = 0; - int cnt = 0; - - list_for_each_entry(clone, &dev->mode_config.encoder_list, head) { - switch (display->type) { - case EXYNOS_DISPLAY_TYPE_LCD: - case EXYNOS_DISPLAY_TYPE_HDMI: - case EXYNOS_DISPLAY_TYPE_VIDI: - clone_mask |= (1 << (cnt++)); - break; - default: - continue; - } - } - - return clone_mask; -} - -void exynos_drm_encoder_setup(struct drm_device *dev) -{ - struct drm_encoder *encoder; - - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) - encoder->possible_clones = exynos_drm_encoder_clones(encoder); -} - -struct drm_encoder * -exynos_drm_encoder_create(struct drm_device *dev, - struct exynos_drm_display *display, - unsigned long possible_crtcs) -{ - struct drm_encoder *encoder; - struct exynos_drm_encoder *exynos_encoder; - - if (!possible_crtcs) - return NULL; - - exynos_encoder = 
kzalloc(sizeof(*exynos_encoder), GFP_KERNEL); - if (!exynos_encoder) - return NULL; - - exynos_encoder->display = display; - encoder = &exynos_encoder->drm_encoder; - encoder->possible_crtcs = possible_crtcs; - - DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); - - drm_encoder_init(dev, encoder, &exynos_encoder_funcs, - DRM_MODE_ENCODER_TMDS); - - drm_encoder_helper_add(encoder, &exynos_encoder_helper_funcs); - - DRM_DEBUG_KMS("encoder has been created\n"); - - return encoder; -} - -struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder) -{ - return to_exynos_encoder(encoder)->display; -} diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h deleted file mode 100644 index 26305d8dd93a..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2011 Samsung Electronics Co., Ltd. - * Authors: - * Inki Dae <inki.dae@samsung.com> - * Joonyoung Shim <jy0922.shim@samsung.com> - * Seung-Woo Kim <sw0312.kim@samsung.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#ifndef _EXYNOS_DRM_ENCODER_H_ -#define _EXYNOS_DRM_ENCODER_H_ - -void exynos_drm_encoder_setup(struct drm_device *dev); -struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev, - struct exynos_drm_display *mgr, - unsigned long possible_crtcs); -struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder); - -#endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 2b6320e6eae2..9738f4e0c6eb 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -238,22 +238,22 @@ err_free: return ERR_PTR(ret); } -struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb, - int index) +struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb, + int index) { struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); - struct exynos_drm_gem_buf *buffer; + struct exynos_drm_gem_obj *obj; if (index >= MAX_FB_BUFFER) return NULL; - buffer = exynos_fb->exynos_gem_obj[index]->buffer; - if (!buffer) + obj = exynos_fb->exynos_gem_obj[index]; + if (!obj) return NULL; - DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr); + DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)obj->dma_addr); - return buffer; + return obj; } static void exynos_drm_output_poll_changed(struct drm_device *dev) diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h index 517471b37566..1c9e27c32cd1 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.h +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h @@ -19,8 +19,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj); -/* get memory information of a drm framebuffer */ -struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb, +/* get gem object of a drm framebuffer */ +struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb, int index); void exynos_drm_mode_config_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index e0b085b4bdfa..624595afbce0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c 
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -40,8 +40,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info, { struct drm_fb_helper *helper = info->par; struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper); - struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj; - struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer; + struct exynos_drm_gem_obj *obj = exynos_fbd->exynos_gem_obj; unsigned long vm_size; int ret; @@ -49,11 +48,11 @@ static int exynos_drm_fb_mmap(struct fb_info *info, vm_size = vma->vm_end - vma->vm_start; - if (vm_size > buffer->size) + if (vm_size > obj->size) return -EINVAL; - ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages, - buffer->dma_addr, buffer->size, &buffer->dma_attrs); + ret = dma_mmap_attrs(helper->dev->dev, vma, obj->pages, obj->dma_addr, + obj->size, &obj->dma_attrs); if (ret < 0) { DRM_ERROR("failed to mmap.\n"); return ret; @@ -65,9 +64,9 @@ static int exynos_drm_fb_mmap(struct fb_info *info, static struct fb_ops exynos_drm_fb_ops = { .owner = THIS_MODULE, .fb_mmap = exynos_drm_fb_mmap, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, + .fb_fillrect = drm_fb_helper_cfb_fillrect, + .fb_copyarea = drm_fb_helper_cfb_copyarea, + .fb_imageblit = drm_fb_helper_cfb_imageblit, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_blank = drm_fb_helper_blank, @@ -80,7 +79,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, struct drm_framebuffer *fb) { struct fb_info *fbi = helper->fbdev; - struct exynos_drm_gem_buf *buffer; + struct exynos_drm_gem_obj *obj; unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3); unsigned int nr_pages; unsigned long offset; @@ -89,18 +88,17 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); /* RGB formats use only one buffer */ - buffer = exynos_drm_fb_buffer(fb, 0); - if (!buffer) { - DRM_DEBUG_KMS("buffer is null.\n"); + obj = exynos_drm_fb_gem_obj(fb, 0); + if (!obj) { + DRM_DEBUG_KMS("gem object is null.\n"); return -EFAULT; } - nr_pages = buffer->size >> PAGE_SHIFT; + nr_pages = obj->size >> PAGE_SHIFT; - buffer->kvaddr = (void __iomem *) vmap(buffer->pages, - nr_pages, VM_MAP, + obj->kvaddr = (void __iomem *) vmap(obj->pages, nr_pages, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); - if (!buffer->kvaddr) { + if (!obj->kvaddr) { DRM_ERROR("failed to map pages to kernel space.\n"); return -EIO; } @@ -111,7 +109,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3); offset += fbi->var.yoffset * fb->pitches[0]; - fbi->screen_base = buffer->kvaddr + offset; + fbi->screen_base = obj->kvaddr + offset; fbi->screen_size = size; fbi->fix.smem_len = size; @@ -142,10 +140,10 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper, mutex_lock(&dev->struct_mutex); - fbi = framebuffer_alloc(0, &pdev->dev); - if (!fbi) { + fbi = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(fbi)) { DRM_ERROR("failed to allocate fb info.\n"); - ret = -ENOMEM; + ret = PTR_ERR(fbi); goto out; } @@ -165,7 +163,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper, if (IS_ERR(exynos_gem_obj)) { ret = PTR_ERR(exynos_gem_obj); - goto err_release_framebuffer; + goto err_release_fbi; } exynos_fbdev->exynos_gem_obj = exynos_gem_obj; @@ -178,33 +176,23 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper, goto 
err_destroy_gem; } - helper->fbdev = fbi; - fbi->par = helper; fbi->flags = FBINFO_FLAG_DEFAULT; fbi->fbops = &exynos_drm_fb_ops; - ret = fb_alloc_cmap(&fbi->cmap, 256, 0); - if (ret) { - DRM_ERROR("failed to allocate cmap.\n"); - goto err_destroy_framebuffer; - } - ret = exynos_drm_fbdev_update(helper, sizes, helper->fb); if (ret < 0) - goto err_dealloc_cmap; + goto err_destroy_framebuffer; mutex_unlock(&dev->struct_mutex); return ret; -err_dealloc_cmap: - fb_dealloc_cmap(&fbi->cmap); err_destroy_framebuffer: drm_framebuffer_cleanup(helper->fb); err_destroy_gem: exynos_drm_gem_destroy(exynos_gem_obj); -err_release_framebuffer: - framebuffer_release(fbi); +err_release_fbi: + drm_fb_helper_release_fbi(helper); /* * if failed, all resources allocated above would be released by @@ -300,8 +288,8 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev, struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj; struct drm_framebuffer *fb; - if (exynos_gem_obj->buffer->kvaddr) - vunmap(exynos_gem_obj->buffer->kvaddr); + if (exynos_gem_obj->kvaddr) + vunmap(exynos_gem_obj->kvaddr); /* release drm framebuffer and real buffer */ if (fb_helper->fb && fb_helper->fb->funcs) { @@ -312,21 +300,8 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev, } } - /* release linux framebuffer */ - if (fb_helper->fbdev) { - struct fb_info *info; - int ret; - - info = fb_helper->fbdev; - ret = unregister_framebuffer(info); - if (ret < 0) - DRM_DEBUG_KMS("failed unregister_framebuffer()\n"); - - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - - framebuffer_release(info); - } + drm_fb_helper_unregister_fbi(fb_helper); + drm_fb_helper_release_fbi(fb_helper); drm_fb_helper_fini(fb_helper); } diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 842d6b8dc3c4..2a652359af64 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -1745,7 +1745,6 @@ static int fimc_probe(struct platform_device *pdev) spin_lock_init(&ctx->lock); platform_set_drvdata(pdev, ctx); - pm_runtime_set_active(dev); pm_runtime_enable(dev); ret = exynos_drm_ippdrv_register(ippdrv); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 794e56c8798e..5def6bc073eb 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -169,7 +169,7 @@ struct fimd_context { struct exynos_drm_panel_info panel; struct fimd_driver_data *driver_data; - struct exynos_drm_display *display; + struct drm_encoder *encoder; }; static const struct of_device_id fimd_driver_dt_match[] = { @@ -348,13 +348,6 @@ static void fimd_clear_channels(struct exynos_drm_crtc *crtc) pm_runtime_put(ctx->dev); } -static void fimd_iommu_detach_devices(struct fimd_context *ctx) -{ - /* detach this sub driver from iommu mapping if supported. 
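Note: the fbdev rework above trades open-coded framebuffer_alloc()/fb_alloc_cmap() cleanup for drm_fb_helper_alloc_fbi()/drm_fb_helper_release_fbi(), dropping one error label. A minimal sketch of the goto-unwind ladder the new labels follow; the three mallocs are hypothetical stand-ins for the fbi, the GEM object and the framebuffer:

#include <stdlib.h>

static int create(void)
{
	void *fbi = malloc(16);            /* drm_fb_helper_alloc_fbi() analogue */
	if (!fbi)
		return -1;
	void *gem = malloc(16);            /* GEM allocation analogue */
	if (!gem)
		goto err_release_fbi;
	void *fb = malloc(16);             /* framebuffer init analogue */
	if (!fb)
		goto err_destroy_gem;

	free(fb); free(gem); free(fbi);    /* success path for the demo */
	return 0;

err_destroy_gem:
	free(gem);                         /* unwind in reverse order of setup */
err_release_fbi:
	free(fbi);
	return -1;
}

int main(void) { return create() ? 1 : 0; }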
*/ - if (is_drm_iommu_supported(ctx->drm_dev)) - drm_iommu_detach_device(ctx->drm_dev, ctx->dev); -} - static u32 fimd_calc_clkdiv(struct fimd_context *ctx, const struct drm_display_mode *mode) { @@ -486,9 +479,9 @@ static void fimd_commit(struct exynos_drm_crtc *crtc) } -static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win) +static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win, + struct drm_framebuffer *fb) { - struct exynos_drm_plane *plane = &ctx->planes[win]; unsigned long val; val = WINCONx_ENWIN; @@ -498,11 +491,11 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win) * So the request format is ARGB8888 then change it to XRGB8888. */ if (ctx->driver_data->has_limited_fmt && !win) { - if (plane->pixel_format == DRM_FORMAT_ARGB8888) - plane->pixel_format = DRM_FORMAT_XRGB8888; + if (fb->pixel_format == DRM_FORMAT_ARGB8888) + fb->pixel_format = DRM_FORMAT_XRGB8888; } - switch (plane->pixel_format) { + switch (fb->pixel_format) { case DRM_FORMAT_C8: val |= WINCON0_BPPMODE_8BPP_PALETTE; val |= WINCONx_BURSTLEN_8WORD; @@ -538,7 +531,7 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win) break; } - DRM_DEBUG_KMS("bpp = %d\n", plane->bpp); + DRM_DEBUG_KMS("bpp = %d\n", fb->bits_per_pixel); /* * In case of exynos, setting dma-burst to 16Word causes permanent @@ -548,7 +541,7 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win) * movement causes unstable DMA which results into iommu crash/tear. */ - if (plane->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) { + if (fb->width < MIN_FB_WIDTH_FOR_16WORD_BURST) { val &= ~WINCONx_BURSTLEN_MASK; val |= WINCONx_BURSTLEN_4WORD; } @@ -614,21 +607,17 @@ static void fimd_shadow_protect_win(struct fimd_context *ctx, writel(val, ctx->regs + reg); } -static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) +static void fimd_update_plane(struct exynos_drm_crtc *crtc, + struct exynos_drm_plane *plane) { struct fimd_context *ctx = crtc->ctx; - struct exynos_drm_plane *plane; + struct drm_plane_state *state = plane->base.state; dma_addr_t dma_addr; unsigned long val, size, offset; unsigned int last_x, last_y, buf_offsize, line_size; - - if (ctx->suspended) - return; - - if (win < 0 || win >= WINDOWS_NR) - return; - - plane = &ctx->planes[win]; + unsigned int win = plane->zpos; + unsigned int bpp = state->fb->bits_per_pixel >> 3; + unsigned int pitch = state->fb->pitches[0]; if (ctx->suspended) return; @@ -647,8 +636,8 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) fimd_shadow_protect_win(ctx, win, true); - offset = plane->src_x * (plane->bpp >> 3); - offset += plane->src_y * plane->pitch; + offset = plane->src_x * bpp; + offset += plane->src_y * pitch; /* buffer start address */ dma_addr = plane->dma_addr[0] + offset; @@ -656,18 +645,18 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); /* buffer end address */ - size = plane->pitch * plane->crtc_height; + size = pitch * plane->crtc_h; val = (unsigned long)(dma_addr + size); writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n", (unsigned long)dma_addr, val, size); DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", - plane->crtc_width, plane->crtc_height); + plane->crtc_w, plane->crtc_h); /* buffer size */ - buf_offsize = plane->pitch - (plane->crtc_width * (plane->bpp >> 3)); - line_size = plane->crtc_width * 
(plane->bpp >> 3); + buf_offsize = pitch - (plane->crtc_w * bpp); + line_size = plane->crtc_w * bpp; val = VIDW_BUF_SIZE_OFFSET(buf_offsize) | VIDW_BUF_SIZE_PAGEWIDTH(line_size) | VIDW_BUF_SIZE_OFFSET_E(buf_offsize) | @@ -681,10 +670,10 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) VIDOSDxA_TOPLEFT_Y_E(plane->crtc_y); writel(val, ctx->regs + VIDOSD_A(win)); - last_x = plane->crtc_x + plane->crtc_width; + last_x = plane->crtc_x + plane->crtc_w; if (last_x) last_x--; - last_y = plane->crtc_y + plane->crtc_height; + last_y = plane->crtc_y + plane->crtc_h; if (last_y) last_y--; @@ -701,13 +690,13 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) u32 offset = VIDOSD_D(win); if (win == 0) offset = VIDOSD_C(win); - val = plane->crtc_width * plane->crtc_height; + val = plane->crtc_w * plane->crtc_h; writel(val, ctx->regs + offset); DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val); } - fimd_win_set_pixfmt(ctx, win); + fimd_win_set_pixfmt(ctx, win, state->fb); /* hardware window 0 doesn't support color key. */ if (win != 0) @@ -725,15 +714,11 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) atomic_set(&ctx->win_updated, 1); } -static void fimd_win_disable(struct exynos_drm_crtc *crtc, unsigned int win) +static void fimd_disable_plane(struct exynos_drm_crtc *crtc, + struct exynos_drm_plane *plane) { struct fimd_context *ctx = crtc->ctx; - struct exynos_drm_plane *plane; - - if (win < 0 || win >= WINDOWS_NR) - return; - - plane = &ctx->planes[win]; + unsigned int win = plane->zpos; if (ctx->suspended) return; @@ -795,7 +780,7 @@ static void fimd_disable(struct exynos_drm_crtc *crtc) * a destroyed buffer later. */ for (i = 0; i < WINDOWS_NR; i++) - fimd_win_disable(crtc, i); + fimd_disable_plane(crtc, &ctx->planes[i]); fimd_enable_vblank(crtc); fimd_wait_for_vblank(crtc); @@ -862,7 +847,7 @@ static void fimd_te_handler(struct exynos_drm_crtc *crtc) } if (test_bit(0, &ctx->irq_flags)) - drm_handle_vblank(ctx->drm_dev, ctx->pipe); + drm_crtc_handle_vblank(&ctx->crtc->base); } static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable) @@ -890,11 +875,10 @@ static const struct exynos_drm_crtc_ops fimd_crtc_ops = { .enable_vblank = fimd_enable_vblank, .disable_vblank = fimd_disable_vblank, .wait_for_vblank = fimd_wait_for_vblank, - .win_commit = fimd_win_commit, - .win_disable = fimd_win_disable, + .update_plane = fimd_update_plane, + .disable_plane = fimd_disable_plane, .te_handler = fimd_te_handler, .clock_enable = fimd_dp_clock_enable, - .clear_channels = fimd_clear_channels, }; static irqreturn_t fimd_irq_handler(int irq, void *dev_id) @@ -913,13 +897,13 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id) goto out; if (ctx->i80_if) { - exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); + exynos_drm_crtc_finish_pageflip(ctx->crtc); /* Exits triggering mode */ atomic_set(&ctx->triggering, 0); } else { - drm_handle_vblank(ctx->drm_dev, ctx->pipe); - exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); + drm_crtc_handle_vblank(&ctx->crtc->base); + exynos_drm_crtc_finish_pageflip(ctx->crtc); /* set wait vsync event to zero and wake up queue. 
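Note: fimd_update_plane() above now computes every DMA window parameter from fb->pitches[0], fb->bits_per_pixel and the plane's src/crtc coordinates instead of fields cached on exynos_drm_plane. The same arithmetic in isolation, with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned int bpp = 32 >> 3;               /* fb->bits_per_pixel >> 3 */
	unsigned int pitch = 4096;                /* fb->pitches[0] */
	unsigned int src_x = 16, src_y = 8;
	unsigned int crtc_w = 640, crtc_h = 480;

	unsigned long offset = src_x * bpp + src_y * (unsigned long)pitch;
	unsigned long size = (unsigned long)pitch * crtc_h;  /* end - start */
	unsigned int buf_offsize = pitch - crtc_w * bpp;     /* gap after each line */
	unsigned int line_size = crtc_w * bpp;               /* visible bytes per line */

	printf("offset=%lu size=%lu offsize=%u line=%u\n",
	       offset, size, buf_offsize, line_size);
	return 0;
}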
*/ if (atomic_read(&ctx->wait_vsync_event)) { @@ -961,10 +945,13 @@ static int fimd_bind(struct device *dev, struct device *master, void *data) if (IS_ERR(ctx->crtc)) return PTR_ERR(ctx->crtc); - if (ctx->display) - exynos_drm_create_enc_conn(drm_dev, ctx->display); + if (ctx->encoder) + exynos_dpi_bind(drm_dev, ctx->encoder); + + if (is_drm_iommu_supported(drm_dev)) + fimd_clear_channels(ctx->crtc); - ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, dev); + ret = drm_iommu_attach_device(drm_dev, dev); if (ret) priv->pipe--; @@ -978,10 +965,10 @@ static void fimd_unbind(struct device *dev, struct device *master, fimd_disable(ctx->crtc); - fimd_iommu_detach_devices(ctx); + drm_iommu_detach_device(ctx->drm_dev, ctx->dev); - if (ctx->display) - exynos_dpi_remove(ctx->display); + if (ctx->encoder) + exynos_dpi_remove(ctx->encoder); } static const struct component_ops fimd_component_ops = { @@ -1088,10 +1075,9 @@ static int fimd_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ctx); - ctx->display = exynos_dpi_probe(dev); - if (IS_ERR(ctx->display)) { - return PTR_ERR(ctx->display); - } + ctx->encoder = exynos_dpi_probe(dev); + if (IS_ERR(ctx->encoder)) + return PTR_ERR(ctx->encoder); pm_runtime_enable(dev); diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 81a250830808..ba008391a2fc 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -1319,9 +1319,6 @@ static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev) return ret; } - if (!is_drm_iommu_supported(drm_dev)) - return 0; - ret = drm_iommu_attach_device(drm_dev, dev); if (ret < 0) { dev_err(dev, "failed to enable iommu.\n"); @@ -1334,9 +1331,6 @@ static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev) static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev) { - if (!is_drm_iommu_supported(drm_dev)) - return; - drm_iommu_detach_device(drm_dev, dev); } diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 0d5b9698d384..67461b77f040 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -13,98 +13,112 @@ #include <drm/drm_vma_manager.h> #include <linux/shmem_fs.h> +#include <linux/dma-buf.h> #include <drm/exynos_drm.h> #include "exynos_drm_drv.h" #include "exynos_drm_gem.h" -#include "exynos_drm_buf.h" #include "exynos_drm_iommu.h" -static unsigned int convert_to_vm_err_msg(int msg) +static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj) { - unsigned int out_msg; + struct drm_device *dev = obj->base.dev; + enum dma_attr attr; + unsigned int nr_pages; - switch (msg) { - case 0: - case -ERESTARTSYS: - case -EINTR: - out_msg = VM_FAULT_NOPAGE; - break; + if (obj->dma_addr) { + DRM_DEBUG_KMS("already allocated.\n"); + return 0; + } - case -ENOMEM: - out_msg = VM_FAULT_OOM; - break; + init_dma_attrs(&obj->dma_attrs); - default: - out_msg = VM_FAULT_SIGBUS; - break; - } + /* + * if EXYNOS_BO_CONTIG, fully physically contiguous memory + * region will be allocated else physically contiguous + * as possible. + */ + if (!(obj->flags & EXYNOS_BO_NONCONTIG)) + dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &obj->dma_attrs); - return out_msg; -} + /* + * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping + * else cachable mapping. 
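Note: the new exynos_drm_alloc_buf() above picks the DMA mapping attribute from the GEM flags: write-combine wins whenever EXYNOS_BO_WC is set or EXYNOS_BO_CACHABLE is absent. A compact restatement of that decision; the bit values here are illustrative, the real ones live in the exynos uapi header:

#include <stdio.h>

#define BO_NONCONTIG (1u << 0)   /* assumed layout, for illustration only */
#define BO_CACHABLE  (1u << 1)
#define BO_WC        (1u << 2)

static const char *pick_mapping(unsigned int flags)
{
	if ((flags & BO_WC) || !(flags & BO_CACHABLE))
		return "DMA_ATTR_WRITE_COMBINE";
	return "DMA_ATTR_NON_CONSISTENT";
}

int main(void)
{
	printf("%s\n", pick_mapping(BO_WC));         /* write-combine */
	printf("%s\n", pick_mapping(BO_CACHABLE));   /* cached, non-consistent */
	printf("%s\n", pick_mapping(0));             /* write-combine by default */
	return 0;
}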
+ */ + if (obj->flags & EXYNOS_BO_WC || !(obj->flags & EXYNOS_BO_CACHABLE)) + attr = DMA_ATTR_WRITE_COMBINE; + else + attr = DMA_ATTR_NON_CONSISTENT; -static int check_gem_flags(unsigned int flags) -{ - if (flags & ~(EXYNOS_BO_MASK)) { - DRM_ERROR("invalid flags.\n"); - return -EINVAL; - } + dma_set_attr(attr, &obj->dma_attrs); + dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &obj->dma_attrs); - return 0; -} + nr_pages = obj->size >> PAGE_SHIFT; -static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj, - struct vm_area_struct *vma) -{ - DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags); + if (!is_drm_iommu_supported(dev)) { + dma_addr_t start_addr; + unsigned int i = 0; - /* non-cachable as default. */ - if (obj->flags & EXYNOS_BO_CACHABLE) - vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - else if (obj->flags & EXYNOS_BO_WC) - vma->vm_page_prot = - pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); - else - vma->vm_page_prot = - pgprot_noncached(vm_get_page_prot(vma->vm_flags)); -} + obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); + if (!obj->pages) { + DRM_ERROR("failed to allocate pages.\n"); + return -ENOMEM; + } -static unsigned long roundup_gem_size(unsigned long size, unsigned int flags) -{ - /* TODO */ + obj->cookie = dma_alloc_attrs(dev->dev, + obj->size, + &obj->dma_addr, GFP_KERNEL, + &obj->dma_attrs); + if (!obj->cookie) { + DRM_ERROR("failed to allocate buffer.\n"); + drm_free_large(obj->pages); + return -ENOMEM; + } + + start_addr = obj->dma_addr; + while (i < nr_pages) { + obj->pages[i] = phys_to_page(start_addr); + start_addr += PAGE_SIZE; + i++; + } + } else { + obj->pages = dma_alloc_attrs(dev->dev, obj->size, + &obj->dma_addr, GFP_KERNEL, + &obj->dma_attrs); + if (!obj->pages) { + DRM_ERROR("failed to allocate buffer.\n"); + return -ENOMEM; + } + } + + DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", + (unsigned long)obj->dma_addr, + obj->size); - return roundup(size, PAGE_SIZE); + return 0; } -static int exynos_drm_gem_map_buf(struct drm_gem_object *obj, - struct vm_area_struct *vma, - unsigned long f_vaddr, - pgoff_t page_offset) +static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj) { - struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); - struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer; - struct scatterlist *sgl; - unsigned long pfn; - int i; - - if (!buf->sgt) - return -EINTR; + struct drm_device *dev = obj->base.dev; - if (page_offset >= (buf->size >> PAGE_SHIFT)) { - DRM_ERROR("invalid page offset\n"); - return -EINVAL; + if (!obj->dma_addr) { + DRM_DEBUG_KMS("dma_addr is invalid.\n"); + return; } - sgl = buf->sgt->sgl; - for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) { - if (page_offset < (sgl->length >> PAGE_SHIFT)) - break; - page_offset -= (sgl->length >> PAGE_SHIFT); - } + DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", + (unsigned long)obj->dma_addr, obj->size); - pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset; + if (!is_drm_iommu_supported(dev)) { + dma_free_attrs(dev->dev, obj->size, obj->cookie, + (dma_addr_t)obj->dma_addr, &obj->dma_attrs); + drm_free_large(obj->pages); + } else + dma_free_attrs(dev->dev, obj->size, obj->pages, + (dma_addr_t)obj->dma_addr, &obj->dma_attrs); - return vm_insert_mixed(vma, f_vaddr, pfn); + obj->dma_addr = (dma_addr_t)NULL; } static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, @@ -131,11 +145,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) { - struct 
drm_gem_object *obj; - struct exynos_drm_gem_buf *buf; - - obj = &exynos_gem_obj->base; - buf = exynos_gem_obj->buffer; + struct drm_gem_object *obj = &exynos_gem_obj->base; DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count); @@ -148,12 +158,9 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) if (obj->import_attach) goto out; - exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf); + exynos_drm_free_buf(exynos_gem_obj); out: - exynos_drm_fini_buf(obj->dev, buf); - exynos_gem_obj->buffer = NULL; - drm_gem_free_mmap_offset(obj); /* release file pointer to gem object. */ @@ -180,7 +187,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev, drm_gem_object_unreference_unlocked(obj); - return exynos_gem_obj->buffer->size; + return exynos_gem_obj->size; } @@ -193,7 +200,7 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL); if (!exynos_gem_obj) - return NULL; + return ERR_PTR(-ENOMEM); exynos_gem_obj->size = size; obj = &exynos_gem_obj->base; @@ -202,7 +209,7 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, if (ret < 0) { DRM_ERROR("failed to initialize gem object\n"); kfree(exynos_gem_obj); - return NULL; + return ERR_PTR(ret); } DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); @@ -215,47 +222,35 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, unsigned long size) { struct exynos_drm_gem_obj *exynos_gem_obj; - struct exynos_drm_gem_buf *buf; int ret; + if (flags & ~(EXYNOS_BO_MASK)) { + DRM_ERROR("invalid flags.\n"); + return ERR_PTR(-EINVAL); + } + if (!size) { DRM_ERROR("invalid size.\n"); return ERR_PTR(-EINVAL); } - size = roundup_gem_size(size, flags); - - ret = check_gem_flags(flags); - if (ret) - return ERR_PTR(ret); - - buf = exynos_drm_init_buf(dev, size); - if (!buf) - return ERR_PTR(-ENOMEM); + size = roundup(size, PAGE_SIZE); exynos_gem_obj = exynos_drm_gem_init(dev, size); - if (!exynos_gem_obj) { - ret = -ENOMEM; - goto err_fini_buf; - } - - exynos_gem_obj->buffer = buf; + if (IS_ERR(exynos_gem_obj)) + return exynos_gem_obj; /* set memory type and cache attribute from user side. */ exynos_gem_obj->flags = flags; - ret = exynos_drm_alloc_buf(dev, buf, flags); - if (ret < 0) - goto err_gem_fini; + ret = exynos_drm_alloc_buf(exynos_gem_obj); + if (ret < 0) { + drm_gem_object_release(&exynos_gem_obj->base); + kfree(exynos_gem_obj); + return ERR_PTR(ret); + } return exynos_gem_obj; - -err_gem_fini: - drm_gem_object_release(&exynos_gem_obj->base); - kfree(exynos_gem_obj); -err_fini_buf: - exynos_drm_fini_buf(dev, buf); - return ERR_PTR(ret); } int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, @@ -294,7 +289,7 @@ dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev, exynos_gem_obj = to_exynos_gem_obj(obj); - return &exynos_gem_obj->buffer->dma_addr; + return &exynos_gem_obj->dma_addr; } void exynos_drm_gem_put_dma_addr(struct drm_device *dev, @@ -322,7 +317,6 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, struct vm_area_struct *vma) { struct drm_device *drm_dev = exynos_gem_obj->base.dev; - struct exynos_drm_gem_buf *buffer; unsigned long vm_size; int ret; @@ -331,19 +325,13 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, vm_size = vma->vm_end - vma->vm_start; - /* - * a buffer contains information to physically continuous memory - * allocated by user request or at framebuffer creation. 
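Note: in the non-IOMMU branch above the buffer is physically contiguous (DMA_ATTR_FORCE_CONTIGUOUS), so the backing page array can be filled by stepping the bus address one PAGE_SIZE at a time. A userspace sketch of that loop, printing page frame numbers in place of phys_to_page():

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long dma_addr = 0x20000000UL;    /* illustrative base address */
	unsigned long size = 4 * PAGE_SIZE;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned long i;

	for (i = 0; i < nr_pages; i++)            /* pages[i] = phys_to_page(addr) */
		printf("pages[%lu] -> pfn %#lx\n", i,
		       (dma_addr + i * PAGE_SIZE) >> PAGE_SHIFT);
	return 0;
}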
- */ - buffer = exynos_gem_obj->buffer; - /* check if user-requested size is valid. */ - if (vm_size > buffer->size) + if (vm_size > exynos_gem_obj->size) return -EINVAL; - ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages, - buffer->dma_addr, buffer->size, - &buffer->dma_attrs); + ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem_obj->pages, + exynos_gem_obj->dma_addr, exynos_gem_obj->size, + &exynos_gem_obj->dma_attrs); if (ret < 0) { DRM_ERROR("failed to mmap.\n"); return ret; @@ -503,15 +491,6 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev, void exynos_drm_gem_free_object(struct drm_gem_object *obj) { - struct exynos_drm_gem_obj *exynos_gem_obj; - struct exynos_drm_gem_buf *buf; - - exynos_gem_obj = to_exynos_gem_obj(obj); - buf = exynos_gem_obj->buffer; - - if (obj->import_attach) - drm_prime_gem_destroy(obj, buf->sgt); - exynos_drm_gem_destroy(to_exynos_gem_obj(obj)); } @@ -595,24 +574,34 @@ unlock: int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct drm_gem_object *obj = vma->vm_private_data; - struct drm_device *dev = obj->dev; - unsigned long f_vaddr; + struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); + unsigned long pfn; pgoff_t page_offset; int ret; page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT; - f_vaddr = (unsigned long)vmf->virtual_address; - - mutex_lock(&dev->struct_mutex); - ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset); - if (ret < 0) - DRM_ERROR("failed to map a buffer with user.\n"); + if (page_offset >= (exynos_gem_obj->size >> PAGE_SHIFT)) { + DRM_ERROR("invalid page offset\n"); + ret = -EINVAL; + goto out; + } - mutex_unlock(&dev->struct_mutex); + pfn = page_to_pfn(exynos_gem_obj->pages[page_offset]); + ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); - return convert_to_vm_err_msg(ret); +out: + switch (ret) { + case 0: + case -ERESTARTSYS: + case -EINTR: + return VM_FAULT_NOPAGE; + case -ENOMEM: + return VM_FAULT_OOM; + default: + return VM_FAULT_SIGBUS; + } } int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) @@ -631,11 +620,17 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) obj = vma->vm_private_data; exynos_gem_obj = to_exynos_gem_obj(obj); - ret = check_gem_flags(exynos_gem_obj->flags); - if (ret) - goto err_close_vm; + DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem_obj->flags); - update_vm_cache_attr(exynos_gem_obj, vma); + /* non-cachable as default. 
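Note: exynos_drm_gem_init() and exynos_drm_gem_create() above now return ERR_PTR() values instead of NULL, so callers can propagate the precise errno. A minimal reimplementation of the kernel's ERR_PTR helpers (the real ones are in linux/err.h), with a hypothetical allocator to show the convention:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	/* the top page of the address space encodes -MAX_ERRNO..-1 */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *gem_init(int fail)          /* hypothetical allocator */
{
	return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;
}

int main(void)
{
	void *obj = gem_init(1);
	if (IS_ERR(obj))
		printf("error %ld\n", PTR_ERR(obj));   /* error -12 (ENOMEM) */
	return 0;
}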
*/ + if (exynos_gem_obj->flags & EXYNOS_BO_CACHABLE) + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + else if (exynos_gem_obj->flags & EXYNOS_BO_WC) + vma->vm_page_prot = + pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + else + vma->vm_page_prot = + pgprot_noncached(vm_get_page_prot(vma->vm_flags)); ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma); if (ret) @@ -649,3 +644,76 @@ err_close_vm: return ret; } + +/* low-level interface prime helpers */ +struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); + int npages; + + npages = exynos_gem_obj->size >> PAGE_SHIFT; + + return drm_prime_pages_to_sg(exynos_gem_obj->pages, npages); +} + +struct drm_gem_object * +exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt) +{ + struct exynos_drm_gem_obj *exynos_gem_obj; + int npages; + int ret; + + exynos_gem_obj = exynos_drm_gem_init(dev, attach->dmabuf->size); + if (IS_ERR(exynos_gem_obj)) { + ret = PTR_ERR(exynos_gem_obj); + goto err; + } + + exynos_gem_obj->dma_addr = sg_dma_address(sgt->sgl); + + npages = exynos_gem_obj->size >> PAGE_SHIFT; + exynos_gem_obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); + if (!exynos_gem_obj->pages) { + ret = -ENOMEM; + goto err; + } + + ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem_obj->pages, NULL, + npages); + if (ret < 0) + goto err_free_large; + + if (sgt->nents == 1) { + /* always physically continuous memory if sgt->nents is 1. */ + exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; + } else { + /* + * this case could be CONTIG or NONCONTIG type but for now + * sets NONCONTIG. + * TODO. we have to find a way that exporter can notify + * the type of its own buffer to importer. + */ + exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG; + } + + return &exynos_gem_obj->base; + +err_free_large: + drm_free_large(exynos_gem_obj->pages); +err: + drm_gem_object_release(&exynos_gem_obj->base); + kfree(exynos_gem_obj); + return ERR_PTR(ret); +} + +void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj) +{ + return NULL; +} + +void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +{ + /* Nothing to do */ +} diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index 6f42e2248288..cd62f8410d1e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -20,35 +20,6 @@ #define IS_NONCONTIG_BUFFER(f) (f & EXYNOS_BO_NONCONTIG) /* - * exynos drm gem buffer structure. - * - * @cookie: cookie returned by dma_alloc_attrs - * @kvaddr: kernel virtual address to allocated memory region. - * *userptr: user space address. - * @dma_addr: bus address(accessed by dma) to allocated memory region. - * - this address could be physical address without IOMMU and - * device address with IOMMU. - * @write: whether pages will be written to by the caller. - * @pages: Array of backing pages. - * @sgt: sg table to transfer page data. - * @size: size of allocated memory region. - * @pfnmap: indicate whether memory region from userptr is mmaped with - * VM_PFNMAP or not. - */ -struct exynos_drm_gem_buf { - void *cookie; - void __iomem *kvaddr; - unsigned long userptr; - dma_addr_t dma_addr; - struct dma_attrs dma_attrs; - unsigned int write; - struct page **pages; - struct sg_table *sgt; - unsigned long size; - bool pfnmap; -}; - -/* * exynos drm buffer structure. * * @base: a gem object. 
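Note: the rewritten exynos_drm_gem_fault() above converts the faulting address into an index into obj->pages[] and bounds-checks it before vm_insert_mixed(); the old scatterlist walk is gone because pages[] is now always populated at allocation time. The index arithmetic on its own, with illustrative numbers:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long vm_start = 0x7f0000000000UL;
	unsigned long fault = vm_start + 3 * (1UL << PAGE_SHIFT) + 0x123;
	unsigned long page_offset = (fault - vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = 8;   /* obj->size >> PAGE_SHIFT */

	if (page_offset >= nr_pages)
		printf("out of range -> VM_FAULT_SIGBUS\n");
	else
		printf("insert pages[%lu]\n", page_offset);   /* pages[3] */
	return 0;
}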
@@ -59,18 +30,28 @@ struct exynos_drm_gem_buf { * by user request or at framebuffer creation. * continuous memory region allocated by user request * or at framebuffer creation. + * @flags: indicate memory type to allocated buffer and cache attruibute. * @size: size requested from user, in bytes and this size is aligned * in page unit. - * @flags: indicate memory type to allocated buffer and cache attruibute. + * @cookie: cookie returned by dma_alloc_attrs + * @kvaddr: kernel virtual address to allocated memory region. + * @dma_addr: bus address(accessed by dma) to allocated memory region. + * - this address could be physical address without IOMMU and + * device address with IOMMU. + * @pages: Array of backing pages. * * P.S. this object would be transferred to user as kms_bo.handle so * user can access the buffer through kms_bo.handle. */ struct exynos_drm_gem_obj { - struct drm_gem_object base; - struct exynos_drm_gem_buf *buffer; - unsigned long size; - unsigned int flags; + struct drm_gem_object base; + unsigned int flags; + unsigned long size; + void *cookie; + void __iomem *kvaddr; + dma_addr_t dma_addr; + struct dma_attrs dma_attrs; + struct page **pages; }; struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); @@ -177,4 +158,13 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev, struct sg_table *sgt, enum dma_data_direction dir); +/* low-level interface prime helpers */ +struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object * +exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt); +void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj); +void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); + #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 8040ed2a831f..808a0a013780 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -582,9 +582,17 @@ static int gsc_src_set_transf(struct device *dev, break; case EXYNOS_DRM_DEGREE_180: cfg |= GSC_IN_ROT_180; + if (flip & EXYNOS_DRM_FLIP_VERTICAL) + cfg &= ~GSC_IN_ROT_XFLIP; + if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) + cfg &= ~GSC_IN_ROT_YFLIP; break; case EXYNOS_DRM_DEGREE_270: cfg |= GSC_IN_ROT_270; + if (flip & EXYNOS_DRM_FLIP_VERTICAL) + cfg &= ~GSC_IN_ROT_XFLIP; + if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) + cfg &= ~GSC_IN_ROT_YFLIP; break; default: dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree); @@ -593,8 +601,7 @@ static int gsc_src_set_transf(struct device *dev, gsc_write(cfg, GSC_IN_CON); - ctx->rotation = cfg & - (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0; + ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0; *swap = ctx->rotation; return 0; @@ -846,9 +853,17 @@ static int gsc_dst_set_transf(struct device *dev, break; case EXYNOS_DRM_DEGREE_180: cfg |= GSC_IN_ROT_180; + if (flip & EXYNOS_DRM_FLIP_VERTICAL) + cfg &= ~GSC_IN_ROT_XFLIP; + if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) + cfg &= ~GSC_IN_ROT_YFLIP; break; case EXYNOS_DRM_DEGREE_270: cfg |= GSC_IN_ROT_270; + if (flip & EXYNOS_DRM_FLIP_VERTICAL) + cfg &= ~GSC_IN_ROT_XFLIP; + if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) + cfg &= ~GSC_IN_ROT_YFLIP; break; default: dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree); @@ -857,8 +872,7 @@ static int gsc_dst_set_transf(struct device *dev, gsc_write(cfg, GSC_IN_CON); - ctx->rotation = cfg & - (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 
1 : 0; + ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0; *swap = ctx->rotation; return 0; diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c index d4ec7465e9cc..055e8ec2ef21 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c +++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c @@ -87,10 +87,8 @@ int drm_iommu_attach_device(struct drm_device *drm_dev, struct device *dev = drm_dev->dev; int ret; - if (!dev->archdata.mapping) { - DRM_ERROR("iommu_mapping is null.\n"); - return -EFAULT; - } + if (!dev->archdata.mapping) + return 0; subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev, sizeof(*subdrv_dev->dma_parms), @@ -144,17 +142,3 @@ void drm_iommu_detach_device(struct drm_device *drm_dev, iommu_detach_device(mapping->domain, subdrv_dev); drm_release_iommu_mapping(drm_dev); } - -int drm_iommu_attach_device_if_possible(struct exynos_drm_crtc *exynos_crtc, - struct drm_device *drm_dev, struct device *subdrv_dev) -{ - int ret = 0; - - if (is_drm_iommu_supported(drm_dev)) { - if (exynos_crtc->ops->clear_channels) - exynos_crtc->ops->clear_channels(exynos_crtc); - return drm_iommu_attach_device(drm_dev, subdrv_dev); - } - - return ret; -} diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h index 8341c7a475b4..dc1b5441f491 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h +++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h @@ -29,19 +29,11 @@ void drm_iommu_detach_device(struct drm_device *dev_dev, static inline bool is_drm_iommu_supported(struct drm_device *drm_dev) { -#ifdef CONFIG_ARM_DMA_USE_IOMMU struct device *dev = drm_dev->dev; return dev->archdata.mapping ? true : false; -#else - return false; -#endif } -int drm_iommu_attach_device_if_possible( - struct exynos_drm_crtc *exynos_crtc, struct drm_device *drm_dev, - struct device *subdrv_dev); - #else static inline int drm_create_iommu_mapping(struct drm_device *drm_dev) @@ -69,12 +61,5 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev) return false; } -static inline int drm_iommu_attach_device_if_possible( - struct exynos_drm_crtc *exynos_crtc, struct drm_device *drm_dev, - struct device *subdrv_dev) -{ - return 0; -} - #endif #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c index 67e5451e066f..67d24236e745 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c @@ -1622,12 +1622,10 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev) INIT_LIST_HEAD(&ippdrv->cmd_list); mutex_init(&ippdrv->cmd_lock); - if (is_drm_iommu_supported(drm_dev)) { - ret = drm_iommu_attach_device(drm_dev, ippdrv->dev); - if (ret) { - DRM_ERROR("failed to activate iommu\n"); - goto err; - } + ret = drm_iommu_attach_device(drm_dev, ippdrv->dev); + if (ret) { + DRM_ERROR("failed to activate iommu\n"); + goto err; } } @@ -1637,8 +1635,7 @@ err: /* get ipp driver entry */ list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list) { - if (is_drm_iommu_supported(drm_dev)) - drm_iommu_detach_device(drm_dev, ippdrv->dev); + drm_iommu_detach_device(drm_dev, ippdrv->dev); ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv->prop_list.ipp_id); @@ -1654,8 +1651,7 @@ static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev) /* get ipp driver entry */ list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) { - if (is_drm_iommu_supported(drm_dev)) - drm_iommu_detach_device(drm_dev, ippdrv->dev); + 
drm_iommu_detach_device(drm_dev, ippdrv->dev); ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv->prop_list.ipp_id); diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index a729980d3c2f..d9a68fd83120 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -97,29 +97,18 @@ static void exynos_plane_mode_set(struct drm_plane *plane, /* set drm framebuffer data. */ exynos_plane->src_x = src_x; exynos_plane->src_y = src_y; - exynos_plane->src_width = (actual_w * exynos_plane->h_ratio) >> 16; - exynos_plane->src_height = (actual_h * exynos_plane->v_ratio) >> 16; - exynos_plane->fb_width = fb->width; - exynos_plane->fb_height = fb->height; - exynos_plane->bpp = fb->bits_per_pixel; - exynos_plane->pitch = fb->pitches[0]; - exynos_plane->pixel_format = fb->pixel_format; + exynos_plane->src_w = (actual_w * exynos_plane->h_ratio) >> 16; + exynos_plane->src_h = (actual_h * exynos_plane->v_ratio) >> 16; /* set plane range to be displayed. */ exynos_plane->crtc_x = crtc_x; exynos_plane->crtc_y = crtc_y; - exynos_plane->crtc_width = actual_w; - exynos_plane->crtc_height = actual_h; - - /* set drm mode data. */ - exynos_plane->mode_width = mode->hdisplay; - exynos_plane->mode_height = mode->vdisplay; - exynos_plane->refresh = mode->vrefresh; - exynos_plane->scan_flag = mode->flags; + exynos_plane->crtc_w = actual_w; + exynos_plane->crtc_h = actual_h; DRM_DEBUG_KMS("plane : offset_x/y(%d,%d), width/height(%d,%d)", exynos_plane->crtc_x, exynos_plane->crtc_y, - exynos_plane->crtc_width, exynos_plane->crtc_height); + exynos_plane->crtc_w, exynos_plane->crtc_h); plane->crtc = crtc; } @@ -145,15 +134,15 @@ static int exynos_plane_atomic_check(struct drm_plane *plane, nr = exynos_drm_fb_get_buf_cnt(state->fb); for (i = 0; i < nr; i++) { - struct exynos_drm_gem_buf *buffer = - exynos_drm_fb_buffer(state->fb, i); + struct exynos_drm_gem_obj *obj = + exynos_drm_fb_gem_obj(state->fb, i); - if (!buffer) { - DRM_DEBUG_KMS("buffer is null\n"); + if (!obj) { + DRM_DEBUG_KMS("gem object is null\n"); return -EFAULT; } - exynos_plane->dma_addr[i] = buffer->dma_addr + + exynos_plane->dma_addr[i] = obj->dma_addr + state->fb->offsets[i]; DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n", @@ -179,8 +168,8 @@ static void exynos_plane_atomic_update(struct drm_plane *plane, state->src_x >> 16, state->src_y >> 16, state->src_w >> 16, state->src_h >> 16); - if (exynos_crtc->ops->win_commit) - exynos_crtc->ops->win_commit(exynos_crtc, exynos_plane->zpos); + if (exynos_crtc->ops->update_plane) + exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane); } static void exynos_plane_atomic_disable(struct drm_plane *plane, @@ -192,9 +181,9 @@ static void exynos_plane_atomic_disable(struct drm_plane *plane, if (!old_state->crtc) return; - if (exynos_crtc->ops->win_disable) - exynos_crtc->ops->win_disable(exynos_crtc, - exynos_plane->zpos); + if (exynos_crtc->ops->disable_plane) + exynos_crtc->ops->disable_plane(exynos_crtc, + exynos_plane); } static const struct drm_plane_helper_funcs plane_helper_funcs = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 3413393d8a16..581af35861a6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -25,7 +25,6 @@ #include "exynos_drm_drv.h" #include "exynos_drm_crtc.h" #include "exynos_drm_plane.h" -#include "exynos_drm_encoder.h" #include "exynos_drm_vidi.h" /* vidi has totally three virtual windows. 
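Note: the gsc hunks above collapse the 90/270 rotation test to the GSC_IN_ROT_90 bit alone; that is only equivalent if the 270-degree register encoding also sets the 90-degree bit, which the replaced expression implies. The encoding below is an assumption chosen to illustrate why the simplified test still reports a width/height swap for 270:

#include <stdio.h>

#define ROT_90  (1u << 0)            /* illustrative bits, not regs-gsc.h */
#define ROT_180 (1u << 1)
#define ROT_270 (ROT_90 | ROT_180)   /* 270 = 90 + 180 */

int main(void)
{
	unsigned int cfgs[] = { 0, ROT_90, ROT_180, ROT_270 };
	int i;

	for (i = 0; i < 4; i++)          /* swap iff the 90-degree bit is set */
		printf("cfg=%u swap=%d\n", cfgs[i], !!(cfgs[i] & ROT_90));
	return 0;
}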
*/ @@ -35,11 +34,10 @@ connector) struct vidi_context { - struct exynos_drm_display display; + struct drm_encoder encoder; struct platform_device *pdev; struct drm_device *drm_dev; struct exynos_drm_crtc *crtc; - struct drm_encoder *encoder; struct drm_connector connector; struct exynos_drm_plane planes[WINDOWS_NR]; struct edid *raw_edid; @@ -55,9 +53,9 @@ struct vidi_context { int pipe; }; -static inline struct vidi_context *display_to_vidi(struct exynos_drm_display *d) +static inline struct vidi_context *encoder_to_vidi(struct drm_encoder *e) { - return container_of(d, struct vidi_context, display); + return container_of(e, struct vidi_context, encoder); } static const char fake_edid_info[] = { @@ -100,7 +98,7 @@ static int vidi_enable_vblank(struct exynos_drm_crtc *crtc) /* * in case of page flip request, vidi_finish_pageflip function * will not be called because direct_vblank is true and then - * that function will be called by crtc_ops->win_commit callback + * that function will be called by crtc_ops->update_plane callback */ schedule_work(&ctx->work); @@ -118,19 +116,14 @@ static void vidi_disable_vblank(struct exynos_drm_crtc *crtc) ctx->vblank_on = false; } -static void vidi_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) +static void vidi_update_plane(struct exynos_drm_crtc *crtc, + struct exynos_drm_plane *plane) { struct vidi_context *ctx = crtc->ctx; - struct exynos_drm_plane *plane; if (ctx->suspended) return; - if (win < 0 || win >= WINDOWS_NR) - return; - - plane = &ctx->planes[win]; - DRM_DEBUG_KMS("dma_addr = %pad\n", plane->dma_addr); if (ctx->vblank_on) @@ -179,7 +172,7 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = { .disable = vidi_disable, .enable_vblank = vidi_enable_vblank, .disable_vblank = vidi_disable_vblank, - .win_commit = vidi_win_commit, + .update_plane = vidi_update_plane, }; static void vidi_fake_vblank_handler(struct work_struct *work) @@ -196,7 +189,7 @@ static void vidi_fake_vblank_handler(struct work_struct *work) mutex_lock(&ctx->lock); if (ctx->direct_vblank) { - drm_handle_vblank(ctx->drm_dev, ctx->pipe); + drm_crtc_handle_vblank(&ctx->crtc->base); ctx->direct_vblank = false; mutex_unlock(&ctx->lock); return; @@ -204,7 +197,7 @@ static void vidi_fake_vblank_handler(struct work_struct *work) mutex_unlock(&ctx->lock); - exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); + exynos_drm_crtc_finish_pageflip(ctx->crtc); } static int vidi_show_connection(struct device *dev, @@ -259,9 +252,7 @@ static DEVICE_ATTR(connection, 0644, vidi_show_connection, int vidi_connection_ioctl(struct drm_device *drm_dev, void *data, struct drm_file *file_priv) { - struct vidi_context *ctx = NULL; - struct drm_encoder *encoder; - struct exynos_drm_display *display; + struct vidi_context *ctx = dev_get_drvdata(drm_dev->dev); struct drm_exynos_vidi_connection *vidi = data; if (!vidi) { @@ -274,21 +265,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data, return -EINVAL; } - list_for_each_entry(encoder, &drm_dev->mode_config.encoder_list, - head) { - display = exynos_drm_get_display(encoder); - - if (display->type == EXYNOS_DISPLAY_TYPE_VIDI) { - ctx = display_to_vidi(display); - break; - } - } - - if (!ctx) { - DRM_DEBUG_KMS("not found virtual device type encoder.\n"); - return -EINVAL; - } - if (ctx->connected == vidi->connection) { DRM_DEBUG_KMS("same connection request.\n"); return -EINVAL; @@ -381,7 +357,7 @@ static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector) { struct vidi_context *ctx = 
ctx_from_connector(connector); - return ctx->encoder; + return &ctx->encoder; } static struct drm_connector_helper_funcs vidi_connector_helper_funcs = { @@ -389,14 +365,12 @@ static struct drm_connector_helper_funcs vidi_connector_helper_funcs = { .best_encoder = vidi_best_encoder, }; -static int vidi_create_connector(struct exynos_drm_display *display, - struct drm_encoder *encoder) +static int vidi_create_connector(struct drm_encoder *encoder) { - struct vidi_context *ctx = display_to_vidi(display); + struct vidi_context *ctx = encoder_to_vidi(encoder); struct drm_connector *connector = &ctx->connector; int ret; - ctx->encoder = encoder; connector->polled = DRM_CONNECTOR_POLL_HPD; ret = drm_connector_init(ctx->drm_dev, connector, @@ -413,19 +387,47 @@ static int vidi_create_connector(struct exynos_drm_display *display, return 0; } +static bool exynos_vidi_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void exynos_vidi_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ +} -static struct exynos_drm_display_ops vidi_display_ops = { - .create_connector = vidi_create_connector, +static void exynos_vidi_enable(struct drm_encoder *encoder) +{ +} + +static void exynos_vidi_disable(struct drm_encoder *encoder) +{ +} + +static struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs = { + .mode_fixup = exynos_vidi_mode_fixup, + .mode_set = exynos_vidi_mode_set, + .enable = exynos_vidi_enable, + .disable = exynos_vidi_disable, +}; + +static struct drm_encoder_funcs exynos_vidi_encoder_funcs = { + .destroy = drm_encoder_cleanup, }; static int vidi_bind(struct device *dev, struct device *master, void *data) { struct vidi_context *ctx = dev_get_drvdata(dev); struct drm_device *drm_dev = data; + struct drm_encoder *encoder = &ctx->encoder; struct exynos_drm_plane *exynos_plane; enum drm_plane_type type; unsigned int zpos; - int ret; + int pipe, ret; vidi_ctx_initialize(ctx, drm_dev); @@ -447,9 +449,24 @@ static int vidi_bind(struct device *dev, struct device *master, void *data) return PTR_ERR(ctx->crtc); } - ret = exynos_drm_create_enc_conn(drm_dev, &ctx->display); + pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev, + EXYNOS_DISPLAY_TYPE_VIDI); + if (pipe < 0) + return pipe; + + encoder->possible_crtcs = 1 << pipe; + + DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); + + drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + + drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs); + + ret = vidi_create_connector(encoder); if (ret) { - ctx->crtc->base.funcs->destroy(&ctx->crtc->base); + DRM_ERROR("failed to create connector ret = %d\n", ret); + drm_encoder_cleanup(encoder); return ret; } @@ -475,8 +492,6 @@ static int vidi_probe(struct platform_device *pdev) if (!ctx) return -ENOMEM; - ctx->display.type = EXYNOS_DISPLAY_TYPE_VIDI; - ctx->display.ops = &vidi_display_ops; ctx->default_win = 0; ctx->pdev = pdev; diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 99e286489031..932f7fa240f8 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -22,7 +22,6 @@ #include "regs-hdmi.h" #include <linux/kernel.h> -#include <linux/spinlock.h> #include <linux/wait.h> #include <linux/i2c.h> #include <linux/platform_device.h> @@ -33,8 +32,8 @@ #include <linux/clk.h> #include 
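/*
 * Sketch (not from this patch): vidi_bind() above derives the encoder's
 * possible_crtcs mask from the pipe index of the matching CRTC -- one
 * bit per CRTC, so pipe 0 gives 0x1 and pipe 2 gives 0x4:
 */
pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
					  EXYNOS_DISPLAY_TYPE_VIDI);
if (pipe < 0)
	return pipe;			/* propagate the error code */

encoder->possible_crtcs = 1 << pipe;	/* e.g. pipe 2 -> 0b100 */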
<linux/regulator/consumer.h> #include <linux/io.h> -#include <linux/of.h> #include <linux/of_address.h> +#include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/hdmi.h> #include <linux/component.h> @@ -48,7 +47,6 @@ #include "exynos_mixer.h" #include <linux/gpio.h> -#include <media/s5p_hdmi.h> #define ctx_from_connector(c) container_of(c, struct hdmi_context, connector) @@ -88,109 +86,14 @@ struct hdmi_resources { int regul_count; }; -struct hdmi_tg_regs { - u8 cmd[1]; - u8 h_fsz[2]; - u8 hact_st[2]; - u8 hact_sz[2]; - u8 v_fsz[2]; - u8 vsync[2]; - u8 vsync2[2]; - u8 vact_st[2]; - u8 vact_sz[2]; - u8 field_chg[2]; - u8 vact_st2[2]; - u8 vact_st3[2]; - u8 vact_st4[2]; - u8 vsync_top_hdmi[2]; - u8 vsync_bot_hdmi[2]; - u8 field_top_hdmi[2]; - u8 field_bot_hdmi[2]; - u8 tg_3d[1]; -}; - -struct hdmi_v13_core_regs { - u8 h_blank[2]; - u8 v_blank[3]; - u8 h_v_line[3]; - u8 vsync_pol[1]; - u8 int_pro_mode[1]; - u8 v_blank_f[3]; - u8 h_sync_gen[3]; - u8 v_sync_gen1[3]; - u8 v_sync_gen2[3]; - u8 v_sync_gen3[3]; -}; - -struct hdmi_v14_core_regs { - u8 h_blank[2]; - u8 v2_blank[2]; - u8 v1_blank[2]; - u8 v_line[2]; - u8 h_line[2]; - u8 hsync_pol[1]; - u8 vsync_pol[1]; - u8 int_pro_mode[1]; - u8 v_blank_f0[2]; - u8 v_blank_f1[2]; - u8 h_sync_start[2]; - u8 h_sync_end[2]; - u8 v_sync_line_bef_2[2]; - u8 v_sync_line_bef_1[2]; - u8 v_sync_line_aft_2[2]; - u8 v_sync_line_aft_1[2]; - u8 v_sync_line_aft_pxl_2[2]; - u8 v_sync_line_aft_pxl_1[2]; - u8 v_blank_f2[2]; /* for 3D mode */ - u8 v_blank_f3[2]; /* for 3D mode */ - u8 v_blank_f4[2]; /* for 3D mode */ - u8 v_blank_f5[2]; /* for 3D mode */ - u8 v_sync_line_aft_3[2]; - u8 v_sync_line_aft_4[2]; - u8 v_sync_line_aft_5[2]; - u8 v_sync_line_aft_6[2]; - u8 v_sync_line_aft_pxl_3[2]; - u8 v_sync_line_aft_pxl_4[2]; - u8 v_sync_line_aft_pxl_5[2]; - u8 v_sync_line_aft_pxl_6[2]; - u8 vact_space_1[2]; - u8 vact_space_2[2]; - u8 vact_space_3[2]; - u8 vact_space_4[2]; - u8 vact_space_5[2]; - u8 vact_space_6[2]; -}; - -struct hdmi_v13_conf { - struct hdmi_v13_core_regs core; - struct hdmi_tg_regs tg; -}; - -struct hdmi_v14_conf { - struct hdmi_v14_core_regs core; - struct hdmi_tg_regs tg; -}; - -struct hdmi_conf_regs { - int pixel_clock; - int cea_video_id; - enum hdmi_picture_aspect aspect_ratio; - union { - struct hdmi_v13_conf v13_conf; - struct hdmi_v14_conf v14_conf; - } conf; -}; - struct hdmi_context { - struct exynos_drm_display display; + struct drm_encoder encoder; struct device *dev; struct drm_device *drm_dev; struct drm_connector connector; - struct drm_encoder *encoder; bool hpd; bool powered; bool dvi_mode; - struct mutex hdmi_mutex; void __iomem *regs; int irq; @@ -201,22 +104,20 @@ struct hdmi_context { /* current hdmiphy conf regs */ struct drm_display_mode current_mode; - struct hdmi_conf_regs mode_conf; + u8 cea_video_id; struct hdmi_resources res; + const struct hdmi_driver_data *drv_data; int hpd_gpio; void __iomem *regs_hdmiphy; - const struct hdmiphy_config *phy_confs; - unsigned int phy_conf_count; struct regmap *pmureg; - enum hdmi_type type; }; -static inline struct hdmi_context *display_to_hdmi(struct exynos_drm_display *d) +static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e) { - return container_of(d, struct hdmi_context, display); + return container_of(e, struct hdmi_context, encoder); } struct hdmiphy_config { @@ -624,6 +525,16 @@ static inline void hdmi_reg_writeb(struct hdmi_context *hdata, writeb(value, hdata->regs + reg_id); } +static inline void hdmi_reg_writev(struct hdmi_context *hdata, u32 
reg_id, + int bytes, u32 val) +{ + while (--bytes >= 0) { + writeb(val & 0xff, hdata->regs + reg_id); + val >>= 8; + reg_id += 4; + } +} + static inline void hdmi_reg_writemask(struct hdmi_context *hdata, u32 reg_id, u32 value, u32 mask) { @@ -930,7 +841,7 @@ static void hdmi_v14_regs_dump(struct hdmi_context *hdata, char *prefix) static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix) { - if (hdata->type == HDMI_TYPE13) + if (hdata->drv_data->type == HDMI_TYPE13) hdmi_v13_regs_dump(hdata, prefix); else hdmi_v14_regs_dump(hdata, prefix); @@ -957,7 +868,7 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata, u32 hdr_sum; u8 chksum; u32 mod; - u32 vic; + u8 ar; mod = hdmi_reg_read(hdata, HDMI_MODE_SEL); if (hdata->dvi_mode) { @@ -988,27 +899,22 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata, * Set the aspect ratio as per the mode, mentioned in * Table 9 AVI InfoFrame Data Byte 2 of CEA-861-D Standard */ - switch (hdata->mode_conf.aspect_ratio) { + ar = hdata->current_mode.picture_aspect_ratio; + switch (ar) { case HDMI_PICTURE_ASPECT_4_3: - hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), - hdata->mode_conf.aspect_ratio | - AVI_4_3_CENTER_RATIO); + ar |= AVI_4_3_CENTER_RATIO; break; case HDMI_PICTURE_ASPECT_16_9: - hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), - hdata->mode_conf.aspect_ratio | - AVI_16_9_CENTER_RATIO); + ar |= AVI_16_9_CENTER_RATIO; break; case HDMI_PICTURE_ASPECT_NONE: default: - hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), - hdata->mode_conf.aspect_ratio | - AVI_SAME_AS_PIC_ASPECT_RATIO); + ar |= AVI_SAME_AS_PIC_ASPECT_RATIO; break; } + hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), ar); - vic = hdata->mode_conf.cea_video_id; - hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic); + hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), hdata->cea_video_id); chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1), infoframe->any.length, hdr_sum); @@ -1038,10 +944,10 @@ static enum drm_connector_status hdmi_detect(struct drm_connector *connector, { struct hdmi_context *hdata = ctx_from_connector(connector); - hdata->hpd = gpio_get_value(hdata->hpd_gpio); + if (gpio_get_value(hdata->hpd_gpio)) + return connector_status_connected; - return hdata->hpd ? 
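/*
 * Worked example (not from this patch): hdmi_reg_writev() above emits
 * 'val' as 'bytes' little-endian bytes into consecutive SFRs, which are
 * spaced 4 bytes apart in this IP (hence reg_id += 4). For a 2-byte
 * timing field:
 */
hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, 2200);	/* 2200 == 0x0898 */
/*
 * expands to:
 *   writeb(0x98, hdata->regs + HDMI_TG_H_FSZ_L);       low byte (_L)
 *   writeb(0x08, hdata->regs + HDMI_TG_H_FSZ_L + 4);   high byte (_H)
 * replacing the old writeb() pair fed from a shadow buffer.
 */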
connector_status_connected : - connector_status_disconnected; + return connector_status_disconnected; } static void hdmi_connector_destroy(struct drm_connector *connector) @@ -1064,6 +970,7 @@ static int hdmi_get_modes(struct drm_connector *connector) { struct hdmi_context *hdata = ctx_from_connector(connector); struct edid *edid; + int ret; if (!hdata->ddc_adpt) return -ENODEV; @@ -1079,15 +986,19 @@ static int hdmi_get_modes(struct drm_connector *connector) drm_mode_connector_update_edid_property(connector, edid); - return drm_add_edid_modes(connector, edid); + ret = drm_add_edid_modes(connector, edid); + + kfree(edid); + + return ret; } static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock) { int i; - for (i = 0; i < hdata->phy_conf_count; i++) - if (hdata->phy_confs[i].pixel_clock == pixel_clock) + for (i = 0; i < hdata->drv_data->phy_conf_count; i++) + if (hdata->drv_data->phy_confs[i].pixel_clock == pixel_clock) return i; DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock); @@ -1120,7 +1031,7 @@ static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector) { struct hdmi_context *hdata = ctx_from_connector(connector); - return hdata->encoder; + return &hdata->encoder; } static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = { @@ -1129,14 +1040,12 @@ static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = { .best_encoder = hdmi_best_encoder, }; -static int hdmi_create_connector(struct exynos_drm_display *display, - struct drm_encoder *encoder) +static int hdmi_create_connector(struct drm_encoder *encoder) { - struct hdmi_context *hdata = display_to_hdmi(display); + struct hdmi_context *hdata = encoder_to_hdmi(encoder); struct drm_connector *connector = &hdata->connector; int ret; - hdata->encoder = encoder; connector->interlace_allowed = true; connector->polled = DRM_CONNECTOR_POLL_HPD; @@ -1154,23 +1063,30 @@ static int hdmi_create_connector(struct exynos_drm_display *display, return 0; } -static void hdmi_mode_fixup(struct exynos_drm_display *display, - struct drm_connector *connector, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static bool hdmi_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { + struct drm_device *dev = encoder->dev; + struct drm_connector *connector; struct drm_display_mode *m; int mode_ok; - DRM_DEBUG_KMS("%s\n", __FILE__); - drm_mode_set_crtcinfo(adjusted_mode, 0); + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) + break; + } + + if (connector->encoder != encoder) + return true; + mode_ok = hdmi_mode_valid(connector, adjusted_mode); /* just return if user desired mode exists. 
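/*
 * Sketch (not from this patch): the hdmi_get_modes() hunk above also
 * plugs a leak -- drm_get_edid() hands back a kmalloc'd EDID that the
 * caller owns, so it must be freed once the modes are added. The
 * corrected shape, roughly:
 */
edid = drm_get_edid(connector, hdata->ddc_adpt);
if (!edid)
	return -ENODEV;
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);	/* copies what it needs */
kfree(edid);					/* caller frees the blob */
return ret;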
*/ if (mode_ok == MODE_OK) - return; + return true; /* * otherwise, find the most suitable mode among modes and change it @@ -1190,6 +1106,8 @@ static void hdmi_mode_fixup(struct exynos_drm_display *display, break; } } + + return true; } static void hdmi_set_acr(u32 freq, u8 *acr) @@ -1252,7 +1170,7 @@ static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr) hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]); hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]); - if (hdata->type == HDMI_TYPE13) + if (hdata->drv_data->type == HDMI_TYPE13) hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4); else hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4); @@ -1386,7 +1304,7 @@ static void hdmi_conf_init(struct hdmi_context *hdata) HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS); } - if (hdata->type == HDMI_TYPE13) { + if (hdata->drv_data->type == HDMI_TYPE13) { /* choose bluescreen (fecal) color */ hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12); hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34); @@ -1419,66 +1337,94 @@ static void hdmi_conf_init(struct hdmi_context *hdata) static void hdmi_v13_mode_apply(struct hdmi_context *hdata) { - const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg; - const struct hdmi_v13_core_regs *core = - &hdata->mode_conf.conf.v13_conf.core; + struct drm_display_mode *m = &hdata->current_mode; + unsigned int val; int tries; - /* setting core registers */ - hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]); - hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]); - hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_0, core->v_blank[0]); - hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_1, core->v_blank[1]); - hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_2, core->v_blank[2]); - hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_0, core->h_v_line[0]); - hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_1, core->h_v_line[1]); - hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_2, core->h_v_line[2]); - hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]); - hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]); - hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_0, core->v_blank_f[0]); - hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_1, core->v_blank_f[1]); - hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_2, core->v_blank_f[2]); - hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_0, core->h_sync_gen[0]); - hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_1, core->h_sync_gen[1]); - hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_2, core->h_sync_gen[2]); - hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_0, core->v_sync_gen1[0]); - hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_1, core->v_sync_gen1[1]); - hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_2, core->v_sync_gen1[2]); - hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_0, core->v_sync_gen2[0]); - hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_1, core->v_sync_gen2[1]); - hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_2, core->v_sync_gen2[2]); - hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_0, core->v_sync_gen3[0]); - hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]); - hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, core->v_sync_gen3[2]); + hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay); + hdmi_reg_writev(hdata, HDMI_V13_H_V_LINE_0, 3, + (m->htotal << 12) | m->vtotal); + + val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0; + hdmi_reg_writev(hdata, HDMI_VSYNC_POL, 1, val); + + val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 
1 : 0; + hdmi_reg_writev(hdata, HDMI_INT_PRO_MODE, 1, val); + + val = (m->hsync_start - m->hdisplay - 2); + val |= ((m->hsync_end - m->hdisplay - 2) << 10); + val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0)<<20; + hdmi_reg_writev(hdata, HDMI_V13_H_SYNC_GEN_0, 3, val); + + /* + * Quirk requirement for exynos HDMI IP design, + * 2 pixels less than the actual calculation for hsync_start + * and end. + */ + + /* Following values & calculations differ for different type of modes */ + if (m->flags & DRM_MODE_FLAG_INTERLACE) { + /* Interlaced Mode */ + val = ((m->vsync_end - m->vdisplay) / 2); + val |= ((m->vsync_start - m->vdisplay) / 2) << 12; + hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_1_0, 3, val); + + val = m->vtotal / 2; + val |= ((m->vtotal - m->vdisplay) / 2) << 11; + hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_0, 3, val); + + val = (m->vtotal + + ((m->vsync_end - m->vsync_start) * 4) + 5) / 2; + val |= m->vtotal << 11; + hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_F_0, 3, val); + + val = ((m->vtotal / 2) + 7); + val |= ((m->vtotal / 2) + 2) << 12; + hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_2_0, 3, val); + + val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay)); + val |= ((m->htotal / 2) + + (m->hsync_start - m->hdisplay)) << 12; + hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_3_0, 3, val); + + hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2, + (m->vtotal - m->vdisplay) / 2); + hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay / 2); + + hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x249); + } else { + /* Progressive Mode */ + + val = m->vtotal; + val |= (m->vtotal - m->vdisplay) << 11; + hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_0, 3, val); + + hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_F_0, 3, 0); + + val = (m->vsync_end - m->vdisplay); + val |= ((m->vsync_start - m->vdisplay) << 12); + hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_1_0, 3, val); + + hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_2_0, 3, 0x1001); + hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_3_0, 3, 0x1001); + hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2, + m->vtotal - m->vdisplay); + hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay); + hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x248); + } + /* Timing generator registers */ - hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]); - hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]); - hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]); - hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]); - hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]); - hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]); - hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]); - hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]); - hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]); - hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]); - hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]); - hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]); - hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]); - hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]); - hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]); - hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]); - hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]); - hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]); - hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]); - hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]); - hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]); - hdmi_reg_writeb(hdata, 
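/*
 * Worked example (not from this patch), plugging the standard CEA-861
 * 1080p60 timing (hdisplay 1920, hsync 2008-2052, htotal 2200;
 * vdisplay 1080, vsync 1084-1089, vtotal 1125) into the v13 formulas
 * above:
 *
 *   HDMI_H_BLANK      = 2200 - 1920                       = 280
 *   HDMI_V13_H_V_LINE = (2200 << 12) | 1125               = 0x898465
 *   H_SYNC_GEN        = (2008-1920-2) | (2052-1920-2)<<10 = 0x20856
 *   V_BLANK (progr.)  = 1125 | (1125-1080)<<11            = 0x16c65
 *   V_SYNC_GEN_1      = (1089-1080) | (1084-1080)<<12     = 0x4009
 */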
HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]); - hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]); - hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]); - hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]); - hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]); - hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]); - hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]); + hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal); + hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay); + hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay); + hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal); + hdmi_reg_writev(hdata, HDMI_TG_VSYNC_L, 2, 0x1); + hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2, 0x233); + hdmi_reg_writev(hdata, HDMI_TG_FIELD_CHG_L, 2, 0x233); + hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1); + hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233); + hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1); + hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233); /* waiting for HDMIPHY's PLL to get to steady state */ for (tries = 100; tries; --tries) { @@ -1503,144 +1449,119 @@ static void hdmi_v13_mode_apply(struct hdmi_context *hdata) static void hdmi_v14_mode_apply(struct hdmi_context *hdata) { - const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg; - const struct hdmi_v14_core_regs *core = - &hdata->mode_conf.conf.v14_conf.core; + struct drm_display_mode *m = &hdata->current_mode; int tries; - /* setting core registers */ - hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]); - hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]); - hdmi_reg_writeb(hdata, HDMI_V2_BLANK_0, core->v2_blank[0]); - hdmi_reg_writeb(hdata, HDMI_V2_BLANK_1, core->v2_blank[1]); - hdmi_reg_writeb(hdata, HDMI_V1_BLANK_0, core->v1_blank[0]); - hdmi_reg_writeb(hdata, HDMI_V1_BLANK_1, core->v1_blank[1]); - hdmi_reg_writeb(hdata, HDMI_V_LINE_0, core->v_line[0]); - hdmi_reg_writeb(hdata, HDMI_V_LINE_1, core->v_line[1]); - hdmi_reg_writeb(hdata, HDMI_H_LINE_0, core->h_line[0]); - hdmi_reg_writeb(hdata, HDMI_H_LINE_1, core->h_line[1]); - hdmi_reg_writeb(hdata, HDMI_HSYNC_POL, core->hsync_pol[0]); - hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]); - hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]); - hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_0, core->v_blank_f0[0]); - hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_1, core->v_blank_f0[1]); - hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_0, core->v_blank_f1[0]); - hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_1, core->v_blank_f1[1]); - hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_0, core->h_sync_start[0]); - hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_1, core->h_sync_start[1]); - hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_0, core->h_sync_end[0]); - hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_1, core->h_sync_end[1]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_0, - core->v_sync_line_bef_2[0]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_1, - core->v_sync_line_bef_2[1]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_0, - core->v_sync_line_bef_1[0]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_1, - core->v_sync_line_bef_1[1]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_0, - core->v_sync_line_aft_2[0]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_1, - core->v_sync_line_aft_2[1]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_0, - core->v_sync_line_aft_1[0]); - 
hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_1, - core->v_sync_line_aft_1[1]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, - core->v_sync_line_aft_pxl_2[0]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_1, - core->v_sync_line_aft_pxl_2[1]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, - core->v_sync_line_aft_pxl_1[0]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_1, - core->v_sync_line_aft_pxl_1[1]); - hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_0, core->v_blank_f2[0]); - hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_1, core->v_blank_f2[1]); - hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_0, core->v_blank_f3[0]); - hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_1, core->v_blank_f3[1]); - hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_0, core->v_blank_f4[0]); - hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_1, core->v_blank_f4[1]); - hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_0, core->v_blank_f5[0]); - hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_1, core->v_blank_f5[1]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_0, - core->v_sync_line_aft_3[0]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_1, - core->v_sync_line_aft_3[1]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_0, - core->v_sync_line_aft_4[0]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_1, - core->v_sync_line_aft_4[1]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_0, - core->v_sync_line_aft_5[0]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_1, - core->v_sync_line_aft_5[1]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_0, - core->v_sync_line_aft_6[0]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_1, - core->v_sync_line_aft_6[1]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0, - core->v_sync_line_aft_pxl_3[0]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_1, - core->v_sync_line_aft_pxl_3[1]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0, - core->v_sync_line_aft_pxl_4[0]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_1, - core->v_sync_line_aft_pxl_4[1]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0, - core->v_sync_line_aft_pxl_5[0]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_1, - core->v_sync_line_aft_pxl_5[1]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0, - core->v_sync_line_aft_pxl_6[0]); - hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_1, - core->v_sync_line_aft_pxl_6[1]); - hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_0, core->vact_space_1[0]); - hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_1, core->vact_space_1[1]); - hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_0, core->vact_space_2[0]); - hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_1, core->vact_space_2[1]); - hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_0, core->vact_space_3[0]); - hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_1, core->vact_space_3[1]); - hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_0, core->vact_space_4[0]); - hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_1, core->vact_space_4[1]); - hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_0, core->vact_space_5[0]); - hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_1, core->vact_space_5[1]); - hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_0, core->vact_space_6[0]); - hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_1, core->vact_space_6[1]); + hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay); + hdmi_reg_writev(hdata, HDMI_V_LINE_0, 2, m->vtotal); + hdmi_reg_writev(hdata, HDMI_H_LINE_0, 2, m->htotal); + hdmi_reg_writev(hdata, HDMI_HSYNC_POL, 1, + (m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0); + hdmi_reg_writev(hdata, HDMI_VSYNC_POL, 1, + (m->flags & DRM_MODE_FLAG_NVSYNC) ? 
1 : 0); + hdmi_reg_writev(hdata, HDMI_INT_PRO_MODE, 1, + (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0); + + /* + * Quirk requirement for exynos 5 HDMI IP design, + * 2 pixels less than the actual calculation for hsync_start + * and end. + */ + + /* Following values & calculations differ for different type of modes */ + if (m->flags & DRM_MODE_FLAG_INTERLACE) { + /* Interlaced Mode */ + hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2, + (m->vsync_end - m->vdisplay) / 2); + hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2, + (m->vsync_start - m->vdisplay) / 2); + hdmi_reg_writev(hdata, HDMI_V2_BLANK_0, 2, m->vtotal / 2); + hdmi_reg_writev(hdata, HDMI_V1_BLANK_0, 2, + (m->vtotal - m->vdisplay) / 2); + hdmi_reg_writev(hdata, HDMI_V_BLANK_F0_0, 2, + m->vtotal - m->vdisplay / 2); + hdmi_reg_writev(hdata, HDMI_V_BLANK_F1_0, 2, m->vtotal); + hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_2_0, 2, + (m->vtotal / 2) + 7); + hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_1_0, 2, + (m->vtotal / 2) + 2); + hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, 2, + (m->htotal / 2) + (m->hsync_start - m->hdisplay)); + hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, 2, + (m->htotal / 2) + (m->hsync_start - m->hdisplay)); + hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2, + (m->vtotal - m->vdisplay) / 2); + hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay / 2); + hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, + m->vtotal - m->vdisplay / 2); + hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2, + (m->vtotal / 2) + 1); + hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, + (m->vtotal / 2) + 1); + hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, + (m->vtotal / 2) + 1); + hdmi_reg_writev(hdata, HDMI_TG_VACT_ST3_L, 2, 0x0); + hdmi_reg_writev(hdata, HDMI_TG_VACT_ST4_L, 2, 0x0); + } else { + /* Progressive Mode */ + hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2, + m->vsync_end - m->vdisplay); + hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2, + m->vsync_start - m->vdisplay); + hdmi_reg_writev(hdata, HDMI_V2_BLANK_0, 2, m->vtotal); + hdmi_reg_writev(hdata, HDMI_V1_BLANK_0, 2, + m->vtotal - m->vdisplay); + hdmi_reg_writev(hdata, HDMI_V_BLANK_F0_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_V_BLANK_F1_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_2_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_1_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2, + m->vtotal - m->vdisplay); + hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay); + hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x248); + hdmi_reg_writev(hdata, HDMI_TG_VACT_ST3_L, 2, 0x47b); + hdmi_reg_writev(hdata, HDMI_TG_VACT_ST4_L, 2, 0x6ae); + hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2, 0x233); + hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233); + hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233); + } + + /* Following values & calculations are same irrespective of mode type */ + hdmi_reg_writev(hdata, HDMI_H_SYNC_START_0, 2, + m->hsync_start - m->hdisplay - 2); + hdmi_reg_writev(hdata, HDMI_H_SYNC_END_0, 2, + m->hsync_end - m->hdisplay - 2); + hdmi_reg_writev(hdata, HDMI_VACT_SPACE_1_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_VACT_SPACE_2_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_VACT_SPACE_3_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_VACT_SPACE_4_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_VACT_SPACE_5_0, 2, 0xffff); + hdmi_reg_writev(hdata, 
HDMI_VACT_SPACE_6_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_V_BLANK_F2_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_V_BLANK_F3_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_V_BLANK_F4_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_V_BLANK_F5_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_3_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_4_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_5_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_6_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0, 2, 0xffff); + hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0, 2, 0xffff); /* Timing generator registers */ - hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]); - hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]); - hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]); - hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]); - hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]); - hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]); - hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]); - hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]); - hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]); - hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]); - hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]); - hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]); - hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]); - hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]); - hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]); - hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]); - hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]); - hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]); - hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]); - hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]); - hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3[0]); - hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3[1]); - hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4[0]); - hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4[1]); - hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]); - hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]); - hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]); - hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]); - hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]); - hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]); - hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]); - hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]); - hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d[0]); + hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal); + hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay); + hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay); + hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal); + hdmi_reg_writev(hdata, HDMI_TG_VSYNC_L, 2, 0x1); + hdmi_reg_writev(hdata, HDMI_TG_FIELD_CHG_L, 2, 0x233); + hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1); + hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1); + hdmi_reg_writev(hdata, HDMI_TG_3D, 1, 0x0); /* waiting for HDMIPHY's PLL to get to steady state */ for (tries = 100; tries; --tries) { @@ -1665,7 +1586,7 @@ 
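/*
 * Worked example (not from this patch): the same CEA 1080p60 timing in
 * the v14 progressive branch above:
 *
 *   V2_BLANK     = vtotal            = 1125
 *   V1_BLANK     = vtotal - vdisplay = 45
 *   H_SYNC_START = 2008 - 1920 - 2   = 86
 *   H_SYNC_END   = 2052 - 1920 - 2   = 130
 *
 * The interlace-only registers (V_BLANK_F*, V_SYNC_LINE_AFT_*) are
 * parked at 0xffff, a line number that is never reached, which
 * effectively disables them.
 */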
static void hdmi_v14_mode_apply(struct hdmi_context *hdata) static void hdmi_mode_apply(struct hdmi_context *hdata) { - if (hdata->type == HDMI_TYPE13) + if (hdata->drv_data->type == HDMI_TYPE13) hdmi_v13_mode_apply(hdata); else hdmi_v14_mode_apply(hdata); @@ -1683,7 +1604,7 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata) hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE, HDMI_PHY_ENABLE_MODE_SET); - if (hdata->type == HDMI_TYPE13) + if (hdata->drv_data->type == HDMI_TYPE13) reg = HDMI_V13_PHY_RSTOUT; else reg = HDMI_PHY_RSTOUT; @@ -1697,7 +1618,7 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata) static void hdmiphy_poweron(struct hdmi_context *hdata) { - if (hdata->type != HDMI_TYPE14) + if (hdata->drv_data->type != HDMI_TYPE14) return; DRM_DEBUG_KMS("\n"); @@ -1717,7 +1638,7 @@ static void hdmiphy_poweron(struct hdmi_context *hdata) static void hdmiphy_poweroff(struct hdmi_context *hdata) { - if (hdata->type != HDMI_TYPE14) + if (hdata->drv_data->type != HDMI_TYPE14) return; DRM_DEBUG_KMS("\n"); @@ -1743,13 +1664,14 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata) int i; /* pixel clock */ - i = hdmi_find_phy_conf(hdata, hdata->mode_conf.pixel_clock); + i = hdmi_find_phy_conf(hdata, hdata->current_mode.clock * 1000); if (i < 0) { DRM_ERROR("failed to find hdmiphy conf\n"); return; } - ret = hdmiphy_reg_write_buf(hdata, 0, hdata->phy_confs[i].conf, 32); + ret = hdmiphy_reg_write_buf(hdata, 0, + hdata->drv_data->phy_confs[i].conf, 32); if (ret) { DRM_ERROR("failed to configure hdmiphy\n"); return; @@ -1771,10 +1693,8 @@ static void hdmi_conf_apply(struct hdmi_context *hdata) hdmiphy_conf_reset(hdata); hdmiphy_conf_apply(hdata); - mutex_lock(&hdata->hdmi_mutex); hdmi_start(hdata, false); hdmi_conf_init(hdata); - mutex_unlock(&hdata->hdmi_mutex); hdmi_audio_init(hdata); @@ -1785,271 +1705,32 @@ static void hdmi_conf_apply(struct hdmi_context *hdata) hdmi_regs_dump(hdata, "start"); } -static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value) -{ - int i; - BUG_ON(num_bytes > 4); - for (i = 0; i < num_bytes; i++) - reg_pair[i] = (value >> (8 * i)) & 0xff; -} - -static void hdmi_v13_mode_set(struct hdmi_context *hdata, - struct drm_display_mode *m) +static void hdmi_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { - struct hdmi_v13_core_regs *core = &hdata->mode_conf.conf.v13_conf.core; - struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg; - unsigned int val; - - hdata->mode_conf.cea_video_id = - drm_match_cea_mode((struct drm_display_mode *)m); - hdata->mode_conf.pixel_clock = m->clock * 1000; - hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio; - - hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay); - hdmi_set_reg(core->h_v_line, 3, (m->htotal << 12) | m->vtotal); - - val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0; - hdmi_set_reg(core->vsync_pol, 1, val); - - val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0; - hdmi_set_reg(core->int_pro_mode, 1, val); - - val = (m->hsync_start - m->hdisplay - 2); - val |= ((m->hsync_end - m->hdisplay - 2) << 10); - val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0)<<20; - hdmi_set_reg(core->h_sync_gen, 3, val); - - /* - * Quirk requirement for exynos HDMI IP design, - * 2 pixels less than the actual calculation for hsync_start - * and end. 
- */ - - /* Following values & calculations differ for different type of modes */ - if (m->flags & DRM_MODE_FLAG_INTERLACE) { - /* Interlaced Mode */ - val = ((m->vsync_end - m->vdisplay) / 2); - val |= ((m->vsync_start - m->vdisplay) / 2) << 12; - hdmi_set_reg(core->v_sync_gen1, 3, val); - - val = m->vtotal / 2; - val |= ((m->vtotal - m->vdisplay) / 2) << 11; - hdmi_set_reg(core->v_blank, 3, val); - - val = (m->vtotal + - ((m->vsync_end - m->vsync_start) * 4) + 5) / 2; - val |= m->vtotal << 11; - hdmi_set_reg(core->v_blank_f, 3, val); - - val = ((m->vtotal / 2) + 7); - val |= ((m->vtotal / 2) + 2) << 12; - hdmi_set_reg(core->v_sync_gen2, 3, val); - - val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay)); - val |= ((m->htotal / 2) + - (m->hsync_start - m->hdisplay)) << 12; - hdmi_set_reg(core->v_sync_gen3, 3, val); - - hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2); - hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2); - - hdmi_set_reg(tg->vact_st2, 2, 0x249);/* Reset value + 1*/ - } else { - /* Progressive Mode */ - - val = m->vtotal; - val |= (m->vtotal - m->vdisplay) << 11; - hdmi_set_reg(core->v_blank, 3, val); - - hdmi_set_reg(core->v_blank_f, 3, 0); - - val = (m->vsync_end - m->vdisplay); - val |= ((m->vsync_start - m->vdisplay) << 12); - hdmi_set_reg(core->v_sync_gen1, 3, val); - - hdmi_set_reg(core->v_sync_gen2, 3, 0x1001);/* Reset value */ - hdmi_set_reg(core->v_sync_gen3, 3, 0x1001);/* Reset value */ - hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay); - hdmi_set_reg(tg->vact_sz, 2, m->vdisplay); - hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */ - } - - /* Timing generator registers */ - hdmi_set_reg(tg->cmd, 1, 0x0); - hdmi_set_reg(tg->h_fsz, 2, m->htotal); - hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay); - hdmi_set_reg(tg->hact_sz, 2, m->hdisplay); - hdmi_set_reg(tg->v_fsz, 2, m->vtotal); - hdmi_set_reg(tg->vsync, 2, 0x1); - hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */ - hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */ - hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */ - hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */ - hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */ - hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */ - hdmi_set_reg(tg->tg_3d, 1, 0x0); /* Not used */ -} - -static void hdmi_v14_mode_set(struct hdmi_context *hdata, - struct drm_display_mode *m) -{ - struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg; - struct hdmi_v14_core_regs *core = - &hdata->mode_conf.conf.v14_conf.core; - - hdata->mode_conf.cea_video_id = - drm_match_cea_mode((struct drm_display_mode *)m); - hdata->mode_conf.pixel_clock = m->clock * 1000; - hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio; - - hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay); - hdmi_set_reg(core->v_line, 2, m->vtotal); - hdmi_set_reg(core->h_line, 2, m->htotal); - hdmi_set_reg(core->hsync_pol, 1, - (m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0); - hdmi_set_reg(core->vsync_pol, 1, - (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0); - hdmi_set_reg(core->int_pro_mode, 1, - (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0); - - /* - * Quirk requirement for exynos 5 HDMI IP design, - * 2 pixels less than the actual calculation for hsync_start - * and end. 
- */ - - /* Following values & calculations differ for different type of modes */ - if (m->flags & DRM_MODE_FLAG_INTERLACE) { - /* Interlaced Mode */ - hdmi_set_reg(core->v_sync_line_bef_2, 2, - (m->vsync_end - m->vdisplay) / 2); - hdmi_set_reg(core->v_sync_line_bef_1, 2, - (m->vsync_start - m->vdisplay) / 2); - hdmi_set_reg(core->v2_blank, 2, m->vtotal / 2); - hdmi_set_reg(core->v1_blank, 2, (m->vtotal - m->vdisplay) / 2); - hdmi_set_reg(core->v_blank_f0, 2, m->vtotal - m->vdisplay / 2); - hdmi_set_reg(core->v_blank_f1, 2, m->vtotal); - hdmi_set_reg(core->v_sync_line_aft_2, 2, (m->vtotal / 2) + 7); - hdmi_set_reg(core->v_sync_line_aft_1, 2, (m->vtotal / 2) + 2); - hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2, - (m->htotal / 2) + (m->hsync_start - m->hdisplay)); - hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2, - (m->htotal / 2) + (m->hsync_start - m->hdisplay)); - hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2); - hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2); - hdmi_set_reg(tg->vact_st2, 2, m->vtotal - m->vdisplay / 2); - hdmi_set_reg(tg->vsync2, 2, (m->vtotal / 2) + 1); - hdmi_set_reg(tg->vsync_bot_hdmi, 2, (m->vtotal / 2) + 1); - hdmi_set_reg(tg->field_bot_hdmi, 2, (m->vtotal / 2) + 1); - hdmi_set_reg(tg->vact_st3, 2, 0x0); - hdmi_set_reg(tg->vact_st4, 2, 0x0); - } else { - /* Progressive Mode */ - hdmi_set_reg(core->v_sync_line_bef_2, 2, - m->vsync_end - m->vdisplay); - hdmi_set_reg(core->v_sync_line_bef_1, 2, - m->vsync_start - m->vdisplay); - hdmi_set_reg(core->v2_blank, 2, m->vtotal); - hdmi_set_reg(core->v1_blank, 2, m->vtotal - m->vdisplay); - hdmi_set_reg(core->v_blank_f0, 2, 0xffff); - hdmi_set_reg(core->v_blank_f1, 2, 0xffff); - hdmi_set_reg(core->v_sync_line_aft_2, 2, 0xffff); - hdmi_set_reg(core->v_sync_line_aft_1, 2, 0xffff); - hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2, 0xffff); - hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2, 0xffff); - hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay); - hdmi_set_reg(tg->vact_sz, 2, m->vdisplay); - hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */ - hdmi_set_reg(tg->vact_st3, 2, 0x47b); /* Reset value */ - hdmi_set_reg(tg->vact_st4, 2, 0x6ae); /* Reset value */ - hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */ - hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */ - hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */ - } - - /* Following values & calculations are same irrespective of mode type */ - hdmi_set_reg(core->h_sync_start, 2, m->hsync_start - m->hdisplay - 2); - hdmi_set_reg(core->h_sync_end, 2, m->hsync_end - m->hdisplay - 2); - hdmi_set_reg(core->vact_space_1, 2, 0xffff); - hdmi_set_reg(core->vact_space_2, 2, 0xffff); - hdmi_set_reg(core->vact_space_3, 2, 0xffff); - hdmi_set_reg(core->vact_space_4, 2, 0xffff); - hdmi_set_reg(core->vact_space_5, 2, 0xffff); - hdmi_set_reg(core->vact_space_6, 2, 0xffff); - hdmi_set_reg(core->v_blank_f2, 2, 0xffff); - hdmi_set_reg(core->v_blank_f3, 2, 0xffff); - hdmi_set_reg(core->v_blank_f4, 2, 0xffff); - hdmi_set_reg(core->v_blank_f5, 2, 0xffff); - hdmi_set_reg(core->v_sync_line_aft_3, 2, 0xffff); - hdmi_set_reg(core->v_sync_line_aft_4, 2, 0xffff); - hdmi_set_reg(core->v_sync_line_aft_5, 2, 0xffff); - hdmi_set_reg(core->v_sync_line_aft_6, 2, 0xffff); - hdmi_set_reg(core->v_sync_line_aft_pxl_3, 2, 0xffff); - hdmi_set_reg(core->v_sync_line_aft_pxl_4, 2, 0xffff); - hdmi_set_reg(core->v_sync_line_aft_pxl_5, 2, 0xffff); - hdmi_set_reg(core->v_sync_line_aft_pxl_6, 2, 0xffff); - - /* Timing generator registers */ - hdmi_set_reg(tg->cmd, 1, 0x0); - 
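/*
 * Note (not from this patch): hdmi_set_reg(), deleted above, was the
 * shadow-buffer twin of the new hdmi_reg_writev() -- the same
 * little-endian split, but into a u8 array instead of the SFRs:
 *
 *   for (i = 0; i < num_bytes; i++)
 *           reg_pair[i] = (value >> (8 * i)) & 0xff;
 *
 * Computing registers directly from current_mode at apply time lets
 * the patch drop that array and the whole hdmi_tg_regs /
 * hdmi_v13_core_regs / hdmi_v14_core_regs family with it.
 */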
hdmi_set_reg(tg->h_fsz, 2, m->htotal); - hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay); - hdmi_set_reg(tg->hact_sz, 2, m->hdisplay); - hdmi_set_reg(tg->v_fsz, 2, m->vtotal); - hdmi_set_reg(tg->vsync, 2, 0x1); - hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */ - hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */ - hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */ - hdmi_set_reg(tg->tg_3d, 1, 0x0); -} - -static void hdmi_mode_set(struct exynos_drm_display *display, - struct drm_display_mode *mode) -{ - struct hdmi_context *hdata = display_to_hdmi(display); - struct drm_display_mode *m = mode; + struct hdmi_context *hdata = encoder_to_hdmi(encoder); + struct drm_display_mode *m = adjusted_mode; DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n", m->hdisplay, m->vdisplay, m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ? "INTERLACED" : "PROGRESSIVE"); - /* preserve mode information for later use. */ - drm_mode_copy(&hdata->current_mode, mode); - - if (hdata->type == HDMI_TYPE13) - hdmi_v13_mode_set(hdata, mode); - else - hdmi_v14_mode_set(hdata, mode); -} - -static void hdmi_commit(struct exynos_drm_display *display) -{ - struct hdmi_context *hdata = display_to_hdmi(display); - - mutex_lock(&hdata->hdmi_mutex); - if (!hdata->powered) { - mutex_unlock(&hdata->hdmi_mutex); - return; - } - mutex_unlock(&hdata->hdmi_mutex); - - hdmi_conf_apply(hdata); + drm_mode_copy(&hdata->current_mode, m); + hdata->cea_video_id = drm_match_cea_mode(mode); } -static void hdmi_poweron(struct hdmi_context *hdata) +static void hdmi_enable(struct drm_encoder *encoder) { + struct hdmi_context *hdata = encoder_to_hdmi(encoder); struct hdmi_resources *res = &hdata->res; - mutex_lock(&hdata->hdmi_mutex); - if (hdata->powered) { - mutex_unlock(&hdata->hdmi_mutex); + if (hdata->powered) return; - } hdata->powered = true; - mutex_unlock(&hdata->hdmi_mutex); - pm_runtime_get_sync(hdata->dev); if (regulator_bulk_enable(res->regul_count, res->regul_bulk)) @@ -2063,17 +1744,32 @@ static void hdmi_poweron(struct hdmi_context *hdata) clk_prepare_enable(res->sclk_hdmi); hdmiphy_poweron(hdata); - hdmi_commit(&hdata->display); + hdmi_conf_apply(hdata); } -static void hdmi_poweroff(struct hdmi_context *hdata) +static void hdmi_disable(struct drm_encoder *encoder) { + struct hdmi_context *hdata = encoder_to_hdmi(encoder); struct hdmi_resources *res = &hdata->res; + struct drm_crtc *crtc = encoder->crtc; + const struct drm_crtc_helper_funcs *funcs = NULL; - mutex_lock(&hdata->hdmi_mutex); if (!hdata->powered) - goto out; - mutex_unlock(&hdata->hdmi_mutex); + return; + + /* + * The SFRs of VP and Mixer are updated by Vertical Sync of + * Timing generator which is a part of HDMI so the sequence + * to disable TV Subsystem should be as following, + * VP -> Mixer -> HDMI + * + * Below codes will try to disable Mixer and VP(if used) + * prior to disabling HDMI. 
+ */ + if (crtc) + funcs = crtc->helper_private; + if (funcs && funcs->disable) + (*funcs->disable)(crtc); /* HDMI System Disable */ hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_EN); @@ -2093,57 +1789,18 @@ static void hdmi_poweroff(struct hdmi_context *hdata) pm_runtime_put_sync(hdata->dev); - mutex_lock(&hdata->hdmi_mutex); hdata->powered = false; - -out: - mutex_unlock(&hdata->hdmi_mutex); } -static void hdmi_dpms(struct exynos_drm_display *display, int mode) -{ - struct hdmi_context *hdata = display_to_hdmi(display); - struct drm_encoder *encoder = hdata->encoder; - struct drm_crtc *crtc = encoder->crtc; - const struct drm_crtc_helper_funcs *funcs = NULL; - - DRM_DEBUG_KMS("mode %d\n", mode); - - switch (mode) { - case DRM_MODE_DPMS_ON: - hdmi_poweron(hdata); - break; - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - case DRM_MODE_DPMS_OFF: - /* - * The SFRs of VP and Mixer are updated by Vertical Sync of - * Timing generator which is a part of HDMI so the sequence - * to disable TV Subsystem should be as following, - * VP -> Mixer -> HDMI - * - * Below codes will try to disable Mixer and VP(if used) - * prior to disabling HDMI. - */ - if (crtc) - funcs = crtc->helper_private; - if (funcs && funcs->disable) - (*funcs->disable)(crtc); - - hdmi_poweroff(hdata); - break; - default: - DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode); - break; - } -} - -static struct exynos_drm_display_ops hdmi_display_ops = { - .create_connector = hdmi_create_connector, +static struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = { .mode_fixup = hdmi_mode_fixup, .mode_set = hdmi_mode_set, - .dpms = hdmi_dpms, - .commit = hdmi_commit, + .enable = hdmi_enable, + .disable = hdmi_disable, +}; + +static struct drm_encoder_funcs exynos_hdmi_encoder_funcs = { + .destroy = drm_encoder_cleanup, }; static void hdmi_hotplug_work_func(struct work_struct *work) @@ -2152,10 +1809,6 @@ static void hdmi_hotplug_work_func(struct work_struct *work) hdata = container_of(work, struct hdmi_context, hotplug_work.work); - mutex_lock(&hdata->hdmi_mutex); - hdata->hpd = gpio_get_value(hdata->hpd_gpio); - mutex_unlock(&hdata->hdmi_mutex); - if (hdata->drm_dev) drm_helper_hpd_irq_event(hdata->drm_dev); } @@ -2254,30 +1907,6 @@ fail: return ret; } -static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata - (struct device *dev) -{ - struct device_node *np = dev->of_node; - struct s5p_hdmi_platform_data *pd; - u32 value; - - pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); - if (!pd) - goto err_data; - - if (!of_find_property(np, "hpd-gpio", &value)) { - DRM_ERROR("no hpd gpio property found\n"); - goto err_data; - } - - pd->hpd_gpio = of_get_named_gpio(np, "hpd-gpio", 0); - - return pd; - -err_data: - return NULL; -} - static struct of_device_id hdmi_match_types[] = { { .compatible = "samsung,exynos5-hdmi", @@ -2301,10 +1930,33 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm_dev = data; struct hdmi_context *hdata = dev_get_drvdata(dev); + struct drm_encoder *encoder = &hdata->encoder; + int ret, pipe; hdata->drm_dev = drm_dev; - return exynos_drm_create_enc_conn(drm_dev, &hdata->display); + pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev, + EXYNOS_DISPLAY_TYPE_HDMI); + if (pipe < 0) + return pipe; + + encoder->possible_crtcs = 1 << pipe; + + DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); + + drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + + drm_encoder_helper_add(encoder, 
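/*
 * Sketch (not from this patch): hdmi_disable() above encodes the
 * required TV-path shutdown order (VP -> Mixer -> HDMI) by running the
 * CRTC helper's ->disable() before gating the HDMI core:
 */
if (crtc)
	funcs = crtc->helper_private;
if (funcs && funcs->disable)
	(*funcs->disable)(crtc);	/* Mixer and VP go down first */

hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_EN);	/* then HDMI */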
&exynos_hdmi_encoder_helper_funcs); + + ret = hdmi_create_connector(encoder); + if (ret) { + DRM_ERROR("failed to create connector ret = %d\n", ret); + drm_encoder_cleanup(encoder); + return ret; + } + + return 0; } static void hdmi_unbind(struct device *dev, struct device *master, void *data) @@ -2338,43 +1990,30 @@ static struct device_node *hdmi_legacy_phy_dt_binding(struct device *dev) static int hdmi_probe(struct platform_device *pdev) { struct device_node *ddc_node, *phy_node; - struct s5p_hdmi_platform_data *pdata; - struct hdmi_driver_data *drv_data; const struct of_device_id *match; struct device *dev = &pdev->dev; struct hdmi_context *hdata; struct resource *res; int ret; - if (!dev->of_node) - return -ENODEV; - - pdata = drm_hdmi_dt_parse_pdata(dev); - if (!pdata) - return -EINVAL; - hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL); if (!hdata) return -ENOMEM; - hdata->display.type = EXYNOS_DISPLAY_TYPE_HDMI; - hdata->display.ops = &hdmi_display_ops; - - mutex_init(&hdata->hdmi_mutex); - - platform_set_drvdata(pdev, hdata); - - match = of_match_node(hdmi_match_types, dev->of_node); + match = of_match_device(hdmi_match_types, dev); if (!match) return -ENODEV; - drv_data = (struct hdmi_driver_data *)match->data; - hdata->type = drv_data->type; - hdata->phy_confs = drv_data->phy_confs; - hdata->phy_conf_count = drv_data->phy_conf_count; + hdata->drv_data = match->data; + + platform_set_drvdata(pdev, hdata); - hdata->hpd_gpio = pdata->hpd_gpio; hdata->dev = dev; + hdata->hpd_gpio = of_get_named_gpio(dev->of_node, "hpd-gpio", 0); + if (hdata->hpd_gpio < 0) { + DRM_ERROR("cannot get hpd gpio property\n"); + return hdata->hpd_gpio; + } ret = hdmi_resources_init(hdata); if (ret) { @@ -2426,7 +2065,7 @@ out_get_ddc_adpt: } out_get_phy_port: - if (drv_data->is_apb_phy) { + if (hdata->drv_data->is_apb_phy) { hdata->regs_hdmiphy = of_iomap(phy_node, 0); if (!hdata->regs_hdmiphy) { DRM_ERROR("failed to ioremap hdmi phy\n"); @@ -2449,8 +2088,6 @@ out_get_phy_port: goto err_hdmiphy; } - hdata->hpd = gpio_get_value(hdata->hpd_gpio); - INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func); ret = devm_request_threaded_irq(dev, hdata->irq, NULL, diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index cae98db33062..e68340c77676 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -69,6 +69,11 @@ enum mixer_version_id { MXR_VER_128_0_0_184, }; +enum mixer_flag_bits { + MXR_BIT_POWERED, + MXR_BIT_VSYNC, +}; + struct mixer_context { struct platform_device *pdev; struct device *dev; @@ -76,13 +81,11 @@ struct mixer_context { struct exynos_drm_crtc *crtc; struct exynos_drm_plane planes[MIXER_WIN_NR]; int pipe; + unsigned long flags; bool interlace; - bool powered; bool vp_enabled; bool has_sclk; - u32 int_en; - struct mutex mixer_mutex; struct mixer_resources mixer_res; enum mixer_version_id mxr_ver; wait_queue_head_t wait_vsync_queue; @@ -380,19 +383,20 @@ static void mixer_stop(struct mixer_context *ctx) usleep_range(10000, 12000); } -static void vp_video_buffer(struct mixer_context *ctx, unsigned int win) +static void vp_video_buffer(struct mixer_context *ctx, + struct exynos_drm_plane *plane) { struct mixer_resources *res = &ctx->mixer_res; + struct drm_plane_state *state = plane->base.state; + struct drm_framebuffer *fb = state->fb; + struct drm_display_mode *mode = &state->crtc->mode; unsigned long flags; - struct exynos_drm_plane *plane; dma_addr_t luma_addr[2], chroma_addr[2]; bool 
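/*
 * Sketch (not from this patch): hdmi_probe() above swaps the parsed
 * platform data for per-compatible driver data taken from the OF match
 * table. The shape, with a hypothetical data-struct name:
 */
static const struct hdmi_driver_data exynos5_hdmi_data = {
	.type = HDMI_TYPE14,
	/* .phy_confs, .phy_conf_count, .is_apb_phy, ... */
};

static const struct of_device_id hdmi_match_types[] = {
	{ .compatible = "samsung,exynos5-hdmi", .data = &exynos5_hdmi_data },
	{ /* sentinel */ }
};

match = of_match_device(hdmi_match_types, dev);
if (!match)
	return -ENODEV;
hdata->drv_data = match->data;		/* immutable per-SoC config */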
tiled_mode = false; bool crcb_mode = false; u32 val; - plane = &ctx->planes[win]; - - switch (plane->pixel_format) { + switch (fb->pixel_format) { case DRM_FORMAT_NV12: crcb_mode = false; break; @@ -401,21 +405,21 @@ static void vp_video_buffer(struct mixer_context *ctx, unsigned int win) break; default: DRM_ERROR("pixel format for vp is wrong [%d].\n", - plane->pixel_format); + fb->pixel_format); return; } luma_addr[0] = plane->dma_addr[0]; chroma_addr[0] = plane->dma_addr[1]; - if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE) { + if (mode->flags & DRM_MODE_FLAG_INTERLACE) { ctx->interlace = true; if (tiled_mode) { luma_addr[1] = luma_addr[0] + 0x40; chroma_addr[1] = chroma_addr[0] + 0x40; } else { - luma_addr[1] = luma_addr[0] + plane->pitch; - chroma_addr[1] = chroma_addr[0] + plane->pitch; + luma_addr[1] = luma_addr[0] + fb->pitches[0]; + chroma_addr[1] = chroma_addr[0] + fb->pitches[0]; } } else { ctx->interlace = false; @@ -436,25 +440,25 @@ static void vp_video_buffer(struct mixer_context *ctx, unsigned int win) vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK); /* setting size of input image */ - vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(plane->pitch) | - VP_IMG_VSIZE(plane->fb_height)); + vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) | + VP_IMG_VSIZE(fb->height)); /* chroma height has to reduced by 2 to avoid chroma distorions */ - vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(plane->pitch) | - VP_IMG_VSIZE(plane->fb_height / 2)); + vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) | + VP_IMG_VSIZE(fb->height / 2)); - vp_reg_write(res, VP_SRC_WIDTH, plane->src_width); - vp_reg_write(res, VP_SRC_HEIGHT, plane->src_height); + vp_reg_write(res, VP_SRC_WIDTH, plane->src_w); + vp_reg_write(res, VP_SRC_HEIGHT, plane->src_h); vp_reg_write(res, VP_SRC_H_POSITION, VP_SRC_H_POSITION_VAL(plane->src_x)); vp_reg_write(res, VP_SRC_V_POSITION, plane->src_y); - vp_reg_write(res, VP_DST_WIDTH, plane->crtc_width); + vp_reg_write(res, VP_DST_WIDTH, plane->crtc_w); vp_reg_write(res, VP_DST_H_POSITION, plane->crtc_x); if (ctx->interlace) { - vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_height / 2); + vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h / 2); vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y / 2); } else { - vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_height); + vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h); vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y); } @@ -469,9 +473,9 @@ static void vp_video_buffer(struct mixer_context *ctx, unsigned int win) vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]); vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]); - mixer_cfg_scan(ctx, plane->mode_height); - mixer_cfg_rgb_fmt(ctx, plane->mode_height); - mixer_cfg_layer(ctx, win, true); + mixer_cfg_scan(ctx, mode->vdisplay); + mixer_cfg_rgb_fmt(ctx, mode->vdisplay); + mixer_cfg_layer(ctx, plane->zpos, true); mixer_run(ctx); mixer_vsync_set_update(ctx, true); @@ -491,15 +495,15 @@ static void mixer_layer_update(struct mixer_context *ctx) static int mixer_setup_scale(const struct exynos_drm_plane *plane, unsigned int *x_ratio, unsigned int *y_ratio) { - if (plane->crtc_width != plane->src_width) { - if (plane->crtc_width == 2 * plane->src_width) + if (plane->crtc_w != plane->src_w) { + if (plane->crtc_w == 2 * plane->src_w) *x_ratio = 1; else goto fail; } - if (plane->crtc_height != plane->src_height) { - if (plane->crtc_height == 2 * plane->src_height) + if (plane->crtc_h != plane->src_h) { + if (plane->crtc_h == 2 * plane->src_h) *y_ratio = 1; else goto fail; @@ 
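/*
 * Sketch (not from this patch): with atomic state the mixer stops
 * caching framebuffer geometry in exynos_drm_plane and reads the
 * committed state through the base plane instead, as vp_video_buffer()
 * now does:
 */
struct drm_plane_state *state = plane->base.state;
struct drm_framebuffer *fb = state->fb;
struct drm_display_mode *mode = &state->crtc->mode;

/* stride and interlacing come from live state, not cached copies */
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
	luma_addr[1] = luma_addr[0] + fb->pitches[0];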
-static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win) +static void mixer_graph_buffer(struct mixer_context *ctx, + struct exynos_drm_plane *plane) { struct mixer_resources *res = &ctx->mixer_res; + struct drm_plane_state *state = plane->base.state; + struct drm_framebuffer *fb = state->fb; + struct drm_display_mode *mode = &state->crtc->mode; unsigned long flags; - struct exynos_drm_plane *plane; + unsigned int win = plane->zpos; unsigned int x_ratio = 0, y_ratio = 0; unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset; dma_addr_t dma_addr; unsigned int fmt; u32 val; - plane = &ctx->planes[win]; - - switch (plane->pixel_format) { + switch (fb->pixel_format) { case DRM_FORMAT_XRGB4444: fmt = MXR_FORMAT_ARGB4444; break; @@ -557,12 +563,12 @@ static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win) /* converting dma address base and source offset */ dma_addr = plane->dma_addr[0] - + (plane->src_x * plane->bpp >> 3) - + (plane->src_y * plane->pitch); + + (plane->src_x * fb->bits_per_pixel >> 3) + + (plane->src_y * fb->pitches[0]); src_x_offset = 0; src_y_offset = 0; - if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE) + if (mode->flags & DRM_MODE_FLAG_INTERLACE) ctx->interlace = true; else ctx->interlace = false; @@ -576,18 +582,18 @@ static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win) /* setup geometry */ mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), - plane->pitch / (plane->bpp >> 3)); + fb->pitches[0] / (fb->bits_per_pixel >> 3)); /* setup display size */ if (ctx->mxr_ver == MXR_VER_128_0_0_184 && win == MIXER_DEFAULT_WIN) { - val = MXR_MXR_RES_HEIGHT(plane->mode_height); - val |= MXR_MXR_RES_WIDTH(plane->mode_width); + val = MXR_MXR_RES_HEIGHT(mode->vdisplay); + val |= MXR_MXR_RES_WIDTH(mode->hdisplay); mixer_reg_write(res, MXR_RESOLUTION, val); } - val = MXR_GRP_WH_WIDTH(plane->src_width); - val |= MXR_GRP_WH_HEIGHT(plane->src_height); + val = MXR_GRP_WH_WIDTH(plane->src_w); + val |= MXR_GRP_WH_HEIGHT(plane->src_h); val |= MXR_GRP_WH_H_SCALE(x_ratio); val |= MXR_GRP_WH_V_SCALE(y_ratio); mixer_reg_write(res, MXR_GRAPHIC_WH(win), val); @@ -605,8 +611,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win) /* set buffer address to mixer */ mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr); - mixer_cfg_scan(ctx, plane->mode_height); - mixer_cfg_rgb_fmt(ctx, plane->mode_height); + mixer_cfg_scan(ctx, mode->vdisplay); + mixer_cfg_rgb_fmt(ctx, mode->vdisplay); mixer_cfg_layer(ctx, win, true); /* layer update mandatory for mixer 16.0.33.0 */ @@ -718,6 +724,10 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) /* handling VSYNC */ if (val & MXR_INT_STATUS_VSYNC) { + /* the vsync interrupt uses different bits for read and clear */ + val |= MXR_INT_CLEAR_VSYNC; + val &= ~MXR_INT_STATUS_VSYNC; + /* interlaced scan needs to check the shadow register */ if (ctx->interlace) { base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0)); @@ -731,8 +741,8 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) goto out; } - drm_handle_vblank(ctx->drm_dev, ctx->pipe); - exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); + drm_crtc_handle_vblank(&ctx->crtc->base); + exynos_drm_crtc_finish_pageflip(ctx->crtc); /* set wait vsync event to zero and wake up queue. 
*/ if (atomic_read(&ctx->wait_vsync_event)) { @@ -743,11 +753,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) out: /* clear interrupts */ - if (~val & MXR_INT_EN_VSYNC) { - /* vsync interrupt use different bit for read and clear */ - val &= ~MXR_INT_EN_VSYNC; - val |= MXR_INT_CLEAR_VSYNC; - } mixer_reg_write(res, MXR_INT_STATUS, val); spin_unlock(&res->reg_slock); @@ -882,8 +887,7 @@ static int mixer_initialize(struct mixer_context *mixer_ctx, } } - ret = drm_iommu_attach_device_if_possible(mixer_ctx->crtc, drm_dev, - mixer_ctx->dev); + ret = drm_iommu_attach_device(drm_dev, mixer_ctx->dev); if (ret) priv->pipe--; @@ -892,8 +896,7 @@ static int mixer_initialize(struct mixer_context *mixer_ctx, static void mixer_ctx_remove(struct mixer_context *mixer_ctx) { - if (is_drm_iommu_supported(mixer_ctx->drm_dev)) - drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev); + drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev); } static int mixer_enable_vblank(struct exynos_drm_crtc *crtc) @@ -901,14 +904,13 @@ static int mixer_enable_vblank(struct exynos_drm_crtc *crtc) struct mixer_context *mixer_ctx = crtc->ctx; struct mixer_resources *res = &mixer_ctx->mixer_res; - if (!mixer_ctx->powered) { - mixer_ctx->int_en |= MXR_INT_EN_VSYNC; + __set_bit(MXR_BIT_VSYNC, &mixer_ctx->flags); + if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) return 0; - } /* enable vsync interrupt */ - mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC, - MXR_INT_EN_VSYNC); + mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC); + mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC); return 0; } @@ -918,48 +920,48 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc) struct mixer_context *mixer_ctx = crtc->ctx; struct mixer_resources *res = &mixer_ctx->mixer_res; + __clear_bit(MXR_BIT_VSYNC, &mixer_ctx->flags); + + if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) + return; + /* disable vsync interrupt */ + mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC); mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC); } -static void mixer_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) +static void mixer_update_plane(struct exynos_drm_crtc *crtc, + struct exynos_drm_plane *plane) { struct mixer_context *mixer_ctx = crtc->ctx; - DRM_DEBUG_KMS("win: %d\n", win); + DRM_DEBUG_KMS("win: %d\n", plane->zpos); - mutex_lock(&mixer_ctx->mixer_mutex); - if (!mixer_ctx->powered) { - mutex_unlock(&mixer_ctx->mixer_mutex); + if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) return; - } - mutex_unlock(&mixer_ctx->mixer_mutex); - if (win > 1 && mixer_ctx->vp_enabled) - vp_video_buffer(mixer_ctx, win); + if (plane->zpos > 1 && mixer_ctx->vp_enabled) + vp_video_buffer(mixer_ctx, plane); else - mixer_graph_buffer(mixer_ctx, win); + mixer_graph_buffer(mixer_ctx, plane); } -static void mixer_win_disable(struct exynos_drm_crtc *crtc, unsigned int win) +static void mixer_disable_plane(struct exynos_drm_crtc *crtc, + struct exynos_drm_plane *plane) { struct mixer_context *mixer_ctx = crtc->ctx; struct mixer_resources *res = &mixer_ctx->mixer_res; unsigned long flags; - DRM_DEBUG_KMS("win: %d\n", win); + DRM_DEBUG_KMS("win: %d\n", plane->zpos); - mutex_lock(&mixer_ctx->mixer_mutex); - if (!mixer_ctx->powered) { - mutex_unlock(&mixer_ctx->mixer_mutex); + if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) return; - } - mutex_unlock(&mixer_ctx->mixer_mutex); spin_lock_irqsave(&res->reg_slock, flags); mixer_vsync_set_update(mixer_ctx, false); - 
mixer_cfg_layer(mixer_ctx, win, false); + mixer_cfg_layer(mixer_ctx, plane->zpos, false); mixer_vsync_set_update(mixer_ctx, true); spin_unlock_irqrestore(&res->reg_slock, flags); @@ -970,12 +972,8 @@ static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc) struct mixer_context *mixer_ctx = crtc->ctx; int err; - mutex_lock(&mixer_ctx->mixer_mutex); - if (!mixer_ctx->powered) { - mutex_unlock(&mixer_ctx->mixer_mutex); + if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) return; - } - mutex_unlock(&mixer_ctx->mixer_mutex); err = drm_vblank_get(mixer_ctx->drm_dev, mixer_ctx->pipe); if (err < 0) { @@ -1003,13 +1001,8 @@ static void mixer_enable(struct exynos_drm_crtc *crtc) struct mixer_resources *res = &ctx->mixer_res; int ret; - mutex_lock(&ctx->mixer_mutex); - if (ctx->powered) { - mutex_unlock(&ctx->mixer_mutex); + if (test_bit(MXR_BIT_POWERED, &ctx->flags)) return; - } - - mutex_unlock(&ctx->mixer_mutex); pm_runtime_get_sync(ctx->dev); @@ -1041,13 +1034,14 @@ static void mixer_enable(struct exynos_drm_crtc *crtc) } } - mutex_lock(&ctx->mixer_mutex); - ctx->powered = true; - mutex_unlock(&ctx->mixer_mutex); + set_bit(MXR_BIT_POWERED, &ctx->flags); mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET); - mixer_reg_write(res, MXR_INT_EN, ctx->int_en); + if (test_bit(MXR_BIT_VSYNC, &ctx->flags)) { + mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC); + mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC); + } mixer_win_reset(ctx); } @@ -1057,24 +1051,16 @@ static void mixer_disable(struct exynos_drm_crtc *crtc) struct mixer_resources *res = &ctx->mixer_res; int i; - mutex_lock(&ctx->mixer_mutex); - if (!ctx->powered) { - mutex_unlock(&ctx->mixer_mutex); + if (!test_bit(MXR_BIT_POWERED, &ctx->flags)) return; - } - mutex_unlock(&ctx->mixer_mutex); mixer_stop(ctx); mixer_regs_dump(ctx); for (i = 0; i < MIXER_WIN_NR; i++) - mixer_win_disable(crtc, i); - - ctx->int_en = mixer_reg_read(res, MXR_INT_EN); + mixer_disable_plane(crtc, &ctx->planes[i]); - mutex_lock(&ctx->mixer_mutex); - ctx->powered = false; - mutex_unlock(&ctx->mixer_mutex); + clear_bit(MXR_BIT_POWERED, &ctx->flags); clk_disable_unprepare(res->hdmi); clk_disable_unprepare(res->mixer); @@ -1113,8 +1099,8 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = { .enable_vblank = mixer_enable_vblank, .disable_vblank = mixer_disable_vblank, .wait_for_vblank = mixer_wait_for_vblank, - .win_commit = mixer_win_commit, - .win_disable = mixer_win_disable, + .update_plane = mixer_update_plane, + .disable_plane = mixer_disable_plane, }; static struct mixer_drv_data exynos5420_mxr_drv_data = { @@ -1236,8 +1222,6 @@ static int mixer_probe(struct platform_device *pdev) return -ENOMEM; } - mutex_init(&ctx->mixer_mutex); - if (dev->of_node) { const struct of_device_id *match; diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig new file mode 100644 index 000000000000..c78cf3f605d0 --- /dev/null +++ b/drivers/gpu/drm/fsl-dcu/Kconfig @@ -0,0 +1,18 @@ +config DRM_FSL_DCU + tristate "DRM Support for Freescale DCU" + depends on DRM && OF && ARM + select BACKLIGHT_CLASS_DEVICE + select BACKLIGHT_LCD_SUPPORT + select DRM_KMS_HELPER + select DRM_KMS_CMA_HELPER + select DRM_KMS_FB_HELPER + select DRM_PANEL + select FB_SYS_FILLRECT + select FB_SYS_COPYAREA + select FB_SYS_IMAGEBLIT + select FB_SYS_FOPS + select REGMAP_MMIO + select VIDEOMODE_HELPERS + help + Choose this option if you have a Freescale DCU chipset. + If M is selected, the module will be called fsl-dcu-drm. 
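The mixer hunks above replace the mutex-protected powered boolean (and the saved int_en word) with bits in an unsigned long manipulated through the kernel's atomic bitops, so the vsync request survives power cycles without taking a lock. A minimal sketch of that pattern, with hypothetical names:

#include <linux/bitops.h>

enum { MY_BIT_POWERED, MY_BIT_VSYNC };

struct my_ctx {
	unsigned long flags;	/* accessed with set_bit()/test_bit() */
};

static void my_enable_vblank(struct my_ctx *ctx)
{
	__set_bit(MY_BIT_VSYNC, &ctx->flags);	/* remember the request */
	if (!test_bit(MY_BIT_POWERED, &ctx->flags))
		return;		/* replayed later, when the block powers up */
	/* ... program the interrupt-enable register here ... */
}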
diff --git a/drivers/gpu/drm/fsl-dcu/Makefile b/drivers/gpu/drm/fsl-dcu/Makefile new file mode 100644 index 000000000000..6ea1523ae6ec --- /dev/null +++ b/drivers/gpu/drm/fsl-dcu/Makefile @@ -0,0 +1,7 @@ +fsl-dcu-drm-y := fsl_dcu_drm_drv.o \ + fsl_dcu_drm_kms.o \ + fsl_dcu_drm_rgb.o \ + fsl_dcu_drm_plane.o \ + fsl_dcu_drm_crtc.o \ + fsl_dcu_drm_fbdev.o +obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu-drm.o diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c new file mode 100644 index 000000000000..82a3d311e164 --- /dev/null +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c @@ -0,0 +1,210 @@ +/* + * Copyright 2015 Freescale Semiconductor, Inc. + * + * Freescale DCU drm device driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/clk.h> +#include <linux/regmap.h> + +#include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> + +#include "fsl_dcu_drm_crtc.h" +#include "fsl_dcu_drm_drv.h" +#include "fsl_dcu_drm_plane.h" + +static void fsl_dcu_drm_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ +} + +static int fsl_dcu_drm_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + return 0; +} + +static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ +} + +static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; + int ret; + + ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, + DCU_MODE_DCU_MODE_MASK, + DCU_MODE_DCU_MODE(DCU_MODE_OFF)); + if (ret) + dev_err(fsl_dev->dev, "Disable CRTC failed\n"); + ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, + DCU_UPDATE_MODE_READREG); + if (ret) + dev_err(fsl_dev->dev, "Disable CRTC failed\n"); +} + +static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; + int ret; + + ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, + DCU_MODE_DCU_MODE_MASK, + DCU_MODE_DCU_MODE(DCU_MODE_NORMAL)); + if (ret) + dev_err(fsl_dev->dev, "Enable CRTC failed\n"); + ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, + DCU_UPDATE_MODE_READREG); + if (ret) + dev_err(fsl_dev->dev, "Enable CRTC failed\n"); +} + +static bool fsl_dcu_drm_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; + struct drm_display_mode *mode = &crtc->state->mode; + unsigned int hbp, hfp, hsw, vbp, vfp, vsw, div, index; + unsigned long dcuclk; + int ret; + + index = drm_crtc_index(crtc); + dcuclk = clk_get_rate(fsl_dev->clk); + div = dcuclk / mode->clock / 1000; + + /* Configure timings: */ + hbp = mode->htotal - mode->hsync_end; + hfp = mode->hsync_start - mode->hdisplay; + hsw = mode->hsync_end - mode->hsync_start; + vbp = mode->vtotal - mode->vsync_end; + vfp = mode->vsync_start - mode->vdisplay; + vsw = mode->vsync_end - mode->vsync_start; + + ret = 
regmap_write(fsl_dev->regmap, DCU_HSYN_PARA, + DCU_HSYN_PARA_BP(hbp) | + DCU_HSYN_PARA_PW(hsw) | + DCU_HSYN_PARA_FP(hfp)); + if (ret) + goto set_failed; + ret = regmap_write(fsl_dev->regmap, DCU_VSYN_PARA, + DCU_VSYN_PARA_BP(vbp) | + DCU_VSYN_PARA_PW(vsw) | + DCU_VSYN_PARA_FP(vfp)); + if (ret) + goto set_failed; + ret = regmap_write(fsl_dev->regmap, DCU_DISP_SIZE, + DCU_DISP_SIZE_DELTA_Y(mode->vdisplay) | + DCU_DISP_SIZE_DELTA_X(mode->hdisplay)); + if (ret) + goto set_failed; + ret = regmap_write(fsl_dev->regmap, DCU_DIV_RATIO, div); + if (ret) + goto set_failed; + ret = regmap_write(fsl_dev->regmap, DCU_SYN_POL, + DCU_SYN_POL_INV_VS_LOW | DCU_SYN_POL_INV_HS_LOW); + if (ret) + goto set_failed; + ret = regmap_write(fsl_dev->regmap, DCU_BGND, DCU_BGND_R(0) | + DCU_BGND_G(0) | DCU_BGND_B(0)); + if (ret) + goto set_failed; + ret = regmap_write(fsl_dev->regmap, DCU_DCU_MODE, + DCU_MODE_BLEND_ITER(1) | DCU_MODE_RASTER_EN); + if (ret) + goto set_failed; + ret = regmap_write(fsl_dev->regmap, DCU_THRESHOLD, + DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) | + DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) | + DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL)); + if (ret) + goto set_failed; + ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, + DCU_UPDATE_MODE_READREG); + if (ret) + goto set_failed; + return; +set_failed: + dev_err(dev->dev, "set DCU register failed\n"); +} + +static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = { + .atomic_begin = fsl_dcu_drm_crtc_atomic_begin, + .atomic_check = fsl_dcu_drm_crtc_atomic_check, + .atomic_flush = fsl_dcu_drm_crtc_atomic_flush, + .disable = fsl_dcu_drm_disable_crtc, + .enable = fsl_dcu_drm_crtc_enable, + .mode_fixup = fsl_dcu_drm_crtc_mode_fixup, + .mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb, +}; + +static const struct drm_crtc_funcs fsl_dcu_drm_crtc_funcs = { + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .destroy = drm_crtc_cleanup, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .set_config = drm_atomic_helper_set_config, +}; + +int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev) +{ + struct drm_plane *primary; + struct drm_crtc *crtc = &fsl_dev->crtc; + unsigned int i, j, reg_num; + int ret; + + primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm); + ret = drm_crtc_init_with_planes(fsl_dev->drm, crtc, primary, NULL, + &fsl_dcu_drm_crtc_funcs); + if (ret < 0) + return ret; + + drm_crtc_helper_add(crtc, &fsl_dcu_drm_crtc_helper_funcs); + + if (!strcmp(fsl_dev->soc->name, "ls1021a")) + reg_num = LS1021A_LAYER_REG_NUM; + else + reg_num = VF610_LAYER_REG_NUM; + for (i = 0; i <= fsl_dev->soc->total_layer; i++) { + for (j = 0; j < reg_num; j++) { + ret = regmap_write(fsl_dev->regmap, + DCU_CTRLDESCLN(i, j), 0); + if (ret) + goto init_failed; + } + } + ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, + DCU_MODE_DCU_MODE_MASK, + DCU_MODE_DCU_MODE(DCU_MODE_OFF)); + if (ret) + goto init_failed; + ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, + DCU_UPDATE_MODE_READREG); + if (ret) + goto init_failed; + + return 0; +init_failed: + dev_err(fsl_dev->dev, "init DCU register failed\n"); + return ret; +} diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h new file mode 100644 index 000000000000..43d4da2c5fe5 --- /dev/null +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h @@ -0,0 +1,19 @@ +/* + * Copyright 2015 Freescale Semiconductor, Inc. 
+ * + * Freescale DCU drm device driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __FSL_DCU_DRM_CRTC_H__ +#define __FSL_DCU_DRM_CRTC_H__ + +struct fsl_dcu_drm_device; + +int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev); + +#endif /* __FSL_DCU_DRM_CRTC_H__ */ diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c new file mode 100644 index 000000000000..9a8e2da47158 --- /dev/null +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -0,0 +1,404 @@ +/* + * Copyright 2015 Freescale Semiconductor, Inc. + * + * Freescale DCU drm device driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/mfd/syscon.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_gem_cma_helper.h> + +#include "fsl_dcu_drm_crtc.h" +#include "fsl_dcu_drm_drv.h" + +static const struct regmap_config fsl_dcu_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .cache_type = REGCACHE_RBTREE, +}; + +static int fsl_dcu_drm_irq_init(struct drm_device *dev) +{ + struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; + unsigned int value; + int ret; + + ret = drm_irq_install(dev, fsl_dev->irq); + if (ret < 0) + dev_err(dev->dev, "failed to install IRQ handler\n"); + + ret = regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0); + if (ret) + dev_err(dev->dev, "set DCU_INT_STATUS failed\n"); + ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value); + if (ret) + dev_err(dev->dev, "read DCU_INT_MASK failed\n"); + value &= DCU_INT_MASK_VBLANK; + ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value); + if (ret) + dev_err(dev->dev, "set DCU_INT_MASK failed\n"); + ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, + DCU_UPDATE_MODE_READREG); + if (ret) + dev_err(dev->dev, "set DCU_UPDATE_MODE failed\n"); + + return ret; +} + +static int fsl_dcu_load(struct drm_device *drm, unsigned long flags) +{ + struct device *dev = drm->dev; + struct fsl_dcu_drm_device *fsl_dev = drm->dev_private; + int ret; + + ret = fsl_dcu_drm_modeset_init(fsl_dev); + if (ret < 0) { + dev_err(dev, "failed to initialize mode setting\n"); + return ret; + } + + ret = drm_vblank_init(drm, drm->mode_config.num_crtc); + if (ret < 0) { + dev_err(dev, "failed to initialize vblank\n"); + goto done; + } + drm->vblank_disable_allowed = true; + + ret = fsl_dcu_drm_irq_init(drm); + if (ret < 0) + goto done; + drm->irq_enabled = true; + + fsl_dcu_fbdev_init(drm); + + return 0; +done: + if (ret) { + drm_mode_config_cleanup(drm); + drm_vblank_cleanup(drm); + drm_irq_uninstall(drm); + drm->dev_private = NULL; + } + + return ret; +} + +static int fsl_dcu_unload(struct drm_device *dev) +{ + drm_mode_config_cleanup(dev); + drm_vblank_cleanup(dev); + drm_irq_uninstall(dev); + + dev->dev_private = NULL; + + return 0; +} + +static void 
fsl_dcu_drm_preclose(struct drm_device *dev, struct drm_file *file) +{ +} + +static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg) +{ + struct drm_device *dev = arg; + struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; + unsigned int int_status; + int ret; + + ret = regmap_read(fsl_dev->regmap, DCU_INT_STATUS, &int_status); + if (ret) + dev_err(dev->dev, "read DCU_INT_STATUS failed\n"); + if (int_status & DCU_INT_STATUS_VBLANK) + drm_handle_vblank(dev, 0); + + ret = regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0xffffffff); + if (ret) + dev_err(dev->dev, "set DCU_INT_STATUS failed\n"); + ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, + DCU_UPDATE_MODE_READREG); + if (ret) + dev_err(dev->dev, "set DCU_UPDATE_MODE failed\n"); + + return IRQ_HANDLED; +} + +static int fsl_dcu_drm_enable_vblank(struct drm_device *dev, int crtc) +{ + struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; + unsigned int value; + int ret; + + ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value); + if (ret) + dev_err(dev->dev, "read DCU_INT_MASK failed\n"); + value &= ~DCU_INT_MASK_VBLANK; + ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value); + if (ret) + dev_err(dev->dev, "set DCU_INT_MASK failed\n"); + return 0; +} + +static void fsl_dcu_drm_disable_vblank(struct drm_device *dev, int crtc) +{ + struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; + unsigned int value; + int ret; + + ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value); + if (ret) + dev_err(dev->dev, "read DCU_INT_MASK failed\n"); + value |= DCU_INT_MASK_VBLANK; + ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value); + if (ret) + dev_err(dev->dev, "set DCU_INT_MASK failed\n"); +} + +static const struct file_operations fsl_dcu_drm_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif + .poll = drm_poll, + .read = drm_read, + .llseek = no_llseek, + .mmap = drm_gem_cma_mmap, +}; + +static struct drm_driver fsl_dcu_drm_driver = { + .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET + | DRIVER_PRIME | DRIVER_ATOMIC, + .load = fsl_dcu_load, + .unload = fsl_dcu_unload, + .preclose = fsl_dcu_drm_preclose, + .irq_handler = fsl_dcu_drm_irq, + .get_vblank_counter = drm_vblank_count, + .enable_vblank = fsl_dcu_drm_enable_vblank, + .disable_vblank = fsl_dcu_drm_disable_vblank, + .gem_free_object = drm_gem_cma_free_object, + .gem_vm_ops = &drm_gem_cma_vm_ops, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, + .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, + .gem_prime_vmap = drm_gem_cma_prime_vmap, + .gem_prime_vunmap = drm_gem_cma_prime_vunmap, + .gem_prime_mmap = drm_gem_cma_prime_mmap, + .dumb_create = drm_gem_cma_dumb_create, + .dumb_map_offset = drm_gem_cma_dumb_map_offset, + .dumb_destroy = drm_gem_dumb_destroy, + .fops = &fsl_dcu_drm_fops, + .name = "fsl-dcu-drm", + .desc = "Freescale DCU DRM", + .date = "20150213", + .major = 1, + .minor = 0, +}; + +#ifdef CONFIG_PM_SLEEP +static int fsl_dcu_drm_pm_suspend(struct device *dev) +{ + struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev); + + if (!fsl_dev) + return 0; + + drm_kms_helper_poll_disable(fsl_dev->drm); + regcache_cache_only(fsl_dev->regmap, true); + regcache_mark_dirty(fsl_dev->regmap); + 
clk_disable(fsl_dev->clk); + clk_unprepare(fsl_dev->clk); + + return 0; +} + +static int fsl_dcu_drm_pm_resume(struct device *dev) +{ + struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev); + int ret; + + if (!fsl_dev) + return 0; + + ret = clk_prepare(fsl_dev->clk); + if (ret < 0) { + dev_err(dev, "failed to prepare dcu clk\n"); + return ret; + } + ret = clk_enable(fsl_dev->clk); + if (ret < 0) { + dev_err(dev, "failed to enable dcu clk\n"); + clk_unprepare(fsl_dev->clk); + return ret; + } + + drm_kms_helper_poll_enable(fsl_dev->drm); + regcache_cache_only(fsl_dev->regmap, false); + regcache_sync(fsl_dev->regmap); + + return 0; +} +#endif + +static const struct dev_pm_ops fsl_dcu_drm_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(fsl_dcu_drm_pm_suspend, fsl_dcu_drm_pm_resume) +}; + +static const struct fsl_dcu_soc_data fsl_dcu_ls1021a_data = { + .name = "ls1021a", + .total_layer = 16, + .max_layer = 4, +}; + +static const struct fsl_dcu_soc_data fsl_dcu_vf610_data = { + .name = "vf610", + .total_layer = 64, + .max_layer = 6, +}; + +static const struct of_device_id fsl_dcu_of_match[] = { + { + .compatible = "fsl,ls1021a-dcu", + .data = &fsl_dcu_ls1021a_data, + }, { + .compatible = "fsl,vf610-dcu", + .data = &fsl_dcu_vf610_data, + }, { + }, +}; +MODULE_DEVICE_TABLE(of, fsl_dcu_of_match); + +static int fsl_dcu_drm_probe(struct platform_device *pdev) +{ + struct fsl_dcu_drm_device *fsl_dev; + struct drm_device *drm; + struct device *dev = &pdev->dev; + struct resource *res; + void __iomem *base; + struct drm_driver *driver = &fsl_dcu_drm_driver; + const struct of_device_id *id; + int ret; + + fsl_dev = devm_kzalloc(dev, sizeof(*fsl_dev), GFP_KERNEL); + if (!fsl_dev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "could not get memory IO resource\n"); + return -ENODEV; + } + + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) { + ret = PTR_ERR(base); + return ret; + } + + fsl_dev->irq = platform_get_irq(pdev, 0); + if (fsl_dev->irq < 0) { + dev_err(dev, "failed to get irq\n"); + return -ENXIO; + } + + fsl_dev->clk = devm_clk_get(dev, "dcu"); + if (IS_ERR(fsl_dev->clk)) { + ret = PTR_ERR(fsl_dev->clk); + dev_err(dev, "failed to get dcu clock\n"); + return ret; + } + ret = clk_prepare(fsl_dev->clk); + if (ret < 0) { + dev_err(dev, "failed to prepare dcu clk\n"); + return ret; + } + ret = clk_enable(fsl_dev->clk); + if (ret < 0) { + dev_err(dev, "failed to enable dcu clk\n"); + clk_unprepare(fsl_dev->clk); + return ret; + } + + fsl_dev->regmap = devm_regmap_init_mmio(dev, base, + &fsl_dcu_regmap_config); + if (IS_ERR(fsl_dev->regmap)) { + dev_err(dev, "regmap init failed\n"); + return PTR_ERR(fsl_dev->regmap); + } + + id = of_match_node(fsl_dcu_of_match, pdev->dev.of_node); + if (!id) + return -ENODEV; + fsl_dev->soc = id->data; + + drm = drm_dev_alloc(driver, dev); + if (!drm) + return -ENOMEM; + + fsl_dev->dev = dev; + fsl_dev->drm = drm; + fsl_dev->np = dev->of_node; + drm->dev_private = fsl_dev; + dev_set_drvdata(dev, fsl_dev); + drm_dev_set_unique(drm, dev_name(dev)); + + ret = drm_dev_register(drm, 0); + if (ret < 0) + goto unref; + + DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name, + driver->major, driver->minor, driver->patchlevel, + driver->date, drm->primary->index); + + return 0; + +unref: + drm_dev_unref(drm); + return ret; +} + +static int fsl_dcu_drm_remove(struct platform_device *pdev) +{ + struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev); + + drm_put_dev(fsl_dev->drm); + + return 0; +} 
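/*
 * A note on the clk sequence used in probe() and the resume path above:
 * the common clk API requires clk_prepare() (which may sleep) before
 * clk_enable() (callable from atomic context), torn down in reverse
 * order. A minimal sketch of that pairing with a hypothetical helper;
 * clk_prepare_enable()/clk_disable_unprepare() combine the two steps.
 */
static int dcu_clk_on(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);		/* may sleep */
	if (ret)
		return ret;
	ret = clk_enable(clk);		/* atomic-safe step */
	if (ret)
		clk_unprepare(clk);	/* unwind the prepare on failure */
	return ret;
}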
+ +static struct platform_driver fsl_dcu_drm_platform_driver = { + .probe = fsl_dcu_drm_probe, + .remove = fsl_dcu_drm_remove, + .driver = { + .name = "fsl-dcu", + .pm = &fsl_dcu_drm_pm_ops, + .of_match_table = fsl_dcu_of_match, + }, +}; + +module_platform_driver(fsl_dcu_drm_platform_driver); + +MODULE_DESCRIPTION("Freescale DCU DRM Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h new file mode 100644 index 000000000000..579b9e44e764 --- /dev/null +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h @@ -0,0 +1,197 @@ +/* + * Copyright 2015 Freescale Semiconductor, Inc. + * + * Freescale DCU drm device driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __FSL_DCU_DRM_DRV_H__ +#define __FSL_DCU_DRM_DRV_H__ + +#include "fsl_dcu_drm_crtc.h" +#include "fsl_dcu_drm_output.h" +#include "fsl_dcu_drm_plane.h" + +#define DCU_DCU_MODE 0x0010 +#define DCU_MODE_BLEND_ITER(x) ((x) << 20) +#define DCU_MODE_RASTER_EN BIT(14) +#define DCU_MODE_DCU_MODE(x) (x) +#define DCU_MODE_DCU_MODE_MASK 0x03 +#define DCU_MODE_OFF 0 +#define DCU_MODE_NORMAL 1 +#define DCU_MODE_TEST 2 +#define DCU_MODE_COLORBAR 3 + +#define DCU_BGND 0x0014 +#define DCU_BGND_R(x) ((x) << 16) +#define DCU_BGND_G(x) ((x) << 8) +#define DCU_BGND_B(x) (x) + +#define DCU_DISP_SIZE 0x0018 +#define DCU_DISP_SIZE_DELTA_Y(x) ((x) << 16) +/* register value is 1/16 of the horizontal resolution */ +#define DCU_DISP_SIZE_DELTA_X(x) ((x) >> 4) + +#define DCU_HSYN_PARA 0x001c +#define DCU_HSYN_PARA_BP(x) ((x) << 22) +#define DCU_HSYN_PARA_PW(x) ((x) << 11) +#define DCU_HSYN_PARA_FP(x) (x) + +#define DCU_VSYN_PARA 0x0020 +#define DCU_VSYN_PARA_BP(x) ((x) << 22) +#define DCU_VSYN_PARA_PW(x) ((x) << 11) +#define DCU_VSYN_PARA_FP(x) (x) + +#define DCU_SYN_POL 0x0024 +#define DCU_SYN_POL_INV_PXCK_FALL (0 << 6) +#define DCU_SYN_POL_NEG_REMAIN (0 << 5) +#define DCU_SYN_POL_INV_VS_LOW BIT(1) +#define DCU_SYN_POL_INV_HS_LOW BIT(0) + +#define DCU_THRESHOLD 0x0028 +#define DCU_THRESHOLD_LS_BF_VS(x) ((x) << 16) +#define DCU_THRESHOLD_OUT_BUF_HIGH(x) ((x) << 8) +#define DCU_THRESHOLD_OUT_BUF_LOW(x) (x) +#define BF_VS_VAL 0x03 +#define BUF_MAX_VAL 0x78 +#define BUF_MIN_VAL 0x0a + +#define DCU_INT_STATUS 0x002C +#define DCU_INT_STATUS_VSYNC BIT(0) +#define DCU_INT_STATUS_UNDRUN BIT(1) +#define DCU_INT_STATUS_LSBFVS BIT(2) +#define DCU_INT_STATUS_VBLANK BIT(3) +#define DCU_INT_STATUS_CRCREADY BIT(4) +#define DCU_INT_STATUS_CRCOVERFLOW BIT(5) +#define DCU_INT_STATUS_P1FIFOLO BIT(6) +#define DCU_INT_STATUS_P1FIFOHI BIT(7) +#define DCU_INT_STATUS_P2FIFOLO BIT(8) +#define DCU_INT_STATUS_P2FIFOHI BIT(9) +#define DCU_INT_STATUS_PROGEND BIT(10) +#define DCU_INT_STATUS_IPMERROR BIT(11) +#define DCU_INT_STATUS_LYRTRANS BIT(12) +#define DCU_INT_STATUS_DMATRANS BIT(14) +#define DCU_INT_STATUS_P3FIFOLO BIT(16) +#define DCU_INT_STATUS_P3FIFOHI BIT(17) +#define DCU_INT_STATUS_P4FIFOLO BIT(18) +#define DCU_INT_STATUS_P4FIFOHI BIT(19) +#define DCU_INT_STATUS_P1EMPTY BIT(26) +#define DCU_INT_STATUS_P2EMPTY BIT(27) +#define DCU_INT_STATUS_P3EMPTY BIT(28) +#define DCU_INT_STATUS_P4EMPTY BIT(29) + +#define DCU_INT_MASK 0x0030 +#define DCU_INT_MASK_VSYNC BIT(0) +#define DCU_INT_MASK_UNDRUN BIT(1) +#define DCU_INT_MASK_LSBFVS BIT(2) +#define DCU_INT_MASK_VBLANK BIT(3) +#define 
DCU_INT_MASK_CRCREADY BIT(4) +#define DCU_INT_MASK_CRCOVERFLOW BIT(5) +#define DCU_INT_MASK_P1FIFOLO BIT(6) +#define DCU_INT_MASK_P1FIFOHI BIT(7) +#define DCU_INT_MASK_P2FIFOLO BIT(8) +#define DCU_INT_MASK_P2FIFOHI BIT(9) +#define DCU_INT_MASK_PROGEND BIT(10) +#define DCU_INT_MASK_IPMERROR BIT(11) +#define DCU_INT_MASK_LYRTRANS BIT(12) +#define DCU_INT_MASK_DMATRANS BIT(14) +#define DCU_INT_MASK_P3FIFOLO BIT(16) +#define DCU_INT_MASK_P3FIFOHI BIT(17) +#define DCU_INT_MASK_P4FIFOLO BIT(18) +#define DCU_INT_MASK_P4FIFOHI BIT(19) +#define DCU_INT_MASK_P1EMPTY BIT(26) +#define DCU_INT_MASK_P2EMPTY BIT(27) +#define DCU_INT_MASK_P3EMPTY BIT(28) +#define DCU_INT_MASK_P4EMPTY BIT(29) + +#define DCU_DIV_RATIO 0x0054 + +#define DCU_UPDATE_MODE 0x00cc +#define DCU_UPDATE_MODE_MODE BIT(31) +#define DCU_UPDATE_MODE_READREG BIT(30) + +#define DCU_DCFB_MAX 0x300 + +#define DCU_CTRLDESCLN(layer, reg) (0x200 + (reg - 1) * 4 + (layer) * 0x40) + +#define DCU_LAYER_HEIGHT(x) ((x) << 16) +#define DCU_LAYER_WIDTH(x) (x) + +#define DCU_LAYER_POSY(x) ((x) << 16) +#define DCU_LAYER_POSX(x) (x) + +#define DCU_LAYER_EN BIT(31) +#define DCU_LAYER_TILE_EN BIT(30) +#define DCU_LAYER_DATA_SEL_CLUT BIT(29) +#define DCU_LAYER_SAFETY_EN BIT(28) +#define DCU_LAYER_TRANS(x) ((x) << 20) +#define DCU_LAYER_BPP(x) ((x) << 16) +#define DCU_LAYER_RLE_EN BIT(15) +#define DCU_LAYER_LUOFFS(x) ((x) << 4) +#define DCU_LAYER_BB_ON BIT(2) +#define DCU_LAYER_AB(x) (x) + +#define DCU_LAYER_CKMAX_R(x) ((x) << 16) +#define DCU_LAYER_CKMAX_G(x) ((x) << 8) +#define DCU_LAYER_CKMAX_B(x) (x) + +#define DCU_LAYER_CKMIN_R(x) ((x) << 16) +#define DCU_LAYER_CKMIN_G(x) ((x) << 8) +#define DCU_LAYER_CKMIN_B(x) (x) + +#define DCU_LAYER_TILE_VER(x) ((x) << 16) +#define DCU_LAYER_TILE_HOR(x) (x) + +#define DCU_LAYER_FG_FCOLOR(x) (x) + +#define DCU_LAYER_BG_BCOLOR(x) (x) + +#define DCU_LAYER_POST_SKIP(x) ((x) << 16) +#define DCU_LAYER_PRE_SKIP(x) (x) + +#define FSL_DCU_RGB565 4 +#define FSL_DCU_RGB888 5 +#define FSL_DCU_ARGB8888 6 +#define FSL_DCU_ARGB1555 11 +#define FSL_DCU_ARGB4444 12 +#define FSL_DCU_YUV422 14 + +#define VF610_LAYER_REG_NUM 9 +#define LS1021A_LAYER_REG_NUM 10 + +struct clk; +struct device; +struct drm_device; + +struct fsl_dcu_soc_data { + const char *name; + /*total layer number*/ + unsigned int total_layer; + /*max layer number DCU supported*/ + unsigned int max_layer; +}; + +struct fsl_dcu_drm_device { + struct device *dev; + struct device_node *np; + struct regmap *regmap; + int irq; + struct clk *clk; + /*protects hardware register*/ + spinlock_t irq_lock; + struct drm_device *drm; + struct drm_fbdev_cma *fbdev; + struct drm_crtc crtc; + struct drm_encoder encoder; + struct fsl_dcu_drm_connector connector; + const struct fsl_dcu_soc_data *soc; +}; + +void fsl_dcu_fbdev_init(struct drm_device *dev); +int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev); + +#endif /* __FSL_DCU_DRM_DRV_H__ */ diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c new file mode 100644 index 000000000000..8b8b819ea704 --- /dev/null +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c @@ -0,0 +1,23 @@ +/* + * Copyright 2015 Freescale Semiconductor, Inc. + * + * Freescale DCU drm device driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <drm/drmP.h> +#include <drm/drm_fb_cma_helper.h> + +#include "fsl_dcu_drm_drv.h" + +/* initialize fbdev helper */ +void fsl_dcu_fbdev_init(struct drm_device *dev) +{ + struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev->dev); + + fsl_dev->fbdev = drm_fbdev_cma_init(dev, 24, 1, 1); +} diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c new file mode 100644 index 000000000000..0ef5959710e7 --- /dev/null +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c @@ -0,0 +1,43 @@ +/* + * Copyright 2015 Freescale Semiconductor, Inc. + * + * Freescale DCU drm device driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_fb_cma_helper.h> + +#include "fsl_dcu_drm_crtc.h" +#include "fsl_dcu_drm_drv.h" + +static const struct drm_mode_config_funcs fsl_dcu_drm_mode_config_funcs = { + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, + .fb_create = drm_fb_cma_create, +}; + +int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev) +{ + drm_mode_config_init(fsl_dev->drm); + + fsl_dev->drm->mode_config.min_width = 0; + fsl_dev->drm->mode_config.min_height = 0; + fsl_dev->drm->mode_config.max_width = 2031; + fsl_dev->drm->mode_config.max_height = 2047; + fsl_dev->drm->mode_config.funcs = &fsl_dcu_drm_mode_config_funcs; + + drm_kms_helper_poll_init(fsl_dev->drm); + fsl_dcu_drm_crtc_create(fsl_dev); + fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc); + fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder); + drm_mode_config_reset(fsl_dev->drm); + + return 0; +} diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h new file mode 100644 index 000000000000..7093109fbc21 --- /dev/null +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h @@ -0,0 +1,33 @@ +/* + * Copyright 2015 Freescale Semiconductor, Inc. + * + * Freescale DCU drm device driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __FSL_DCU_DRM_CONNECTOR_H__ +#define __FSL_DCU_DRM_CONNECTOR_H__ + +struct fsl_dcu_drm_connector { + struct drm_connector base; + struct drm_encoder *encoder; + struct drm_panel *panel; +}; + +static inline struct fsl_dcu_drm_connector * +to_fsl_dcu_connector(struct drm_connector *con) +{ + return con ? container_of(con, struct fsl_dcu_drm_connector, base) + : NULL; +} + +int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev, + struct drm_encoder *encoder); +int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev, + struct drm_crtc *crtc); + +#endif /* __FSL_DCU_DRM_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c new file mode 100644 index 000000000000..82be6b86a168 --- /dev/null +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c @@ -0,0 +1,261 @@ +/* + * Copyright 2015 Freescale Semiconductor, Inc. 
+ * + * Freescale DCU drm device driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/regmap.h> + +#include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_plane_helper.h> + +#include "fsl_dcu_drm_drv.h" +#include "fsl_dcu_drm_plane.h" + +static int fsl_dcu_drm_plane_index(struct drm_plane *plane) +{ + struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private; + unsigned int total_layer = fsl_dev->soc->total_layer; + unsigned int index; + + index = drm_plane_index(plane); + if (index < total_layer) + return total_layer - index - 1; + + dev_err(fsl_dev->dev, "No more layers left\n"); + return -EINVAL; +} + +static int fsl_dcu_drm_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct drm_framebuffer *fb = state->fb; + + switch (fb->pixel_format) { + case DRM_FORMAT_RGB565: + case DRM_FORMAT_RGB888: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_BGRA4444: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_YUV422: + return 0; + default: + return -EINVAL; + } +} + +static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private; + unsigned int value; + int index, ret; + + index = fsl_dcu_drm_plane_index(plane); + if (index < 0) + return; + + ret = regmap_read(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), &value); + if (ret) + dev_err(fsl_dev->dev, "read DCU_CTRLDESCLN failed\n"); + value &= ~DCU_LAYER_EN; + ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), value); + if (ret) + dev_err(fsl_dev->dev, "set DCU register failed\n"); +} + +static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) + +{ + struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private; + struct drm_plane_state *state = plane->state; + struct drm_framebuffer *fb = plane->state->fb; + struct drm_gem_cma_object *gem; + unsigned int alpha, bpp; + int index, ret; + + if (!fb) + return; + + index = fsl_dcu_drm_plane_index(plane); + if (index < 0) + return; + + gem = drm_fb_cma_get_gem_obj(fb, 0); + + switch (fb->pixel_format) { + case DRM_FORMAT_RGB565: + bpp = FSL_DCU_RGB565; + alpha = 0xff; + break; + case DRM_FORMAT_RGB888: + bpp = FSL_DCU_RGB888; + alpha = 0xff; + break; + case DRM_FORMAT_ARGB8888: + bpp = FSL_DCU_ARGB8888; + alpha = 0xff; + break; + case DRM_FORMAT_BGRA4444: + bpp = FSL_DCU_ARGB4444; + alpha = 0xff; + break; + case DRM_FORMAT_ARGB1555: + bpp = FSL_DCU_ARGB1555; + alpha = 0xff; + break; + case DRM_FORMAT_YUV422: + bpp = FSL_DCU_YUV422; + alpha = 0xff; + break; + default: + return; + } + + ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 1), + DCU_LAYER_HEIGHT(state->crtc_h) | + DCU_LAYER_WIDTH(state->crtc_w)); + if (ret) + goto set_failed; + ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 2), + DCU_LAYER_POSY(state->crtc_y) | + DCU_LAYER_POSX(state->crtc_x)); + if (ret) + goto set_failed; + ret = regmap_write(fsl_dev->regmap, + DCU_CTRLDESCLN(index, 3), gem->paddr); + if (ret) + goto set_failed; + ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), + DCU_LAYER_EN | + DCU_LAYER_TRANS(alpha) | + 
DCU_LAYER_BPP(bpp) | + DCU_LAYER_AB(0)); + if (ret) + goto set_failed; + ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 5), + DCU_LAYER_CKMAX_R(0xFF) | + DCU_LAYER_CKMAX_G(0xFF) | + DCU_LAYER_CKMAX_B(0xFF)); + if (ret) + goto set_failed; + ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 6), + DCU_LAYER_CKMIN_R(0) | + DCU_LAYER_CKMIN_G(0) | + DCU_LAYER_CKMIN_B(0)); + if (ret) + goto set_failed; + ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 7), 0); + if (ret) + goto set_failed; + ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 8), + DCU_LAYER_FG_FCOLOR(0)); + if (ret) + goto set_failed; + ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 9), + DCU_LAYER_BG_BCOLOR(0)); + if (ret) + goto set_failed; + if (!strcmp(fsl_dev->soc->name, "ls1021a")) { + ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 10), + DCU_LAYER_POST_SKIP(0) | + DCU_LAYER_PRE_SKIP(0)); + if (ret) + goto set_failed; + } + ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, + DCU_MODE_DCU_MODE_MASK, + DCU_MODE_DCU_MODE(DCU_MODE_NORMAL)); + if (ret) + goto set_failed; + ret = regmap_write(fsl_dev->regmap, + DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG); + if (ret) + goto set_failed; + return; + +set_failed: + dev_err(fsl_dev->dev, "set DCU register failed\n"); +} + +static void +fsl_dcu_drm_plane_cleanup_fb(struct drm_plane *plane, + struct drm_framebuffer *fb, + const struct drm_plane_state *new_state) +{ +} + +static int +fsl_dcu_drm_plane_prepare_fb(struct drm_plane *plane, + struct drm_framebuffer *fb, + const struct drm_plane_state *new_state) +{ + return 0; +} + +static const struct drm_plane_helper_funcs fsl_dcu_drm_plane_helper_funcs = { + .atomic_check = fsl_dcu_drm_plane_atomic_check, + .atomic_disable = fsl_dcu_drm_plane_atomic_disable, + .atomic_update = fsl_dcu_drm_plane_atomic_update, + .cleanup_fb = fsl_dcu_drm_plane_cleanup_fb, + .prepare_fb = fsl_dcu_drm_plane_prepare_fb, +}; + +static void fsl_dcu_drm_plane_destroy(struct drm_plane *plane) +{ + drm_plane_cleanup(plane); +} + +static const struct drm_plane_funcs fsl_dcu_drm_plane_funcs = { + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + .destroy = fsl_dcu_drm_plane_destroy, + .disable_plane = drm_atomic_helper_disable_plane, + .reset = drm_atomic_helper_plane_reset, + .update_plane = drm_atomic_helper_update_plane, +}; + +static const u32 fsl_dcu_drm_plane_formats[] = { + DRM_FORMAT_RGB565, + DRM_FORMAT_RGB888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_YUV422, +}; + +struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev) +{ + struct drm_plane *primary; + int ret; + + primary = kzalloc(sizeof(*primary), GFP_KERNEL); + if (!primary) { + DRM_DEBUG_KMS("Failed to allocate primary plane\n"); + return NULL; + } + + /* possible_crtc's will be filled in later by crtc_init */ + ret = drm_universal_plane_init(dev, primary, 0, + &fsl_dcu_drm_plane_funcs, + fsl_dcu_drm_plane_formats, + ARRAY_SIZE(fsl_dcu_drm_plane_formats), + DRM_PLANE_TYPE_PRIMARY); + if (ret) { + kfree(primary); + primary = NULL; + } + drm_plane_helper_add(primary, &fsl_dcu_drm_plane_helper_funcs); + + return primary; +} diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h new file mode 100644 index 000000000000..d657f088d859 --- /dev/null +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h @@ -0,0 +1,17 @@ +/* + * Copyright 2015 Freescale 
Semiconductor, Inc. + * + * Freescale DCU drm device driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __FSL_DCU_DRM_PLANE_H__ +#define __FSL_DCU_DRM_PLANE_H__ + +struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev); + +#endif /* __FSL_DCU_DRM_PLANE_H__ */ diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c new file mode 100644 index 000000000000..fe8ab5da04fb --- /dev/null +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c @@ -0,0 +1,182 @@ +/* + * Copyright 2015 Freescale Semiconductor, Inc. + * + * Freescale DCU drm device driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/backlight.h> + +#include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_panel.h> + +#include "fsl_dcu_drm_drv.h" + +static int +fsl_dcu_drm_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + return 0; +} + +static void fsl_dcu_drm_encoder_disable(struct drm_encoder *encoder) +{ +} + +static void fsl_dcu_drm_encoder_enable(struct drm_encoder *encoder) +{ +} + +static const struct drm_encoder_helper_funcs encoder_helper_funcs = { + .atomic_check = fsl_dcu_drm_encoder_atomic_check, + .disable = fsl_dcu_drm_encoder_disable, + .enable = fsl_dcu_drm_encoder_enable, +}; + +static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); +} + +static const struct drm_encoder_funcs encoder_funcs = { + .destroy = fsl_dcu_drm_encoder_destroy, +}; + +int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev, + struct drm_crtc *crtc) +{ + struct drm_encoder *encoder = &fsl_dev->encoder; + int ret; + + encoder->possible_crtcs = 1; + ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs, + DRM_MODE_ENCODER_LVDS); + if (ret < 0) + return ret; + + drm_encoder_helper_add(encoder, &encoder_helper_funcs); + + return 0; +} + +static void fsl_dcu_drm_connector_destroy(struct drm_connector *connector) +{ + drm_connector_unregister(connector); + drm_connector_cleanup(connector); +} + +static enum drm_connector_status +fsl_dcu_drm_connector_detect(struct drm_connector *connector, bool force) +{ + return connector_status_connected; +} + +static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = { + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .destroy = fsl_dcu_drm_connector_destroy, + .detect = fsl_dcu_drm_connector_detect, + .dpms = drm_atomic_helper_connector_dpms, + .fill_modes = drm_helper_probe_single_connector_modes, + .reset = drm_atomic_helper_connector_reset, +}; + +static struct drm_encoder * +fsl_dcu_drm_connector_best_encoder(struct drm_connector *connector) +{ + struct fsl_dcu_drm_connector *fsl_con = to_fsl_dcu_connector(connector); + + return fsl_con->encoder; +} + +static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector) +{ + struct fsl_dcu_drm_connector *fsl_connector; + int 
(*get_modes)(struct drm_panel *panel); + int num_modes = 0; + + fsl_connector = to_fsl_dcu_connector(connector); + if (fsl_connector->panel && fsl_connector->panel->funcs && + fsl_connector->panel->funcs->get_modes) { + get_modes = fsl_connector->panel->funcs->get_modes; + num_modes = get_modes(fsl_connector->panel); + } + + return num_modes; +} + +static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + if (mode->hdisplay & 0xf) + return MODE_ERROR; + + return MODE_OK; +} + +static const struct drm_connector_helper_funcs connector_helper_funcs = { + .best_encoder = fsl_dcu_drm_connector_best_encoder, + .get_modes = fsl_dcu_drm_connector_get_modes, + .mode_valid = fsl_dcu_drm_connector_mode_valid, +}; + +int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev, + struct drm_encoder *encoder) +{ + struct drm_connector *connector = &fsl_dev->connector.base; + struct drm_mode_config mode_config = fsl_dev->drm->mode_config; + struct device_node *panel_node; + int ret; + + fsl_dev->connector.encoder = encoder; + + ret = drm_connector_init(fsl_dev->drm, connector, + &fsl_dcu_drm_connector_funcs, + DRM_MODE_CONNECTOR_LVDS); + if (ret < 0) + return ret; + + drm_connector_helper_add(connector, &connector_helper_funcs); + ret = drm_connector_register(connector); + if (ret < 0) + goto err_cleanup; + + ret = drm_mode_connector_attach_encoder(connector, encoder); + if (ret < 0) + goto err_sysfs; + + drm_object_property_set_value(&connector->base, + mode_config.dpms_property, + DRM_MODE_DPMS_OFF); + + panel_node = of_parse_phandle(fsl_dev->np, "fsl,panel", 0); + if (panel_node) { + fsl_dev->connector.panel = of_drm_find_panel(panel_node); + if (!fsl_dev->connector.panel) { + ret = -EPROBE_DEFER; + goto err_sysfs; + } + of_node_put(panel_node); + } + + ret = drm_panel_attach(fsl_dev->connector.panel, connector); + if (ret) { + dev_err(fsl_dev->dev, "failed to attach panel\n"); + goto err_sysfs; + } + + return 0; + +err_sysfs: + drm_connector_unregister(connector); +err_cleanup: + drm_connector_cleanup(connector); + return ret; +} diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c index de6f62a6ceb7..db9f7d011832 100644 --- a/drivers/gpu/drm/gma500/accel_2d.c +++ b/drivers/gpu/drm/gma500/accel_2d.c @@ -276,12 +276,12 @@ static void psbfb_copyarea_accel(struct fb_info *info, break; default: /* software fallback */ - cfb_copyarea(info, a); + drm_fb_helper_cfb_copyarea(info, a); return; } if (!gma_power_begin(dev, false)) { - cfb_copyarea(info, a); + drm_fb_helper_cfb_copyarea(info, a); return; } psb_accel_2d_copy(dev_priv, @@ -308,7 +308,7 @@ void psbfb_copyarea(struct fb_info *info, /* Avoid the 8 pixel erratum */ if (region->width == 8 || region->height == 8 || (info->flags & FBINFO_HWACCEL_DISABLED)) - return cfb_copyarea(info, region); + return drm_fb_helper_cfb_copyarea(info, region); psbfb_copyarea_accel(info, region); } diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 2d42ce6d3757..2eaf1b31c7bd 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -194,9 +194,9 @@ static struct fb_ops psbfb_ops = { .fb_set_par = drm_fb_helper_set_par, .fb_blank = drm_fb_helper_blank, .fb_setcolreg = psbfb_setcolreg, - .fb_fillrect = cfb_fillrect, + .fb_fillrect = drm_fb_helper_cfb_fillrect, .fb_copyarea = psbfb_copyarea, - .fb_imageblit = cfb_imageblit, + .fb_imageblit = drm_fb_helper_cfb_imageblit, .fb_mmap = psbfb_mmap, .fb_sync 
= psbfb_sync, .fb_ioctl = psbfb_ioctl, @@ -208,9 +208,9 @@ static struct fb_ops psbfb_roll_ops = { .fb_set_par = drm_fb_helper_set_par, .fb_blank = drm_fb_helper_blank, .fb_setcolreg = psbfb_setcolreg, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, + .fb_fillrect = drm_fb_helper_cfb_fillrect, + .fb_copyarea = drm_fb_helper_cfb_copyarea, + .fb_imageblit = drm_fb_helper_cfb_imageblit, .fb_pan_display = psbfb_pan, .fb_mmap = psbfb_mmap, .fb_ioctl = psbfb_ioctl, @@ -222,9 +222,9 @@ static struct fb_ops psbfb_unaccel_ops = { .fb_set_par = drm_fb_helper_set_par, .fb_blank = drm_fb_helper_blank, .fb_setcolreg = psbfb_setcolreg, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, + .fb_fillrect = drm_fb_helper_cfb_fillrect, + .fb_copyarea = drm_fb_helper_cfb_copyarea, + .fb_imageblit = drm_fb_helper_cfb_imageblit, .fb_mmap = psbfb_mmap, .fb_ioctl = psbfb_ioctl, }; @@ -343,7 +343,6 @@ static int psbfb_create(struct psb_fbdev *fbdev, struct drm_framebuffer *fb; struct psb_framebuffer *psbfb = &fbdev->pfb; struct drm_mode_fb_cmd2 mode_cmd; - struct device *device = &dev->pdev->dev; int size; int ret; struct gtt_range *backing; @@ -409,9 +408,9 @@ static int psbfb_create(struct psb_fbdev *fbdev, mutex_lock(&dev->struct_mutex); - info = framebuffer_alloc(0, device); - if (!info) { - ret = -ENOMEM; + info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper); + if (IS_ERR(info)) { + ret = PTR_ERR(info); goto out_err1; } info->par = fbdev; @@ -426,7 +425,6 @@ static int psbfb_create(struct psb_fbdev *fbdev, psbfb->fbdev = info; fbdev->psb_fb_helper.fb = fb; - fbdev->psb_fb_helper.fbdev = info; drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); strcpy(info->fix.id, "psbdrmfb"); @@ -440,12 +438,6 @@ static int psbfb_create(struct psb_fbdev *fbdev, } else /* Software */ info->fbops = &psbfb_unaccel_ops; - ret = fb_alloc_cmap(&info->cmap, 256, 0); - if (ret) { - ret = -ENOMEM; - goto out_unref; - } - info->fix.smem_start = dev->mode_config.fb_base; info->fix.smem_len = size; info->fix.ywrapstep = gtt_roll; @@ -456,11 +448,6 @@ static int psbfb_create(struct psb_fbdev *fbdev, info->screen_size = size; if (dev_priv->gtt.stolen_size) { - info->apertures = alloc_apertures(1); - if (!info->apertures) { - ret = -ENOMEM; - goto out_unref; - } info->apertures->ranges[0].base = dev->mode_config.fb_base; info->apertures->ranges[0].size = dev_priv->gtt.stolen_size; } @@ -483,6 +470,8 @@ out_unref: psb_gtt_free_range(dev, backing); else drm_gem_object_unreference(&backing->gem); + + drm_fb_helper_release_fbi(&fbdev->psb_fb_helper); out_err1: mutex_unlock(&dev->struct_mutex); psb_gtt_free_range(dev, backing); @@ -570,16 +559,11 @@ static const struct drm_fb_helper_funcs psb_fb_helper_funcs = { static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev) { - struct fb_info *info; struct psb_framebuffer *psbfb = &fbdev->pfb; - if (fbdev->psb_fb_helper.fbdev) { - info = fbdev->psb_fb_helper.fbdev; - unregister_framebuffer(info); - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - framebuffer_release(info); - } + drm_fb_helper_unregister_fbi(&fbdev->psb_fb_helper); + drm_fb_helper_release_fbi(&fbdev->psb_fb_helper); + drm_fb_helper_fini(&fbdev->psb_fb_helper); drm_framebuffer_unregister_private(&psbfb->base); drm_framebuffer_cleanup(&psbfb->base); diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index eb87e2538861..051eab33e4c7 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ 
b/drivers/gpu/drm/i915/Kconfig @@ -36,21 +36,6 @@ config DRM_I915 i810 driver instead, and the Atom z5xx series has an entirely different implementation. -config DRM_I915_FBDEV - bool "Enable legacy fbdev support for the modesetting intel driver" - depends on DRM_I915 - select DRM_KMS_FB_HELPER - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT - default y - help - Choose this option if you have a need for the legacy fbdev - support. Note that this support also provide the linux console - support on top of the intel modesetting driver. - - If in doubt, say "Y". - config DRM_I915_PRELIMINARY_HW_SUPPORT bool "Enable preliminary support for prerelease Intel hardware by default" depends on DRM_I915 diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index ddb69f337dc6..44d290ae1999 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -66,7 +66,7 @@ i915-y += intel_audio.o \ intel_sideband.o \ intel_sprite.o i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o -i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o +i915-$(CONFIG_DRM_FBDEV_EMULATION) += intel_fbdev.o # modesetting output/encoder code i915-y += dvo_ch7017.o \ diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 2f1b693d7162..4563f8b955ea 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1863,7 +1863,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) struct intel_framebuffer *fb; struct drm_framebuffer *drm_fb; -#ifdef CONFIG_DRM_I915_FBDEV +#ifdef CONFIG_DRM_FBDEV_EMULATION struct drm_i915_private *dev_priv = dev->dev_private; ifbdev = dev_priv->fbdev; @@ -2700,6 +2700,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain) return "PORT_DDI_D_2_LANES"; case POWER_DOMAIN_PORT_DDI_D_4_LANES: return "PORT_DDI_D_4_LANES"; + case POWER_DOMAIN_PORT_DDI_E_2_LANES: + return "PORT_DDI_E_2_LANES"; case POWER_DOMAIN_PORT_DSI: return "PORT_DSI"; case POWER_DOMAIN_PORT_CRT: diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 4646fe1a0499..4737d15de5f0 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -665,15 +665,18 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) pci_disable_device(drm_dev->pdev); /* - * During hibernation on some GEN4 platforms the BIOS may try to access + * During hibernation on some platforms the BIOS may try to access * the device even though it's already in D3 and hang the machine. So * leave the device in D0 on those platforms and hope the BIOS will - * power down the device properly. Platforms where this was seen: - * Lenovo Thinkpad X301, X61s + * power down the device properly. The issue was seen on multiple old + * GENs with different BIOS vendors, so having an explicit blacklist + * is impractical; apply the workaround on everything pre GEN6. 
The + * platforms where the issue was seen: + * Lenovo Thinkpad X301, X61s, X60, T60, X41 + * Fujitsu FSC S7110 + * Acer Aspire 1830T */ - if (!(hibernation && - drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO && - INTEL_INFO(dev_priv)->gen == 4)) + if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6)) pci_set_power_state(drm_dev->pdev, PCI_D3hot); return 0; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1d99402e4a1e..4eabe19a684f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -188,6 +188,7 @@ enum intel_display_power_domain { POWER_DOMAIN_PORT_DDI_C_4_LANES, POWER_DOMAIN_PORT_DDI_D_2_LANES, POWER_DOMAIN_PORT_DDI_D_4_LANES, + POWER_DOMAIN_PORT_DDI_E_2_LANES, POWER_DOMAIN_PORT_DSI, POWER_DOMAIN_PORT_CRT, POWER_DOMAIN_PORT_OTHER, @@ -220,6 +221,7 @@ enum hpd_pin { HPD_PORT_B, HPD_PORT_C, HPD_PORT_D, + HPD_PORT_E, HPD_NUM_PINS }; @@ -1421,6 +1423,10 @@ enum modeset_restore { #define DP_AUX_C 0x20 #define DP_AUX_D 0x30 +#define DDC_PIN_B 0x05 +#define DDC_PIN_C 0x04 +#define DDC_PIN_D 0x06 + struct ddi_vbt_port_info { /* * This is an index in the HDMI/DVI DDI buffer translation table. @@ -1435,6 +1441,7 @@ struct ddi_vbt_port_info { uint8_t supports_dp:1; uint8_t alternate_aux_channel; + uint8_t alternate_ddc_pin; uint8_t dp_boost_level; uint8_t hdmi_boost_level; @@ -1875,7 +1882,7 @@ struct drm_i915_private { struct drm_i915_gem_object *vlv_pctx; -#ifdef CONFIG_DRM_I915_FBDEV +#ifdef CONFIG_DRM_FBDEV_EMULATION /* list of fbdev register on this device */ struct intel_fbdev *fbdev; struct work_struct fbdev_suspend_work; diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index a36cb95ec798..f361c4a56995 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -348,7 +348,7 @@ int i915_gem_init_stolen(struct drm_device *dev) * memory, so just consider the start. 
*/ reserved_total = stolen_top - reserved_base; - DRM_DEBUG_KMS("Memory reserved for graphics device: %luK, usable: %luK\n", + DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n", dev_priv->gtt.stolen_size >> 10, (dev_priv->gtt.stolen_size - reserved_total) >> 10); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index a05104562932..8485bea966cc 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -61,6 +61,13 @@ static const u32 hpd_cpt[HPD_NUM_PINS] = { [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT }; +static const u32 hpd_spt[HPD_NUM_PINS] = { + [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, + [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, + [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, + [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT +}; + static const u32 hpd_mask_i915[HPD_NUM_PINS] = { [HPD_CRT] = CRT_HOTPLUG_INT_EN, [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, @@ -1253,6 +1260,8 @@ static bool pch_port_hotplug_long_detect(enum port port, u32 val) return val & PORTC_HOTPLUG_LONG_DETECT; case PORT_D: return val & PORTD_HOTPLUG_LONG_DETECT; + case PORT_E: + return val & PORTE_HOTPLUG_LONG_DETECT; default: return false; } @@ -1753,7 +1762,12 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) { struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; + u32 hotplug_trigger; + + if (HAS_PCH_SPT(dev)) + hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT; + else + hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; if (hotplug_trigger) { u32 dig_hotplug_reg, pin_mask, long_mask; @@ -1761,9 +1775,23 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); - intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, - dig_hotplug_reg, hpd_cpt, - pch_port_hotplug_long_detect); + if (HAS_PCH_SPT(dev)) { + intel_get_hpd_pins(&pin_mask, &long_mask, + hotplug_trigger, + dig_hotplug_reg, hpd_spt, + pch_port_hotplug_long_detect); + + /* detect PORTE HP event */ + dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); + if (pch_port_hotplug_long_detect(PORT_E, + dig_hotplug_reg)) + long_mask |= 1 << HPD_PORT_E; + } else + intel_get_hpd_pins(&pin_mask, &long_mask, + hotplug_trigger, + dig_hotplug_reg, hpd_cpt, + pch_port_hotplug_long_detect); + intel_hpd_irq_handler(dev, pin_mask, long_mask); } @@ -2985,6 +3013,11 @@ static void ibx_hpd_irq_setup(struct drm_device *dev) for_each_intel_encoder(dev, intel_encoder) if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED) enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; + } else if (HAS_PCH_SPT(dev)) { + hotplug_irqs = SDE_HOTPLUG_MASK_SPT; + for_each_intel_encoder(dev, intel_encoder) + if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED) + enabled_irqs |= hpd_spt[intel_encoder->hpd_pin]; } else { hotplug_irqs = SDE_HOTPLUG_MASK_CPT; for_each_intel_encoder(dev, intel_encoder) @@ -3006,6 +3039,13 @@ static void ibx_hpd_irq_setup(struct drm_device *dev) hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; I915_WRITE(PCH_PORT_HOTPLUG, hotplug); + + /* enable SPT PORTE hot plug */ + if (HAS_PCH_SPT(dev)) { + hotplug = I915_READ(PCH_PORT_HOTPLUG2); + hotplug |= PORTE_HOTPLUG_ENABLE; + I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); + } } static void bxt_hpd_irq_setup(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/i915_reg.h 
b/drivers/gpu/drm/i915/i915_reg.h index c82db2aed55c..e7c9dc8e24fe 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -5981,6 +5981,7 @@ enum skl_disp_power_wells { #define SDE_AUXC_CPT (1 << 26) #define SDE_AUXB_CPT (1 << 25) #define SDE_AUX_MASK_CPT (7 << 25) +#define SDE_PORTE_HOTPLUG_SPT (1 << 25) #define SDE_PORTD_HOTPLUG_CPT (1 << 23) #define SDE_PORTC_HOTPLUG_CPT (1 << 22) #define SDE_PORTB_HOTPLUG_CPT (1 << 21) @@ -5991,6 +5992,10 @@ enum skl_disp_power_wells { SDE_PORTD_HOTPLUG_CPT | \ SDE_PORTC_HOTPLUG_CPT | \ SDE_PORTB_HOTPLUG_CPT) +#define SDE_HOTPLUG_MASK_SPT (SDE_PORTE_HOTPLUG_SPT | \ + SDE_PORTD_HOTPLUG_CPT | \ + SDE_PORTC_HOTPLUG_CPT | \ + SDE_PORTB_HOTPLUG_CPT) #define SDE_GMBUS_CPT (1 << 17) #define SDE_ERROR_CPT (1 << 16) #define SDE_AUDIO_CP_REQ_C_CPT (1 << 10) @@ -6062,6 +6067,13 @@ enum skl_disp_power_wells { #define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) #define PORTB_HOTPLUG_LONG_DETECT (2 << 0) +#define PCH_PORT_HOTPLUG2 0xc403C /* SHOTPLUG_CTL2 */ +#define PORTE_HOTPLUG_ENABLE (1 << 4) +#define PORTE_HOTPLUG_STATUS_MASK (0x3 << 0) +#define PORTE_HOTPLUG_NO_DETECT (0 << 0) +#define PORTE_HOTPLUG_SHORT_DETECT (1 << 0) +#define PORTE_HOTPLUG_LONG_DETECT (2 << 0) + #define PCH_GPIOA 0xc5010 #define PCH_GPIOB 0xc5014 #define PCH_GPIOC 0xc5018 diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 8e46149bafdd..b3e437b3bb54 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -401,7 +401,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, { struct sdvo_device_mapping *p_mapping; const struct bdb_general_definitions *p_defs; - const union child_device_config *p_child; + const struct old_child_dev_config *child; /* legacy */ int i, child_device_num, count; u16 block_size; @@ -410,14 +410,14 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n"); return; } - /* judge whether the size of child device meets the requirements. - * If the child device size obtained from general definition block - * is different with sizeof(struct child_device_config), skip the - * parsing of sdvo device info + + /* + * Only parse SDVO mappings when the general definitions block child + * device size matches that of the *legacy* child device config + * struct. Thus, SDVO mapping will be skipped for newer VBT. */ - if (p_defs->child_dev_size != sizeof(*p_child)) { - /* different child dev size . Ignore it */ - DRM_DEBUG_KMS("different child size is found. Invalid.\n"); + if (p_defs->child_dev_size != sizeof(*child)) { + DRM_DEBUG_KMS("Unsupported child device size for SDVO mapping.\n"); return; } /* get the block size of general definitions */ @@ -427,37 +427,37 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, p_defs->child_dev_size; count = 0; for (i = 0; i < child_device_num; i++) { - p_child = child_device_ptr(p_defs, i); - if (!p_child->old.device_type) { + child = &child_device_ptr(p_defs, i)->old; + if (!child->device_type) { /* skip the device block if device type is invalid */ continue; } - if (p_child->old.slave_addr != SLAVE_ADDR1 && - p_child->old.slave_addr != SLAVE_ADDR2) { + if (child->slave_addr != SLAVE_ADDR1 && + child->slave_addr != SLAVE_ADDR2) { /* * If the slave address is neither 0x70 nor 0x72, * it is not a SDVO device. Skip it. 
*/ continue; } - if (p_child->old.dvo_port != DEVICE_PORT_DVOB && - p_child->old.dvo_port != DEVICE_PORT_DVOC) { + if (child->dvo_port != DEVICE_PORT_DVOB && + child->dvo_port != DEVICE_PORT_DVOC) { /* skip the incorrect SDVO port */ DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n"); continue; } DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on" - " %s port\n", - p_child->old.slave_addr, - (p_child->old.dvo_port == DEVICE_PORT_DVOB) ? - "SDVOB" : "SDVOC"); - p_mapping = &(dev_priv->sdvo_mappings[p_child->old.dvo_port - 1]); + " %s port\n", + child->slave_addr, + (child->dvo_port == DEVICE_PORT_DVOB) ? + "SDVOB" : "SDVOC"); + p_mapping = &(dev_priv->sdvo_mappings[child->dvo_port - 1]); if (!p_mapping->initialized) { - p_mapping->dvo_port = p_child->old.dvo_port; - p_mapping->slave_addr = p_child->old.slave_addr; - p_mapping->dvo_wiring = p_child->old.dvo_wiring; - p_mapping->ddc_pin = p_child->old.ddc_pin; - p_mapping->i2c_pin = p_child->old.i2c_pin; + p_mapping->dvo_port = child->dvo_port; + p_mapping->slave_addr = child->slave_addr; + p_mapping->dvo_wiring = child->dvo_wiring; + p_mapping->ddc_pin = child->ddc_pin; + p_mapping->i2c_pin = child->i2c_pin; p_mapping->initialized = 1; DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n", p_mapping->dvo_port, @@ -469,7 +469,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, DRM_DEBUG_KMS("Maybe one SDVO port is shared by " "two SDVO device.\n"); } - if (p_child->old.slave2_addr) { + if (child->slave2_addr) { /* Maybe this is a SDVO device with multiple inputs */ /* And the mapping info is not added */ DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this" @@ -905,23 +905,23 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, uint8_t hdmi_level_shift; int i, j; bool is_dvi, is_hdmi, is_dp, is_edp, is_crt; - uint8_t aux_channel; + uint8_t aux_channel, ddc_pin; /* Each DDI port can have more than one value on the "DVO Port" field, * so look for all the possible values for each port and abort if more * than one is found. */ - int dvo_ports[][2] = { - {DVO_PORT_HDMIA, DVO_PORT_DPA}, - {DVO_PORT_HDMIB, DVO_PORT_DPB}, - {DVO_PORT_HDMIC, DVO_PORT_DPC}, - {DVO_PORT_HDMID, DVO_PORT_DPD}, - {DVO_PORT_CRT, -1 /* Port E can only be DVO_PORT_CRT */ }, + int dvo_ports[][3] = { + {DVO_PORT_HDMIA, DVO_PORT_DPA, -1}, + {DVO_PORT_HDMIB, DVO_PORT_DPB, -1}, + {DVO_PORT_HDMIC, DVO_PORT_DPC, -1}, + {DVO_PORT_HDMID, DVO_PORT_DPD, -1}, + {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE}, }; /* Find the child device to use, abort if more than one found. */ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { it = dev_priv->vbt.child_dev + i; - for (j = 0; j < 2; j++) { + for (j = 0; j < 3; j++) { if (dvo_ports[port][j] == -1) break; @@ -939,6 +939,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, return; aux_channel = child->raw[25]; + ddc_pin = child->common.ddc_pin; is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT; @@ -970,11 +971,27 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port)); if (is_dvi) { - if (child->common.ddc_pin == 0x05 && port != PORT_B) + if (port == PORT_E) { + info->alternate_ddc_pin = ddc_pin; + /* if DDI E shares a DDC pin with another port, then + * DVI/HDMI can't exist on the shared port.
+ * Otherwise they would share the same DDC pin and the system + * couldn't communicate with them separately. */ + if (ddc_pin == DDC_PIN_B) { + dev_priv->vbt.ddi_port_info[PORT_B].supports_dvi = 0; + dev_priv->vbt.ddi_port_info[PORT_B].supports_hdmi = 0; + } else if (ddc_pin == DDC_PIN_C) { + dev_priv->vbt.ddi_port_info[PORT_C].supports_dvi = 0; + dev_priv->vbt.ddi_port_info[PORT_C].supports_hdmi = 0; + } else if (ddc_pin == DDC_PIN_D) { + dev_priv->vbt.ddi_port_info[PORT_D].supports_dvi = 0; + dev_priv->vbt.ddi_port_info[PORT_D].supports_hdmi = 0; + } + } else if (ddc_pin == DDC_PIN_B && port != PORT_B) DRM_DEBUG_KMS("Unexpected DDC pin for port B\n"); - if (child->common.ddc_pin == 0x04 && port != PORT_C) + else if (ddc_pin == DDC_PIN_C && port != PORT_C) DRM_DEBUG_KMS("Unexpected DDC pin for port C\n"); - if (child->common.ddc_pin == 0x06 && port != PORT_D) + else if (ddc_pin == DDC_PIN_D && port != PORT_D) DRM_DEBUG_KMS("Unexpected DDC pin for port D\n"); } @@ -1060,27 +1077,30 @@ parse_device_mapping(struct drm_i915_private *dev_priv, return; } if (bdb->version < 195) { - expected_size = 33; + expected_size = sizeof(struct old_child_dev_config); } else if (bdb->version == 195) { expected_size = 37; } else if (bdb->version <= 197) { expected_size = 38; } else { expected_size = 38; - DRM_DEBUG_DRIVER("Expected child_device_config size for BDB version %u not known; assuming %u\n", - expected_size, bdb->version); + BUILD_BUG_ON(sizeof(*p_child) < 38); + DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n", + bdb->version, expected_size); } - if (expected_size > sizeof(*p_child)) { - DRM_ERROR("child_device_config cannot fit in p_child\n"); + /* The legacy sized child device config is the minimum we need. */ + if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) { + DRM_ERROR("Child device config size %u is too small.\n", + p_defs->child_dev_size); return; } - if (p_defs->child_dev_size != expected_size) { - DRM_ERROR("Size mismatch; child_device_config size=%u (expected %u); bdb->version: %u\n", + /* Flag an error for unexpected size, but continue anyway. */ + if (p_defs->child_dev_size != expected_size) + DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n", p_defs->child_dev_size, expected_size, bdb->version); - return; - } + /* get the block size of general definitions */ block_size = get_blocksize(p_defs); /* get the number of child device */ @@ -1125,7 +1145,14 @@ parse_device_mapping(struct drm_i915_private *dev_priv, child_dev_ptr = dev_priv->vbt.child_dev + count; count++; + + /* + * Copy as much as we know (sizeof) and is available + * (child_dev_size) of the child device. Accessing the data must + * depend on VBT version. + */ + memcpy(child_dev_ptr, p_child, + min_t(size_t, p_defs->child_dev_size, sizeof(*p_child))); } return; } diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 6d909efbf43f..46cd5c7ebacd 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -203,9 +203,11 @@ struct bdb_general_features { #define DEVICE_PORT_DVOB 0x01 #define DEVICE_PORT_DVOC 0x02 -/* We used to keep this struct but without any version control. We should avoid +/* + * We used to keep this struct but without any version control. We should avoid * using it in the future, but it should be safe to keep using it in the old - * code. */ + * code. Do not change; we rely on its size.
+ */ struct old_child_dev_config { u16 handle; u16 device_type; @@ -756,11 +758,6 @@ int intel_parse_bios(struct drm_device *dev); #define DVO_C 2 #define DVO_D 3 -/* define the PORT for DP output type */ -#define PORT_IDPB 7 -#define PORT_IDPC 8 -#define PORT_IDPD 9 - /* Possible values for the "DVO Port" field for versions >= 155: */ #define DVO_PORT_HDMIA 0 #define DVO_PORT_HDMIB 1 @@ -773,6 +770,8 @@ int intel_parse_bios(struct drm_device *dev); #define DVO_PORT_DPC 8 #define DVO_PORT_DPD 9 #define DVO_PORT_DPA 10 +#define DVO_PORT_DPE 11 +#define DVO_PORT_HDMIE 12 #define DVO_PORT_MIPIA 21 #define DVO_PORT_MIPIB 22 #define DVO_PORT_MIPIC 23 diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 5dff8b7e0f03..4823184258a0 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -128,7 +128,7 @@ static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = { { 0x80FFFFFF, 0x001B0002, 0x0 },/* 9: 1000 1000 0 */ }; -/* Skylake H, S, and Skylake Y with 0.95V VccIO */ +/* Skylake H and S */ static const struct ddi_buf_trans skl_ddi_translations_dp[] = { { 0x00002016, 0x000000A0, 0x0 }, { 0x00005012, 0x0000009B, 0x0 }, @@ -143,23 +143,23 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = { /* Skylake U */ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = { - { 0x00002016, 0x000000A2, 0x0 }, + { 0x0000201B, 0x000000A2, 0x0 }, { 0x00005012, 0x00000088, 0x0 }, { 0x00007011, 0x00000087, 0x0 }, - { 0x80009010, 0x000000C7, 0x1 }, /* Uses I_boost */ - { 0x00002016, 0x0000009D, 0x0 }, + { 0x80009010, 0x000000C7, 0x1 }, /* Uses I_boost level 0x1 */ + { 0x0000201B, 0x0000009D, 0x0 }, { 0x00005012, 0x000000C7, 0x0 }, { 0x00007011, 0x000000C7, 0x0 }, { 0x00002016, 0x00000088, 0x0 }, { 0x00005012, 0x000000C7, 0x0 }, }; -/* Skylake Y with 0.85V VccIO */ -static const struct ddi_buf_trans skl_y_085v_ddi_translations_dp[] = { +/* Skylake Y */ +static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = { { 0x00000018, 0x000000A2, 0x0 }, { 0x00005012, 0x00000088, 0x0 }, { 0x00007011, 0x00000087, 0x0 }, - { 0x80009010, 0x000000C7, 0x1 }, /* Uses I_boost */ + { 0x80009010, 0x000000C7, 0x3 }, /* Uses I_boost level 0x3 */ { 0x00000018, 0x0000009D, 0x0 }, { 0x00005012, 0x000000C7, 0x0 }, { 0x00007011, 0x000000C7, 0x0 }, @@ -168,7 +168,7 @@ static const struct ddi_buf_trans skl_y_085v_ddi_translations_dp[] = { }; /* - * Skylake H and S, and Skylake Y with 0.95V VccIO + * Skylake H and S * eDP 1.4 low vswing translation parameters */ static const struct ddi_buf_trans skl_ddi_translations_edp[] = { @@ -202,10 +202,10 @@ static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = { }; /* - * Skylake Y with 0.95V VccIO + * Skylake Y * eDP 1.4 low vswing translation parameters */ -static const struct ddi_buf_trans skl_y_085v_ddi_translations_edp[] = { +static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = { { 0x00000018, 0x000000A8, 0x0 }, { 0x00004013, 0x000000AB, 0x0 }, { 0x00007011, 0x000000A4, 0x0 }, @@ -218,7 +218,7 @@ static const struct ddi_buf_trans skl_y_085v_ddi_translations_edp[] = { { 0x00000018, 0x0000008A, 0x0 }, }; -/* Skylake H, S and U, and Skylake Y with 0.95V VccIO */ +/* Skylake U, H and S */ static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = { { 0x00000018, 0x000000AC, 0x0 }, { 0x00005012, 0x0000009D, 0x0 }, @@ -233,8 +233,8 @@ static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = { { 0x00000018, 0x000000C7, 0x0 }, }; -/* Skylake Y with 0.85V VccIO */ -static const 
struct ddi_buf_trans skl_y_085v_ddi_translations_hdmi[] = { +/* Skylake Y */ +static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = { { 0x00000018, 0x000000A1, 0x0 }, { 0x00005012, 0x000000DF, 0x0 }, { 0x00007011, 0x00000084, 0x0 }, @@ -244,7 +244,7 @@ static const struct ddi_buf_trans skl_y_085v_ddi_translations_hdmi[] = { { 0x00006013, 0x000000C7, 0x0 }, { 0x00000018, 0x0000008A, 0x0 }, { 0x00003015, 0x000000C7, 0x0 }, /* Default */ - { 0x80003015, 0x000000C7, 0x7 }, /* Uses I_boost */ + { 0x80003015, 0x000000C7, 0x7 }, /* Uses I_boost level 0x7 */ { 0x00000018, 0x000000C7, 0x0 }, }; @@ -335,19 +335,11 @@ intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port) static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev, int *n_entries) { - struct drm_i915_private *dev_priv = dev->dev_private; const struct ddi_buf_trans *ddi_translations; - static int is_095v = -1; - - if (is_095v == -1) { - u32 spr1 = I915_READ(UAIMI_SPR1); - - is_095v = spr1 & SKL_VCCIO_MASK; - } - if (IS_SKL_ULX(dev) && !is_095v) { - ddi_translations = skl_y_085v_ddi_translations_dp; - *n_entries = ARRAY_SIZE(skl_y_085v_ddi_translations_dp); + if (IS_SKL_ULX(dev)) { + ddi_translations = skl_y_ddi_translations_dp; + *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp); } else if (IS_SKL_ULT(dev)) { ddi_translations = skl_u_ddi_translations_dp; *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp); @@ -364,23 +356,14 @@ static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev, { struct drm_i915_private *dev_priv = dev->dev_private; const struct ddi_buf_trans *ddi_translations; - static int is_095v = -1; - - if (is_095v == -1) { - u32 spr1 = I915_READ(UAIMI_SPR1); - is_095v = spr1 & SKL_VCCIO_MASK; - } - - if (IS_SKL_ULX(dev) && !is_095v) { + if (IS_SKL_ULX(dev)) { if (dev_priv->edp_low_vswing) { - ddi_translations = skl_y_085v_ddi_translations_edp; - *n_entries = - ARRAY_SIZE(skl_y_085v_ddi_translations_edp); + ddi_translations = skl_y_ddi_translations_edp; + *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp); } else { - ddi_translations = skl_y_085v_ddi_translations_dp; - *n_entries = - ARRAY_SIZE(skl_y_085v_ddi_translations_dp); + ddi_translations = skl_y_ddi_translations_dp; + *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp); } } else if (IS_SKL_ULT(dev)) { if (dev_priv->edp_low_vswing) { @@ -407,19 +390,11 @@ static const struct ddi_buf_trans * skl_get_buf_trans_hdmi(struct drm_device *dev, int *n_entries) { - struct drm_i915_private *dev_priv = dev->dev_private; const struct ddi_buf_trans *ddi_translations; - static int is_095v = -1; - - if (is_095v == -1) { - u32 spr1 = I915_READ(UAIMI_SPR1); - - is_095v = spr1 & SKL_VCCIO_MASK; - } - if (IS_SKL_ULX(dev) && !is_095v) { - ddi_translations = skl_y_085v_ddi_translations_hdmi; - *n_entries = ARRAY_SIZE(skl_y_085v_ddi_translations_hdmi); + if (IS_SKL_ULX(dev)) { + ddi_translations = skl_y_ddi_translations_hdmi; + *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi); } else { ddi_translations = skl_ddi_translations_hdmi; *n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d38ceb039de6..deba3330de71 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5102,7 +5102,6 @@ static enum intel_display_power_domain port_to_power_domain(enum port port) { switch (port) { case PORT_A: - case PORT_E: return POWER_DOMAIN_PORT_DDI_A_4_LANES; case PORT_B: return 
POWER_DOMAIN_PORT_DDI_B_4_LANES; @@ -5110,6 +5109,8 @@ static enum intel_display_power_domain port_to_power_domain(enum port port) return POWER_DOMAIN_PORT_DDI_C_4_LANES; case PORT_D: return POWER_DOMAIN_PORT_DDI_D_4_LANES; + case PORT_E: + return POWER_DOMAIN_PORT_DDI_E_2_LANES; default: WARN_ON_ONCE(1); return POWER_DOMAIN_PORT_OTHER; @@ -5684,16 +5685,13 @@ void skl_init_cdclk(struct drm_i915_private *dev_priv) /* enable PG1 and Misc I/O */ intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); - /* DPLL0 already enabed !? */ - if (I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE) { - DRM_DEBUG_DRIVER("DPLL0 already running\n"); - return; + /* DPLL0 not enabled (happens on early BIOS versions) */ + if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) { + /* enable DPLL0 */ + required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk); + skl_dpll0_enable(dev_priv, required_vco); } - /* enable DPLL0 */ - required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk); - skl_dpll0_enable(dev_priv, required_vco); - /* set CDCLK to the frequency the BIOS chose */ skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk); @@ -10114,7 +10112,7 @@ static struct drm_framebuffer * mode_fits_in_fbdev(struct drm_device *dev, struct drm_display_mode *mode) { -#ifdef CONFIG_DRM_I915_FBDEV +#ifdef CONFIG_DRM_FBDEV_EMULATION struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; struct drm_framebuffer *fb; @@ -13509,7 +13507,7 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, struct intel_plane *primary; struct intel_plane_state *state; const uint32_t *intel_primary_formats; - int num_formats; + unsigned int num_formats; primary = kzalloc(sizeof(*primary), GFP_KERNEL); if (primary == NULL) @@ -13947,6 +13945,15 @@ static void intel_setup_outputs(struct drm_device *dev) intel_ddi_init(dev, PORT_C); if (found & SFUSE_STRAP_DDID_DETECTED) intel_ddi_init(dev, PORT_D); + /* + * On SKL we don't have a way to detect DDI-E so we rely on VBT. + */ + if (IS_SKYLAKE(dev) && + (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp || + dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi || + dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi)) + intel_ddi_init(dev, PORT_E); + } else if (HAS_PCH_SPLIT(dev)) { int found; dpd_is_edp = intel_dp_is_edp(dev, PORT_D); @@ -14302,7 +14309,7 @@ intel_user_framebuffer_create(struct drm_device *dev, return intel_framebuffer_create(dev, mode_cmd, obj); } -#ifndef CONFIG_DRM_I915_FBDEV +#ifndef CONFIG_DRM_FBDEV_EMULATION static inline void intel_fbdev_output_poll_changed(struct drm_device *dev) { } @@ -14717,6 +14724,24 @@ void intel_modeset_init(struct drm_device *dev) if (INTEL_INFO(dev)->num_pipes == 0) return; + /* + * There may be no VBT; and if the BIOS enabled SSC we can + * just keep using it to avoid unnecessary flicker. Whereas if the + * BIOS isn't using it, don't assume it will work even if the VBT + * indicates as much. + */ + if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { + bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) & + DREF_SSC1_ENABLE); + + if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { + DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n", + bios_lvds_use_ssc ? "en" : "dis", + dev_priv->vbt.lvds_use_ssc ? 
"en" : "dis"); + dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; + } + } + intel_init_display(dev); intel_init_audio(dev); @@ -15278,7 +15303,6 @@ err: void intel_modeset_gem_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *c; struct drm_i915_gem_object *obj; int ret; @@ -15287,16 +15311,6 @@ void intel_modeset_gem_init(struct drm_device *dev) intel_init_gt_powersave(dev); mutex_unlock(&dev->struct_mutex); - /* - * There may be no VBT; and if the BIOS enabled SSC we can - * just keep using it to avoid unnecessary flicker. Whereas if the - * BIOS isn't using it, don't assume it will work even if the VBT - * indicates as much. - */ - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) - dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) & - DREF_SSC1_ENABLE); - intel_modeset_init_hw(dev); intel_setup_overlay(dev); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f45872cc6d24..f8f4d99440c1 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -95,9 +95,6 @@ static const int bxt_rates[] = { 162000, 216000, 243000, 270000, 324000, 432000, 540000 }; static const int skl_rates[] = { 162000, 216000, 270000, 324000, 432000, 540000 }; -static const int chv_rates[] = { 162000, 202500, 210000, 216000, - 243000, 270000, 324000, 405000, - 420000, 432000, 540000 }; static const int default_rates[] = { 162000, 270000, 540000 }; /** @@ -1159,7 +1156,7 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config) pipe_config->dpll_hw_state.ctrl1 = ctrl1; } -static void +void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config) { memset(&pipe_config->dpll_hw_state, 0, @@ -1191,30 +1188,40 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates) return (intel_dp_max_link_bw(intel_dp) >> 3) + 1; } +static bool intel_dp_source_supports_hbr2(struct drm_device *dev) +{ + /* WaDisableHBR2:skl */ + if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) + return false; + + if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) || + (INTEL_INFO(dev)->gen >= 9)) + return true; + else + return false; +} + static int intel_dp_source_rates(struct drm_device *dev, const int **source_rates) { + int size; + if (IS_BROXTON(dev)) { *source_rates = bxt_rates; - return ARRAY_SIZE(bxt_rates); + size = ARRAY_SIZE(bxt_rates); } else if (IS_SKYLAKE(dev)) { *source_rates = skl_rates; - return ARRAY_SIZE(skl_rates); - } else if (IS_CHERRYVIEW(dev)) { - *source_rates = chv_rates; - return ARRAY_SIZE(chv_rates); + size = ARRAY_SIZE(skl_rates); + } else { + *source_rates = default_rates; + size = ARRAY_SIZE(default_rates); } - *source_rates = default_rates; + /* This depends on the fact that 5.4 is last value in the array */ + if (!intel_dp_source_supports_hbr2(dev)) + size--; - if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) - /* WaDisableHBR2:skl */ - return (DP_LINK_BW_2_7 >> 3) + 1; - else if (INTEL_INFO(dev)->gen >= 8 || - (IS_HASWELL(dev) && !IS_HSW_ULX(dev))) - return (DP_LINK_BW_5_4 >> 3) + 1; - else - return (DP_LINK_BW_2_7 >> 3) + 1; + return size; } static void @@ -3993,9 +4000,14 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) } } - /* Training Pattern 3 support, both source and sink */ + /* Training Pattern 3 support, Intel platforms that support HBR2 alone + * have support for TP3 hence that check is used along with dpcd check + * to ensure TP3 can be enabled. 
+ * SKL < B0: due to its WaDisableHBR2 workaround, it is the only exception + * where TP3 is supported but still not enabled. */ if (drm_dp_tps3_supported(intel_dp->dpcd) && - (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) { + intel_dp_source_supports_hbr2(dev)) { intel_dp->use_tps3 = true; DRM_DEBUG_KMS("Displayport TPS3 supported\n"); } else @@ -5166,9 +5178,12 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) intel_dp_probe_oui(intel_dp); - if (!intel_dp_probe_mst(intel_dp)) + if (!intel_dp_probe_mst(intel_dp)) { + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + intel_dp_check_link_status(intel_dp); + drm_modeset_unlock(&dev->mode_config.connection_mutex); goto mst_fail; - + } } else { if (intel_dp->is_mst) { if (intel_dp_check_mst_status(intel_dp) == -EINVAL) @@ -5176,10 +5191,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) } if (!intel_dp->is_mst) { - /* - * we'll check the link status via the normal hot plug path later - - * but for short hpds we should check it now - */ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); intel_dp_check_link_status(intel_dp); drm_modeset_unlock(&dev->mode_config.connection_mutex); @@ -5221,16 +5232,17 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc) return -1; } -/* check the VBT to see whether the eDP is on DP-D port */ +/* check the VBT to see whether the eDP is on another port */ bool intel_dp_is_edp(struct drm_device *dev, enum port port) { struct drm_i915_private *dev_priv = dev->dev_private; union child_device_config *p_child; int i; static const short port_mapping[] = { - [PORT_B] = PORT_IDPB, - [PORT_C] = PORT_IDPC, - [PORT_D] = PORT_IDPD, + [PORT_B] = DVO_PORT_DPB, + [PORT_C] = DVO_PORT_DPC, + [PORT_D] = DVO_PORT_DPD, + [PORT_E] = DVO_PORT_DPE, }; if (port == PORT_A) @@ -6067,6 +6079,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, case PORT_D: intel_encoder->hpd_pin = HPD_PORT_D; break; + case PORT_E: + intel_encoder->hpd_pin = HPD_PORT_E; + break; default: BUG(); } diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index dd291d123219..677d70e4d363 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -33,6 +33,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { + struct drm_device *dev = encoder->base.dev; struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; @@ -88,6 +89,10 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, &pipe_config->dp_m_n); pipe_config->dp_m_n.tu = slots; + + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + hsw_dp_set_ddi_pll_sel(pipe_config); + return true; } @@ -403,7 +408,7 @@ static bool intel_dp_mst_get_hw_state(struct intel_connector *connector) static void intel_connector_add_to_fbdev(struct intel_connector *connector) { -#ifdef CONFIG_DRM_I915_FBDEV +#ifdef CONFIG_DRM_FBDEV_EMULATION struct drm_i915_private *dev_priv = to_i915(connector->base.dev); drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base); #endif @@ -411,7 +416,7 @@ static void intel_connector_remove_from_fbdev(struct intel_connector *connector) { -#ifdef CONFIG_DRM_I915_FBDEV +#ifdef CONFIG_DRM_FBDEV_EMULATION struct drm_i915_private *dev_priv =
to_i915(connector->base.dev); drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base); #endif diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 090d67b04307..40e825d6a26f 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1212,6 +1212,7 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp); void intel_edp_drrs_invalidate(struct drm_device *dev, unsigned frontbuffer_bits); void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits); +void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config); /* intel_dp_mst.c */ int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); @@ -1225,7 +1226,7 @@ void intel_dvo_init(struct drm_device *dev); /* legacy fbdev emulation in intel_fbdev.c */ -#ifdef CONFIG_DRM_I915_FBDEV +#ifdef CONFIG_DRM_FBDEV_EMULATION extern int intel_fbdev_init(struct drm_device *dev); extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie); extern void intel_fbdev_fini(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 6c9351b2e3af..96476d7d7ed2 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -55,13 +55,6 @@ static int intel_fbdev_set_par(struct fb_info *info) ret = drm_fb_helper_set_par(info); if (ret == 0) { - /* - * FIXME: fbdev presumes that all callbacks also work from - * atomic contexts and relies on that for emergency oops - * printing. KMS totally doesn't do that and the locking here is - * by far not the only place this goes wrong. Ignore this for - * now until we solve this for real. - */ mutex_lock(&fb_helper->dev->struct_mutex); intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT); mutex_unlock(&fb_helper->dev->struct_mutex); @@ -80,13 +73,6 @@ static int intel_fbdev_blank(int blank, struct fb_info *info) ret = drm_fb_helper_blank(blank, info); if (ret == 0) { - /* - * FIXME: fbdev presumes that all callbacks also work from - * atomic contexts and relies on that for emergency oops - * printing. KMS totally doesn't do that and the locking here is - * by far not the only place this goes wrong. Ignore this for - * now until we solve this for real. - */ mutex_lock(&fb_helper->dev->struct_mutex); intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT); mutex_unlock(&fb_helper->dev->struct_mutex); @@ -106,13 +92,6 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var, ret = drm_fb_helper_pan_display(var, info); if (ret == 0) { - /* - * FIXME: fbdev presumes that all callbacks also work from - * atomic contexts and relies on that for emergency oops - * printing. KMS totally doesn't do that and the locking here is - * by far not the only place this goes wrong. Ignore this for - * now until we solve this for real. 
- */ mutex_lock(&fb_helper->dev->struct_mutex); intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT); mutex_unlock(&fb_helper->dev->struct_mutex); @@ -125,9 +104,9 @@ static struct fb_ops intelfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = intel_fbdev_set_par, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, + .fb_fillrect = drm_fb_helper_cfb_fillrect, + .fb_copyarea = drm_fb_helper_cfb_copyarea, + .fb_imageblit = drm_fb_helper_cfb_imageblit, .fb_pan_display = intel_fbdev_pan_display, .fb_blank = intel_fbdev_blank, .fb_setcmap = drm_fb_helper_setcmap, @@ -236,9 +215,9 @@ static int intelfb_create(struct drm_fb_helper *helper, obj = intel_fb->obj; size = obj->base.size; - info = framebuffer_alloc(0, &dev->pdev->dev); - if (!info) { - ret = -ENOMEM; + info = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(info)) { + ret = PTR_ERR(info); goto out_unpin; } @@ -247,24 +226,13 @@ static int intelfb_create(struct drm_fb_helper *helper, fb = &ifbdev->fb->base; ifbdev->helper.fb = fb; - ifbdev->helper.fbdev = info; strcpy(info->fix.id, "inteldrmfb"); info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &intelfb_ops; - ret = fb_alloc_cmap(&info->cmap, 256, 0); - if (ret) { - ret = -ENOMEM; - goto out_unpin; - } /* setup aperture base/size for vesafb takeover */ - info->apertures = alloc_apertures(1); - if (!info->apertures) { - ret = -ENOMEM; - goto out_unpin; - } info->apertures->ranges[0].base = dev->mode_config.fb_base; info->apertures->ranges[0].size = dev_priv->gtt.mappable_end; @@ -276,7 +244,7 @@ static int intelfb_create(struct drm_fb_helper *helper, size); if (!info->screen_base) { ret = -ENOSPC; - goto out_unpin; + goto out_destroy_fbi; } info->screen_size = size; @@ -303,6 +271,8 @@ static int intelfb_create(struct drm_fb_helper *helper, vga_switcheroo_client_fb_set(dev->pdev, info); return 0; +out_destroy_fbi: + drm_fb_helper_release_fbi(helper); out_unpin: i915_gem_object_ggtt_unpin(obj); drm_gem_object_unreference(&obj->base); @@ -544,16 +514,9 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = { static void intel_fbdev_destroy(struct drm_device *dev, struct intel_fbdev *ifbdev) { - if (ifbdev->helper.fbdev) { - struct fb_info *info = ifbdev->helper.fbdev; - unregister_framebuffer(info); - iounmap(info->screen_base); - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - - framebuffer_release(info); - } + drm_fb_helper_unregister_fbi(&ifbdev->helper); + drm_fb_helper_release_fbi(&ifbdev->helper); drm_fb_helper_fini(&ifbdev->helper); @@ -802,7 +765,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen) memset_io(info->screen_base, 0, info->screen_size); - fb_set_suspend(info, state); + drm_fb_helper_set_suspend(&ifbdev->helper, state); console_unlock(); } diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 4da737ccf69a..feb31d891482 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -2020,6 +2020,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; enum port port = intel_dig_port->port; + uint8_t alternate_ddc_pin; drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); @@ -2060,6 +2061,26 @@ void intel_hdmi_init_connector(struct 
intel_digital_port *intel_dig_port, intel_hdmi->ddc_bus = GMBUS_PIN_DPD; intel_encoder->hpd_pin = HPD_PORT_D; break; + case PORT_E: + /* On SKL PORT E doesn't have a separate GMBUS pin. + * We rely on VBT to set a proper alternate GMBUS pin. */ + alternate_ddc_pin = + dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin; + switch (alternate_ddc_pin) { + case DDC_PIN_B: + intel_hdmi->ddc_bus = GMBUS_PIN_DPB; + break; + case DDC_PIN_C: + intel_hdmi->ddc_bus = GMBUS_PIN_DPC; + break; + case DDC_PIN_D: + intel_hdmi->ddc_bus = GMBUS_PIN_DPD; + break; + default: + MISSING_CASE(alternate_ddc_pin); + } + intel_encoder->hpd_pin = HPD_PORT_E; + break; case PORT_A: intel_encoder->hpd_pin = HPD_PORT_A; /* Internal port only for eDP. */ diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index 032a0bf75f3b..53c0173a39fe 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -91,6 +91,9 @@ bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port) case HPD_PORT_D: *port = PORT_D; return true; + case HPD_PORT_E: + *port = PORT_E; + return true; default: return false; /* no hpd */ } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index e9520afc2033..40cbba4ea4ba 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1027,6 +1027,8 @@ static int intel_lr_context_pin(struct drm_i915_gem_request *rq) if (ret) goto unpin_ctx_obj; + ctx_obj->dirty = true; + /* Invalidate GuC TLB. */ if (i915.enable_guc_submission) I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index b1bd25e1e853..3f682a1a08ce 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -297,6 +297,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv, BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) | \ BIT(POWER_DOMAIN_AUX_B) | \ BIT(POWER_DOMAIN_AUX_C) | \ BIT(POWER_DOMAIN_AUX_D) | \ @@ -316,6 +317,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv, #define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \ BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \ BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) | \ BIT(POWER_DOMAIN_INIT)) #define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c index 9f9780b7ddf0..4f2068fe5d88 100644 --- a/drivers/gpu/drm/mgag200/mgag200_cursor.c +++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c @@ -70,18 +70,22 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc, BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev); BUG_ON(pixels_current == pixels_prev); + obj = drm_gem_object_lookup(dev, file_priv, handle); + if (!obj) + return -ENOENT; + ret = mgag200_bo_reserve(pixels_1, true); if (ret) { WREG8(MGA_CURPOSXL, 0); WREG8(MGA_CURPOSXH, 0); - return ret; + goto out_unref; } ret = mgag200_bo_reserve(pixels_2, true); if (ret) { WREG8(MGA_CURPOSXL, 0); WREG8(MGA_CURPOSXH, 0); mgag200_bo_unreserve(pixels_1); - return ret; + goto out_unreserve1; } if (!handle) { @@ -106,16 +110,6 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc, } } - mutex_lock(&dev->struct_mutex); - obj = drm_gem_object_lookup(dev, file_priv, handle); - if (!obj) { -
mutex_unlock(&dev->struct_mutex); - ret = -ENOENT; - goto out1; - } - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - bo = gem_to_mga_bo(obj); ret = mgag200_bo_reserve(bo, true); if (ret) { @@ -252,7 +246,11 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc, if (ret) mga_hide_cursor(mdev); mgag200_bo_unreserve(pixels_1); +out_unreserve1: mgag200_bo_unreserve(pixels_2); +out_unref: + drm_gem_object_unreference_unlocked(obj); + return ret; } diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 97745991544d..b0af77454d52 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -35,6 +35,7 @@ static const struct pci_device_id pciidlist[] = { { PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB }, { PCI_VENDOR_ID_MATROX, 0x533, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH }, { PCI_VENDOR_ID_MATROX, 0x534, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_ER }, + { PCI_VENDOR_ID_MATROX, 0x536, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EW3 }, {0,} }; diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index e9eea1d4e7c3..912151c36d59 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -180,6 +180,7 @@ enum mga_type { G200_EV, G200_EH, G200_ER, + G200_EW3, }; #define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B) diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c index 958cf3cf082d..87de15ea1f93 100644 --- a/drivers/gpu/drm/mgag200/mgag200_fb.c +++ b/drivers/gpu/drm/mgag200/mgag200_fb.c @@ -101,7 +101,7 @@ static void mga_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct mga_fbdev *mfbdev = info->par; - sys_fillrect(info, rect); + drm_fb_helper_sys_fillrect(info, rect); mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width, rect->height); } @@ -110,7 +110,7 @@ static void mga_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct mga_fbdev *mfbdev = info->par; - sys_copyarea(info, area); + drm_fb_helper_sys_copyarea(info, area); mga_dirty_update(mfbdev, area->dx, area->dy, area->width, area->height); } @@ -119,7 +119,7 @@ static void mga_imageblit(struct fb_info *info, const struct fb_image *image) { struct mga_fbdev *mfbdev = info->par; - sys_imageblit(info, image); + drm_fb_helper_sys_imageblit(info, image); mga_dirty_update(mfbdev, image->dx, image->dy, image->width, image->height); } @@ -166,7 +166,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper, struct fb_info *info; struct drm_framebuffer *fb; struct drm_gem_object *gobj = NULL; - struct device *device = &dev->pdev->dev; int ret; void *sysram; int size; @@ -189,9 +188,9 @@ static int mgag200fb_create(struct drm_fb_helper *helper, if (!sysram) return -ENOMEM; - info = framebuffer_alloc(0, device); - if (info == NULL) - return -ENOMEM; + info = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(info)) + return PTR_ERR(info); info->par = mfbdev; @@ -206,14 +205,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper, /* setup helper */ mfbdev->helper.fb = fb; - mfbdev->helper.fbdev = info; - - ret = fb_alloc_cmap(&info->cmap, 256, 0); - if (ret) { - DRM_ERROR("%s: can't allocate color map\n", info->fix.id); - ret = -ENOMEM; - goto out; - } strcpy(info->fix.id, "mgadrmfb"); @@ -221,11 +212,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper, info->fbops = &mgag200fb_ops; /* setup aperture base/size for vesafb takeover */ - info->apertures = 
alloc_apertures(1); - if (!info->apertures) { - ret = -ENOMEM; - goto out; - } info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base; info->apertures->ranges[0].size = mdev->mc.vram_size; @@ -240,24 +226,15 @@ static int mgag200fb_create(struct drm_fb_helper *helper, DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height); return 0; -out: - return ret; } static int mga_fbdev_destroy(struct drm_device *dev, struct mga_fbdev *mfbdev) { - struct fb_info *info; struct mga_framebuffer *mfb = &mfbdev->mfb; - if (mfbdev->helper.fbdev) { - info = mfbdev->helper.fbdev; - - unregister_framebuffer(info); - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - framebuffer_release(info); - } + drm_fb_helper_unregister_fbi(&mfbdev->helper); + drm_fb_helper_release_fbi(&mfbdev->helper); if (mfb->obj) { drm_gem_object_unreference_unlocked(mfb->obj); diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c index d3dcf54e6233..10535e3b75f2 100644 --- a/drivers/gpu/drm/mgag200/mgag200_i2c.c +++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c @@ -101,6 +101,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev) case G200_SE_B: case G200_EV: case G200_WB: + case G200_EW3: data = 1; clock = 2; break; diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c index f6b283b8375e..de06388069e7 100644 --- a/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/drivers/gpu/drm/mgag200/mgag200_main.c @@ -82,12 +82,19 @@ static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem) int orig; int test1, test2; int orig1, orig2; + unsigned int vram_size; /* Probe */ orig = ioread16(mem); iowrite16(0, mem); - for (offset = 0x100000; offset < mdev->mc.vram_window; offset += 0x4000) { + vram_size = mdev->mc.vram_window; + + if ((mdev->type == G200_EW3) && (vram_size >= 0x1000000)) { + vram_size = vram_size - 0x400000; + } + + for (offset = 0x100000; offset < vram_size; offset += 0x4000) { orig1 = ioread8(mem + offset); orig2 = ioread8(mem + offset + 0x100); @@ -345,23 +352,15 @@ mgag200_dumb_mmap_offset(struct drm_file *file, uint64_t *offset) { struct drm_gem_object *obj; - int ret; struct mgag200_bo *bo; - mutex_lock(&dev->struct_mutex); obj = drm_gem_object_lookup(dev, file, handle); - if (obj == NULL) { - ret = -ENOENT; - goto out_unlock; - } + if (obj == NULL) + return -ENOENT; bo = gem_to_mga_bo(obj); *offset = mgag200_bo_mmap_offset(bo); - drm_gem_object_unreference(obj); - ret = 0; -out_unlock: - mutex_unlock(&dev->struct_mutex); - return ret; - + drm_gem_object_unreference_unlocked(obj); + return 0; } diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index cd75cff096e1..c99d3fe12881 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -104,6 +104,8 @@ static bool mga_crtc_mode_fixup(struct drm_crtc *crtc, return true; } +#define P_ARRAY_SIZE 9 + static int mga_g200se_set_plls(struct mga_device *mdev, long clock) { unsigned int vcomax, vcomin, pllreffreq; @@ -111,37 +113,97 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock) unsigned int testp, testm, testn; unsigned int p, m, n; unsigned int computed; + unsigned int pvalues_e4[P_ARRAY_SIZE] = {16, 14, 12, 10, 8, 6, 4, 2, 1}; + unsigned int fvv; + unsigned int i; + + if (mdev->unique_rev_id <= 0x03) { + + m = n = p = 0; + vcomax = 320000; + vcomin = 160000; + pllreffreq = 25000; + + delta = 0xffffffff; + permitteddelta = clock * 5 / 1000; + + for (testp = 8; testp > 0; 
testp /= 2) { + if (clock * testp > vcomax) + continue; + if (clock * testp < vcomin) + continue; + + for (testn = 17; testn < 256; testn++) { + for (testm = 1; testm < 32; testm++) { + computed = (pllreffreq * testn) / + (testm * testp); + if (computed > clock) + tmpdelta = computed - clock; + else + tmpdelta = clock - computed; + if (tmpdelta < delta) { + delta = tmpdelta; + m = testm - 1; + n = testn - 1; + p = testp - 1; + } + } + } + } + } else { - m = n = p = 0; - vcomax = 320000; - vcomin = 160000; - pllreffreq = 25000; - delta = 0xffffffff; - permitteddelta = clock * 5 / 1000; + m = n = p = 0; + vcomax = 1600000; + vcomin = 800000; + pllreffreq = 25000; - for (testp = 8; testp > 0; testp /= 2) { - if (clock * testp > vcomax) - continue; - if (clock * testp < vcomin) - continue; + if (clock < 25000) + clock = 25000; - for (testn = 17; testn < 256; testn++) { - for (testm = 1; testm < 32; testm++) { - computed = (pllreffreq * testn) / - (testm * testp); - if (computed > clock) - tmpdelta = computed - clock; - else - tmpdelta = clock - computed; - if (tmpdelta < delta) { - delta = tmpdelta; - m = testm - 1; - n = testn - 1; - p = testp - 1; + clock = clock * 2; + + delta = 0xFFFFFFFF; + /* Permitted delta is 0.5% per the VESA specification */ + permitteddelta = clock * 5 / 1000; + + for (i = 0 ; i < P_ARRAY_SIZE ; i++) { + testp = pvalues_e4[i]; + + if ((clock * testp) > vcomax) + continue; + if ((clock * testp) < vcomin) + continue; + + for (testn = 50; testn <= 256; testn++) { + for (testm = 1; testm <= 32; testm++) { + computed = (pllreffreq * testn) / + (testm * testp); + if (computed > clock) + tmpdelta = computed - clock; + else + tmpdelta = clock - computed; + + if (tmpdelta < delta) { + delta = tmpdelta; + m = testm - 1; + n = testn - 1; + p = testp - 1; + } } } } + + fvv = pllreffreq * testn / testm; + fvv = (fvv - 800000) / 50000; + + if (fvv > 15) + fvv = 15; + + p |= (fvv << 4); + m |= 0x80; + + clock = clock / 2; } if (delta > permitteddelta) { @@ -159,7 +221,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) { unsigned int vcomax, vcomin, pllreffreq; unsigned int delta, tmpdelta; - unsigned int testp, testm, testn; + unsigned int testp, testm, testn, testp2; unsigned int p, m, n; unsigned int computed; int i, j, tmpcount, vcount; @@ -167,31 +229,71 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) u8 tmp; m = n = p = 0; - vcomax = 550000; - vcomin = 150000; - pllreffreq = 48000; delta = 0xffffffff; - for (testp = 1; testp < 9; testp++) { - if (clock * testp > vcomax) - continue; - if (clock * testp < vcomin) - continue; + if (mdev->type == G200_EW3) { + + vcomax = 800000; + vcomin = 400000; + pllreffreq = 25000; + + for (testp = 1; testp < 8; testp++) { + for (testp2 = 1; testp2 < 8; testp2++) { + if (testp < testp2) + continue; + if ((clock * testp * testp2) > vcomax) + continue; + if ((clock * testp * testp2) < vcomin) + continue; + for (testm = 1; testm < 26; testm++) { + for (testn = 32; testn < 2048 ; testn++) { + computed = (pllreffreq * testn) / + (testm * testp * testp2); + if (computed > clock) + tmpdelta = computed - clock; + else + tmpdelta = clock - computed; + if (tmpdelta < delta) { + delta = tmpdelta; + m = ((testn & 0x100) >> 1) | + (testm); + n = (testn & 0xFF); + p = ((testn & 0x600) >> 3) | + (testp2 << 3) | + (testp); + } + } + } + } + } + } else { - for (testm = 1; testm < 17; testm++) { - for (testn = 1; testn < 151; testn++) { - computed = (pllreffreq * testn) / - (testm * testp); - if (computed > clock) -
tmpdelta = computed - clock; - else - tmpdelta = clock - computed; - if (tmpdelta < delta) { - delta = tmpdelta; - n = testn - 1; - m = (testm - 1) | ((n >> 1) & 0x80); - p = testp - 1; + vcomax = 550000; + vcomin = 150000; + pllreffreq = 48000; + + for (testp = 1; testp < 9; testp++) { + if (clock * testp > vcomax) + continue; + if (clock * testp < vcomin) + continue; + + for (testm = 1; testm < 17; testm++) { + for (testn = 1; testn < 151; testn++) { + computed = (pllreffreq * testn) / + (testm * testp); + if (computed > clock) + tmpdelta = computed - clock; + else + tmpdelta = clock - computed; + if (tmpdelta < delta) { + delta = tmpdelta; + n = testn - 1; + m = (testm - 1) | + ((n >> 1) & 0x80); + p = testp - 1; + } } } } @@ -569,6 +671,7 @@ static int mga_crtc_set_plls(struct mga_device *mdev, long clock) return mga_g200se_set_plls(mdev, clock); break; case G200_WB: + case G200_EW3: return mga_g200wb_set_plls(mdev, clock); break; case G200_EV: @@ -820,6 +923,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, option2 = 0x00008000; break; case G200_WB: + case G200_EW3: dacvalue[MGA1064_VREF_CTL] = 0x07; option = 0x41049120; option2 = 0x0000b000; @@ -875,7 +979,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, if (IS_G200_SE(mdev) && ((i == 0x2c) || (i == 0x2d) || (i == 0x2e))) continue; - if ((mdev->type == G200_EV || mdev->type == G200_WB || mdev->type == G200_EH) && + if ((mdev->type == G200_EV || + mdev->type == G200_WB || + mdev->type == G200_EH || + mdev->type == G200_EW3) && (i >= 0x44) && (i <= 0x4e)) continue; @@ -977,7 +1084,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, else ext_vga[3] = ((1 << bppshift) - 1) | 0x80; ext_vga[4] = 0; - if (mdev->type == G200_WB) + if (mdev->type == G200_WB || mdev->type == G200_EW3) ext_vga[1] |= 0x88; /* Set pixel clocks */ @@ -993,6 +1100,9 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, if (mdev->type == G200_ER) WREG_ECRT(0x24, 0x5); + if (mdev->type == G200_EW3) + WREG_ECRT(0x34, 0x5); + if (mdev->type == G200_EV) { WREG_ECRT(6, 0); } @@ -1205,7 +1315,7 @@ static void mga_crtc_prepare(struct drm_crtc *crtc) WREG_SEQ(1, tmp | 0x20); } - if (mdev->type == G200_WB) + if (mdev->type == G200_WB || mdev->type == G200_EW3) mga_g200wb_prepare(crtc); WREG_CRT(17, 0); @@ -1222,7 +1332,7 @@ static void mga_crtc_commit(struct drm_crtc *crtc) const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; u8 tmp; - if (mdev->type == G200_WB) + if (mdev->type == G200_WB || mdev->type == G200_EW3) mga_g200wb_commit(crtc); if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) { @@ -1492,7 +1602,7 @@ static int mga_vga_mode_valid(struct drm_connector *connector, if (mga_vga_calculate_mode_bandwidth(mode, bpp) > (24400 * 1024)) return MODE_BANDWIDTH; - } else if (mdev->unique_rev_id >= 0x02) { + } else if (mdev->unique_rev_id == 0x02) { if (mode->hdisplay > 1920) return MODE_VIRTUAL_X; if (mode->vdisplay > 1200) diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 08ba8d0d93f5..8e6c7c638e24 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -9,6 +9,7 @@ config DRM_MSM select DRM_PANEL select SHMEM select TMPFS + select QCOM_SCM default y help DRM/KMS driver for MSM/snapdragon. @@ -53,3 +54,17 @@ config DRM_MSM_DSI_PLL help Choose this option to enable DSI PLL driver which provides DSI source clocks under common clock framework. 
+ +config DRM_MSM_DSI_28NM_PHY + bool "Enable DSI 28nm PHY driver in MSM DRM" + depends on DRM_MSM_DSI + default y + help + Choose this option if the 28nm DSI PHY is used on the platform. + +config DRM_MSM_DSI_20NM_PHY + bool "Enable DSI 20nm PHY driver in MSM DRM" + depends on DRM_MSM_DSI + default y + help + Choose this option if the 20nm DSI PHY is used on the platform. diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 16a81b94d6f0..0a543eb5e5d7 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -1,5 +1,5 @@ ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi +ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi msm-y := \ adreno/adreno_device.o \ @@ -10,6 +10,7 @@ msm-y := \ hdmi/hdmi_audio.o \ hdmi/hdmi_bridge.o \ hdmi/hdmi_connector.o \ + hdmi/hdmi_hdcp.o \ hdmi/hdmi_i2c.o \ hdmi/hdmi_phy_8960.o \ hdmi/hdmi_phy_8x60.o \ @@ -53,12 +54,18 @@ msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \ + dsi/dsi_cfg.o \ dsi/dsi_host.o \ dsi/dsi_manager.o \ - dsi/dsi_phy.o \ + dsi/phy/dsi_phy.o \ mdp/mdp5/mdp5_cmd_encoder.o -msm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/pll/dsi_pll.o \ - dsi/pll/dsi_pll_28nm.o +msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o +msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o + +ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y) +msm-y += dsi/pll/dsi_pll.o +msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o +endif obj-$(CONFIG_DRM_MSM) += msm.o diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h index 23176e402796..0261f0d31612 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h @@ -8,15 +8,15 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19) - -Copyright (C) 2013-2014 by the following authors: +- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 
18:27:06) + +Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h index 1c599e5cf318..48d133711487 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h @@ -8,13 +8,13 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19) +- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06) Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) @@ -326,6 +326,13 @@ enum a3xx_tex_type { A3XX_TEX_3D = 3, }; +enum a3xx_tex_msaa { + A3XX_TPL1_MSAA1X = 0, + A3XX_TPL1_MSAA2X = 1, + A3XX_TPL1_MSAA4X = 2, + A3XX_TPL1_MSAA8X = 3, +}; + #define A3XX_INT0_RBBM_GPU_IDLE 0x00000001 #define A3XX_INT0_RBBM_AHB_ERROR 0x00000002 #define A3XX_INT0_RBBM_REG_TIMEOUT 0x00000004 @@ -2652,6 +2659,7 @@ static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val) #define REG_A3XX_VGT_IMMED_DATA 0x000021fd #define REG_A3XX_TEX_SAMP_0 0x00000000 +#define A3XX_TEX_SAMP_0_CLAMPENABLE 0x00000001 #define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002 #define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c #define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2 @@ -2695,6 +2703,7 @@ static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val { return ((val) << A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT) & A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK; } +#define A3XX_TEX_SAMP_0_CUBEMAPSEAMLESSFILTOFF 0x01000000 #define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000 #define REG_A3XX_TEX_SAMP_1 0x00000001 @@ -2750,6 +2759,12 @@ static inline uint32_t A3XX_TEX_CONST_0_MIPLVLS(uint32_t val) { return ((val) << A3XX_TEX_CONST_0_MIPLVLS__SHIFT) & A3XX_TEX_CONST_0_MIPLVLS__MASK; } +#define A3XX_TEX_CONST_0_MSAATEX__MASK 0x00300000 +#define A3XX_TEX_CONST_0_MSAATEX__SHIFT 20 +static inline uint32_t A3XX_TEX_CONST_0_MSAATEX(enum a3xx_tex_msaa val) +{ + return 
((val) << A3XX_TEX_CONST_0_MSAATEX__SHIFT) & A3XX_TEX_CONST_0_MSAATEX__MASK; +} #define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000 #define A3XX_TEX_CONST_0_FMT__SHIFT 22 static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val) @@ -2785,7 +2800,7 @@ static inline uint32_t A3XX_TEX_CONST_1_FETCHSIZE(enum a3xx_tex_fetchsize val) } #define REG_A3XX_TEX_CONST_2 0x00000002 -#define A3XX_TEX_CONST_2_INDX__MASK 0x000000ff +#define A3XX_TEX_CONST_2_INDX__MASK 0x000001ff #define A3XX_TEX_CONST_2_INDX__SHIFT 0 static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val) { @@ -2805,7 +2820,7 @@ static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val) } #define REG_A3XX_TEX_CONST_3 0x00000003 -#define A3XX_TEX_CONST_3_LAYERSZ1__MASK 0x00007fff +#define A3XX_TEX_CONST_3_LAYERSZ1__MASK 0x0001ffff #define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT 0 static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val) { diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h index 3f06ecf62583..ac55066db3b0 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h @@ -8,13 +8,13 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19) +- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06) Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) @@ -227,6 +227,7 @@ enum a4xx_depth_format { DEPTH4_NONE = 0, DEPTH4_16 = 1, DEPTH4_24_8 = 2, + DEPTH4_32 = 3, }; enum a4xx_tess_spacing { @@ -429,7 +430,7 @@ static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val) return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK; } #define A4XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00002000 -#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0x007fc000 +#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xffffc000 #define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 14 static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val) { 
@@ -439,7 +440,7 @@ static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val) static inline uint32_t REG_A4XX_RB_MRT_BASE(uint32_t i0) { return 0x000020a6 + 0x5*i0; } static inline uint32_t REG_A4XX_RB_MRT_CONTROL3(uint32_t i0) { return 0x000020a7 + 0x5*i0; } -#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK 0x0001fff8 +#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK 0x03fffff8 #define A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT 3 static inline uint32_t A4XX_RB_MRT_CONTROL3_STRIDE(uint32_t val) { @@ -570,6 +571,15 @@ static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val) return ((val) << A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT) & A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK; } +#define REG_A4XX_RB_SAMPLE_COUNT_CONTROL 0x000020fa +#define A4XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002 +#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK 0xfffffffc +#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT 2 +static inline uint32_t A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR(uint32_t val) +{ + return ((val >> 2) << A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT) & A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK; +} + #define REG_A4XX_RB_RENDER_COMPONENTS 0x000020fb #define A4XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f #define A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0 @@ -811,6 +821,23 @@ static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v #define REG_A4XX_RB_STENCIL_CONTROL2 0x00002107 #define A4XX_RB_STENCIL_CONTROL2_STENCIL_BUFFER 0x00000001 +#define REG_A4XX_RB_STENCIL_INFO 0x00002108 +#define A4XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001 +#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK 0xfffff000 +#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 12 +static inline uint32_t A4XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val) +{ + return ((val >> 12) << A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK; +} + +#define REG_A4XX_RB_STENCIL_PITCH 0x00002109 +#define A4XX_RB_STENCIL_PITCH__MASK 0xffffffff +#define A4XX_RB_STENCIL_PITCH__SHIFT 0 +static inline uint32_t A4XX_RB_STENCIL_PITCH(uint32_t val) +{ + return ((val >> 5) << A4XX_RB_STENCIL_PITCH__SHIFT) & A4XX_RB_STENCIL_PITCH__MASK; +} + #define REG_A4XX_RB_STENCILREFMASK 0x0000210b #define A4XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff #define A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0 @@ -1433,6 +1460,7 @@ static inline uint32_t A4XX_SP_FS_MRT_REG_MRTFORMAT(enum a4xx_color_fmt val) { return ((val) << A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT) & A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK; } +#define A4XX_SP_FS_MRT_REG_COLOR_SRGB 0x00040000 #define REG_A4XX_SP_CS_CTRL_REG0 0x00002300 @@ -1470,6 +1498,76 @@ static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) #define REG_A4XX_SP_HS_LENGTH_REG 0x00002312 +#define REG_A4XX_SP_DS_PARAM_REG 0x0000231a +#define A4XX_SP_DS_PARAM_REG_POSREGID__MASK 0x000000ff +#define A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT 0 +static inline uint32_t A4XX_SP_DS_PARAM_REG_POSREGID(uint32_t val) +{ + return ((val) << A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_DS_PARAM_REG_POSREGID__MASK; +} +#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK 0xfff00000 +#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT 20 +static inline uint32_t A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR(uint32_t val) +{ + return ((val) << A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK; +} + +static inline uint32_t REG_A4XX_SP_DS_OUT(uint32_t i0) { return 0x0000231b + 0x1*i0; } + +static inline uint32_t REG_A4XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000231b + 0x1*i0; } +#define 
A4XX_SP_DS_OUT_REG_A_REGID__MASK 0x000001ff +#define A4XX_SP_DS_OUT_REG_A_REGID__SHIFT 0 +static inline uint32_t A4XX_SP_DS_OUT_REG_A_REGID(uint32_t val) +{ + return ((val) << A4XX_SP_DS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_A_REGID__MASK; +} +#define A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK 0x00001e00 +#define A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT 9 +static inline uint32_t A4XX_SP_DS_OUT_REG_A_COMPMASK(uint32_t val) +{ + return ((val) << A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK; +} +#define A4XX_SP_DS_OUT_REG_B_REGID__MASK 0x01ff0000 +#define A4XX_SP_DS_OUT_REG_B_REGID__SHIFT 16 +static inline uint32_t A4XX_SP_DS_OUT_REG_B_REGID(uint32_t val) +{ + return ((val) << A4XX_SP_DS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_B_REGID__MASK; +} +#define A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK 0x1e000000 +#define A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT 25 +static inline uint32_t A4XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val) +{ + return ((val) << A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK; +} + +static inline uint32_t REG_A4XX_SP_DS_VPC_DST(uint32_t i0) { return 0x0000232c + 0x1*i0; } + +static inline uint32_t REG_A4XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000232c + 0x1*i0; } +#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff +#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT 0 +static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC0(uint32_t val) +{ + return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK; +} +#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00 +#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT 8 +static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC1(uint32_t val) +{ + return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK; +} +#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000 +#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT 16 +static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC2(uint32_t val) +{ + return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK; +} +#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK 0xff000000 +#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT 24 +static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val) +{ + return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK; +} + #define REG_A4XX_SP_DS_OBJ_OFFSET_REG 0x00002334 #define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 #define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16 @@ -1492,6 +1590,82 @@ static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) #define REG_A4XX_SP_DS_LENGTH_REG 0x00002339 +#define REG_A4XX_SP_GS_PARAM_REG 0x00002341 +#define A4XX_SP_GS_PARAM_REG_POSREGID__MASK 0x000000ff +#define A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT 0 +static inline uint32_t A4XX_SP_GS_PARAM_REG_POSREGID(uint32_t val) +{ + return ((val) << A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_POSREGID__MASK; +} +#define A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK 0x0000ff00 +#define A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT 8 +static inline uint32_t A4XX_SP_GS_PARAM_REG_PRIMREGID(uint32_t val) +{ + return ((val) << A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK; +} +#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK 0xfff00000 +#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT 20 +static inline uint32_t A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR(uint32_t val) +{ + return ((val) << A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & 
A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK; +} + +static inline uint32_t REG_A4XX_SP_GS_OUT(uint32_t i0) { return 0x00002342 + 0x1*i0; } + +static inline uint32_t REG_A4XX_SP_GS_OUT_REG(uint32_t i0) { return 0x00002342 + 0x1*i0; } +#define A4XX_SP_GS_OUT_REG_A_REGID__MASK 0x000001ff +#define A4XX_SP_GS_OUT_REG_A_REGID__SHIFT 0 +static inline uint32_t A4XX_SP_GS_OUT_REG_A_REGID(uint32_t val) +{ + return ((val) << A4XX_SP_GS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_A_REGID__MASK; +} +#define A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK 0x00001e00 +#define A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT 9 +static inline uint32_t A4XX_SP_GS_OUT_REG_A_COMPMASK(uint32_t val) +{ + return ((val) << A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK; +} +#define A4XX_SP_GS_OUT_REG_B_REGID__MASK 0x01ff0000 +#define A4XX_SP_GS_OUT_REG_B_REGID__SHIFT 16 +static inline uint32_t A4XX_SP_GS_OUT_REG_B_REGID(uint32_t val) +{ + return ((val) << A4XX_SP_GS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_B_REGID__MASK; +} +#define A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK 0x1e000000 +#define A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT 25 +static inline uint32_t A4XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val) +{ + return ((val) << A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK; +} + +static inline uint32_t REG_A4XX_SP_GS_VPC_DST(uint32_t i0) { return 0x00002353 + 0x1*i0; } + +static inline uint32_t REG_A4XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x00002353 + 0x1*i0; } +#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff +#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT 0 +static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC0(uint32_t val) +{ + return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK; +} +#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00 +#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT 8 +static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC1(uint32_t val) +{ + return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK; +} +#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000 +#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT 16 +static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC2(uint32_t val) +{ + return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK; +} +#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK 0xff000000 +#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT 24 +static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val) +{ + return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK; +} + #define REG_A4XX_SP_GS_OBJ_OFFSET_REG 0x0000235b #define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 #define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16 @@ -1693,6 +1867,18 @@ static inline uint32_t A4XX_VFD_CONTROL_3_REGID_VTXCNT(uint32_t val) { return ((val) << A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT) & A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK; } +#define A4XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000 +#define A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16 +static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val) +{ + return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSX__MASK; +} +#define A4XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000 +#define A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24 +static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val) +{ + return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSY__MASK; +} #define 
REG_A4XX_VFD_CONTROL_4 0x00002204 @@ -2489,6 +2675,8 @@ static inline uint32_t A4XX_UNKNOWN_20F7(float val) #define REG_A4XX_UNKNOWN_22D7 0x000022d7 +#define REG_A4XX_UNKNOWN_2352 0x00002352 + #define REG_A4XX_TEX_SAMP_0 0x00000000 #define A4XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001 #define A4XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006 diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h index 9562a1fa552b..399a9e528139 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h +++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h @@ -8,15 +8,15 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19) - -Copyright (C) 2013-2014 by the following authors: +- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06) + +Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h index bd5b23bf9041..41904fed1350 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h +++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h @@ -8,13 +8,13 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35) -- 
/home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19) +- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06) Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) @@ -67,7 +67,7 @@ enum vgt_event_type { enum pc_di_primtype { DI_PT_NONE = 0, - DI_PT_POINTLIST_A2XX = 1, + DI_PT_POINTLIST_PSIZE = 1, DI_PT_LINELIST = 2, DI_PT_LINESTRIP = 3, DI_PT_TRILIST = 4, @@ -75,7 +75,7 @@ enum pc_di_primtype { DI_PT_TRISTRIP = 6, DI_PT_LINELOOP = 7, DI_PT_RECTLIST = 8, - DI_PT_POINTLIST_A3XX = 9, + DI_PT_POINTLIST = 9, DI_PT_LINE_ADJ = 10, DI_PT_LINESTRIP_ADJ = 11, DI_PT_TRI_ADJ = 12, diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index 1f2561e2ff71..6edcd6f57e70 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -15,10 +15,10 @@ struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi) { - if (!msm_dsi || !msm_dsi->panel) + if (!msm_dsi || !msm_dsi_device_connected(msm_dsi)) return NULL; - return (msm_dsi->panel_flags & MIPI_DSI_MODE_VIDEO) ? + return (msm_dsi->device_flags & MIPI_DSI_MODE_VIDEO) ? msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] : msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID]; } @@ -74,19 +74,15 @@ static void dsi_destroy(struct msm_dsi *msm_dsi) static struct msm_dsi *dsi_init(struct platform_device *pdev) { - struct msm_dsi *msm_dsi = NULL; + struct msm_dsi *msm_dsi; int ret; - if (!pdev) { - ret = -ENXIO; - goto fail; - } + if (!pdev) + return ERR_PTR(-ENXIO); msm_dsi = devm_kzalloc(&pdev->dev, sizeof(*msm_dsi), GFP_KERNEL); - if (!msm_dsi) { - ret = -ENOMEM; - goto fail; - } + if (!msm_dsi) + return ERR_PTR(-ENOMEM); DBG("dsi probed=%p", msm_dsi); msm_dsi->pdev = pdev; @@ -95,24 +91,22 @@ static struct msm_dsi *dsi_init(struct platform_device *pdev) /* Init dsi host */ ret = msm_dsi_host_init(msm_dsi); if (ret) - goto fail; + goto destroy_dsi; /* GET dsi PHY */ ret = dsi_get_phy(msm_dsi); if (ret) - goto fail; + goto destroy_dsi; /* Register to dsi manager */ ret = msm_dsi_manager_register(msm_dsi); if (ret) - goto fail; + goto destroy_dsi; return msm_dsi; -fail: - if (msm_dsi) - dsi_destroy(msm_dsi); - +destroy_dsi: + dsi_destroy(msm_dsi); return ERR_PTR(ret); } @@ -196,6 +190,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]) { struct msm_drm_private *priv = dev->dev_private; + struct drm_bridge *ext_bridge; int ret, i; if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] || @@ -223,10 +218,25 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, msm_dsi->encoders[i] = encoders[i]; } - msm_dsi->connector = msm_dsi_manager_connector_init(msm_dsi->id); + /* + * check if the dsi encoder output is connected to a panel or an + * external bridge. 
We create a connector only if we're connected to a + * drm_panel device. When we're connected to an external bridge, we + * assume that the drm_bridge driver will create the connector itself. + */ + ext_bridge = msm_dsi_host_get_bridge(msm_dsi->host); + + if (ext_bridge) + msm_dsi->connector = + msm_dsi_manager_ext_bridge_init(msm_dsi->id); + else + msm_dsi->connector = + msm_dsi_manager_connector_init(msm_dsi->id); + if (IS_ERR(msm_dsi->connector)) { ret = PTR_ERR(msm_dsi->connector); - dev_err(dev->dev, "failed to create dsi connector: %d\n", ret); + dev_err(dev->dev, + "failed to create dsi connector: %d\n", ret); msm_dsi->connector = NULL; goto fail; } @@ -242,10 +252,12 @@ fail: msm_dsi_manager_bridge_destroy(msm_dsi->bridge); msm_dsi->bridge = NULL; } - if (msm_dsi->connector) { + + /* don't destroy connector if we didn't make it */ + if (msm_dsi->connector && !msm_dsi->external_bridge) msm_dsi->connector->funcs->destroy(msm_dsi->connector); - msm_dsi->connector = NULL; - } + + msm_dsi->connector = NULL; } return ret; diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index 92d697de4858..5f5a3732cdf6 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -27,21 +27,10 @@ #define DSI_1 1 #define DSI_MAX 2 -#define DSI_CLOCK_MASTER DSI_0 -#define DSI_CLOCK_SLAVE DSI_1 - -#define DSI_LEFT DSI_0 -#define DSI_RIGHT DSI_1 - -/* According to the current drm framework sequence, take the encoder of - * DSI_1 as master encoder - */ -#define DSI_ENCODER_MASTER DSI_1 -#define DSI_ENCODER_SLAVE DSI_0 - enum msm_dsi_phy_type { MSM_DSI_PHY_28NM_HPM, MSM_DSI_PHY_28NM_LP, + MSM_DSI_PHY_20NM, MSM_DSI_PHY_MAX }; @@ -65,13 +54,21 @@ struct msm_dsi { struct drm_device *dev; struct platform_device *pdev; + /* connector managed by us when we're connected to a drm_panel */ struct drm_connector *connector; + /* internal dsi bridge attached to MDP interface */ struct drm_bridge *bridge; struct mipi_dsi_host *host; struct msm_dsi_phy *phy; + + /* + * panel/external_bridge connected to dsi bridge output, only one of the + * two can be valid at a time + */ struct drm_panel *panel; - unsigned long panel_flags; + struct drm_bridge *external_bridge; + unsigned long device_flags; struct device *phy_dev; bool phy_enabled; @@ -86,6 +83,7 @@ struct msm_dsi { struct drm_bridge *msm_dsi_manager_bridge_init(u8 id); void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge); struct drm_connector *msm_dsi_manager_connector_init(u8 id); +struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id); int msm_dsi_manager_phy_enable(int id, const unsigned long bit_rate, const unsigned long esc_rate, u32 *clk_pre, u32 *clk_post); @@ -96,6 +94,11 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi); void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi); /* msm dsi */ +static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi) +{ + return msm_dsi->panel || msm_dsi->external_bridge; +} + struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi); /* dsi pll */ @@ -106,6 +109,8 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, void msm_dsi_pll_destroy(struct msm_dsi_pll *pll); int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll, struct clk **byte_clk_provider, struct clk **pixel_clk_provider); +void msm_dsi_pll_save_state(struct msm_dsi_pll *pll); +int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll); #else static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, enum msm_dsi_phy_type type, int id) { @@ 
-119,6 +124,13 @@ static inline int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll, { return -ENODEV; } +static inline void msm_dsi_pll_save_state(struct msm_dsi_pll *pll) +{ +} +static inline int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll) +{ + return 0; +} #endif /* dsi host */ @@ -140,6 +152,7 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, struct drm_display_mode *mode); struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host, unsigned long *panel_flags); +struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host); int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer); void msm_dsi_host_unregister(struct mipi_dsi_host *host); int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host, @@ -153,9 +166,9 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi); struct msm_dsi_phy; void msm_dsi_phy_driver_register(void); void msm_dsi_phy_driver_unregister(void); -int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel, +int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, const unsigned long bit_rate, const unsigned long esc_rate); -int msm_dsi_phy_disable(struct msm_dsi_phy *phy); +void msm_dsi_phy_disable(struct msm_dsi_phy *phy); void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy, u32 *clk_pre, u32 *clk_post); struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy); diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h index 9791ea04bcbc..1d2e32f0817b 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h @@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57) +- 
/home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) @@ -382,6 +382,11 @@ static inline uint32_t DSI_TRIG_CTRL_STREAM(uint32_t val) #define REG_DSI_TRIG_DMA 0x0000008c #define REG_DSI_DLN0_PHY_ERR 0x000000b0 +#define DSI_DLN0_PHY_ERR_DLN0_ERR_ESC 0x00000001 +#define DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC 0x00000010 +#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL 0x00000100 +#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 0x00001000 +#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1 0x00010000 #define REG_DSI_TIMEOUT_STATUS 0x000000bc @@ -435,6 +440,9 @@ static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val) #define REG_DSI_PHY_RESET 0x00000128 #define DSI_PHY_RESET_RESET 0x00000001 +#define REG_DSI_T_CLK_PRE_EXTEND 0x0000017c +#define DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK 0x00000001 + #define REG_DSI_RDBK_DATA_CTRL 0x000001d0 #define DSI_RDBK_DATA_CTRL_COUNT__MASK 0x00ff0000 #define DSI_RDBK_DATA_CTRL_COUNT__SHIFT 16 @@ -830,6 +838,7 @@ static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val) #define REG_DSI_28nm_PHY_BIST_CTRL_5 0x000001c8 #define REG_DSI_28nm_PHY_GLBL_TEST_CTRL 0x000001d4 +#define DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000001 #define REG_DSI_28nm_PHY_LDO_CNTRL 0x000001dc @@ -994,5 +1003,185 @@ static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(uint32_t val) #define REG_DSI_28nm_PHY_PLL_CTRL_54 0x000000d4 +static inline uint32_t REG_DSI_20nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; } + +static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; } + +static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; } + +static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; } + +static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; } + +static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; } + +static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; } + +static inline uint32_t REG_DSI_20nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; } + +static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; } + +static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; } + +#define REG_DSI_20nm_PHY_LNCK_CFG_0 0x00000100 + +#define REG_DSI_20nm_PHY_LNCK_CFG_1 0x00000104 + +#define REG_DSI_20nm_PHY_LNCK_CFG_2 0x00000108 + +#define REG_DSI_20nm_PHY_LNCK_CFG_3 0x0000010c + +#define REG_DSI_20nm_PHY_LNCK_CFG_4 0x00000110 + +#define REG_DSI_20nm_PHY_LNCK_TEST_DATAPATH 0x00000114 + +#define REG_DSI_20nm_PHY_LNCK_DEBUG_SEL 0x00000118 + +#define REG_DSI_20nm_PHY_LNCK_TEST_STR0 0x0000011c + +#define REG_DSI_20nm_PHY_LNCK_TEST_STR1 0x00000120 + +#define REG_DSI_20nm_PHY_TIMING_CTRL_0 0x00000140 +#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff +#define 
DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK; +} + +#define REG_DSI_20nm_PHY_TIMING_CTRL_1 0x00000144 +#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff +#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK; +} + +#define REG_DSI_20nm_PHY_TIMING_CTRL_2 0x00000148 +#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff +#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK; +} + +#define REG_DSI_20nm_PHY_TIMING_CTRL_3 0x0000014c +#define DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8 0x00000001 + +#define REG_DSI_20nm_PHY_TIMING_CTRL_4 0x00000150 +#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff +#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK; +} + +#define REG_DSI_20nm_PHY_TIMING_CTRL_5 0x00000154 +#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff +#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK; +} + +#define REG_DSI_20nm_PHY_TIMING_CTRL_6 0x00000158 +#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff +#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK; +} + +#define REG_DSI_20nm_PHY_TIMING_CTRL_7 0x0000015c +#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff +#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK; +} + +#define REG_DSI_20nm_PHY_TIMING_CTRL_8 0x00000160 +#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff +#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK; +} + +#define REG_DSI_20nm_PHY_TIMING_CTRL_9 0x00000164 +#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007 +#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK; +} +#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070 +#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK; +} + +#define REG_DSI_20nm_PHY_TIMING_CTRL_10 0x00000168 
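[Editor's note] The register definitions in these generated headers all follow the same rnndb convention: a multi-bit field gets a __MASK/__SHIFT pair plus a static inline packer that shifts the value into its field and masks off any overflow, while single-bit flags are plain defines. A hedged usage sketch, assuming a hypothetical phy_base pointer (msm_writel is the driver's MMIO helper used elsewhere in this diff):

/* Editor's sketch: program the TA fields of TIMING_CTRL_9. */
static void dsi_20nm_phy_set_ta(void __iomem *phy_base)
{
	/* each packer masks out-of-range values, so a bad argument
	 * cannot corrupt the neighbouring fields of the word */
	u32 val = DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(3) |
		  DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(0);

	msm_writel(val, phy_base + REG_DSI_20nm_PHY_TIMING_CTRL_9);
}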
+#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007 +#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK; +} + +#define REG_DSI_20nm_PHY_TIMING_CTRL_11 0x0000016c +#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff +#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0 +static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val) +{ + return ((val) << DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK; +} + +#define REG_DSI_20nm_PHY_CTRL_0 0x00000170 + +#define REG_DSI_20nm_PHY_CTRL_1 0x00000174 + +#define REG_DSI_20nm_PHY_CTRL_2 0x00000178 + +#define REG_DSI_20nm_PHY_CTRL_3 0x0000017c + +#define REG_DSI_20nm_PHY_CTRL_4 0x00000180 + +#define REG_DSI_20nm_PHY_STRENGTH_0 0x00000184 + +#define REG_DSI_20nm_PHY_STRENGTH_1 0x00000188 + +#define REG_DSI_20nm_PHY_BIST_CTRL_0 0x000001b4 + +#define REG_DSI_20nm_PHY_BIST_CTRL_1 0x000001b8 + +#define REG_DSI_20nm_PHY_BIST_CTRL_2 0x000001bc + +#define REG_DSI_20nm_PHY_BIST_CTRL_3 0x000001c0 + +#define REG_DSI_20nm_PHY_BIST_CTRL_4 0x000001c4 + +#define REG_DSI_20nm_PHY_BIST_CTRL_5 0x000001c8 + +#define REG_DSI_20nm_PHY_GLBL_TEST_CTRL 0x000001d4 +#define DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000001 + +#define REG_DSI_20nm_PHY_LDO_CNTRL 0x000001dc + +#define REG_DSI_20nm_PHY_REGULATOR_CTRL_0 0x00000000 + +#define REG_DSI_20nm_PHY_REGULATOR_CTRL_1 0x00000004 + +#define REG_DSI_20nm_PHY_REGULATOR_CTRL_2 0x00000008 + +#define REG_DSI_20nm_PHY_REGULATOR_CTRL_3 0x0000000c + +#define REG_DSI_20nm_PHY_REGULATOR_CTRL_4 0x00000010 + +#define REG_DSI_20nm_PHY_REGULATOR_CTRL_5 0x00000014 + +#define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018 + #endif /* DSI_XML */ diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c new file mode 100644 index 000000000000..5872d5e5934f --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "dsi_cfg.h" + +/* DSI v2 is not supported yet */ +static const struct msm_dsi_config dsi_v2_cfg = { + .io_offset = 0, +}; + +static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .reg_cfg = { + .num = 4, + .regs = { + {"gdsc", -1, -1, -1, -1}, + {"vdd", 3000000, 3000000, 150000, 100}, + {"vdda", 1200000, 1200000, 100000, 100}, + {"vddio", 1800000, 1800000, 100000, 100}, + }, + }, +}; + +static const struct msm_dsi_config msm8916_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .reg_cfg = { + .num = 4, + .regs = { + {"gdsc", -1, -1, -1, -1}, + {"vdd", 2850000, 2850000, 100000, 100}, + {"vdda", 1200000, 1200000, 100000, 100}, + {"vddio", 1800000, 1800000, 100000, 100}, + }, + }, +}; + +static const struct msm_dsi_config msm8994_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .reg_cfg = { + .num = 7, + .regs = { + {"gdsc", -1, -1, -1, -1}, + {"vdda", 1250000, 1250000, 100000, 100}, + {"vddio", 1800000, 1800000, 100000, 100}, + {"vcca", 1000000, 1000000, 10000, 100}, + {"vdd", 1800000, 1800000, 100000, 100}, + {"lab_reg", -1, -1, -1, -1}, + {"ibb_reg", -1, -1, -1, -1}, + }, + } +}; + +static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { + {MSM_DSI_VER_MAJOR_V2, U32_MAX, &dsi_v2_cfg}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0, + &msm8974_apq8084_dsi_cfg}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1, + &msm8974_apq8084_dsi_cfg}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1_1, + &msm8974_apq8084_dsi_cfg}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_2, + &msm8974_apq8084_dsi_cfg}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg}, +}; + +const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor) +{ + const struct msm_dsi_cfg_handler *cfg_hnd = NULL; + int i; + + for (i = ARRAY_SIZE(dsi_cfg_handlers) - 1; i >= 0; i--) { + if ((dsi_cfg_handlers[i].major == major) && + (dsi_cfg_handlers[i].minor == minor)) { + cfg_hnd = &dsi_cfg_handlers[i]; + break; + } + } + + return cfg_hnd; +} + diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h new file mode 100644 index 000000000000..4cf887240177 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ */ + +#ifndef __MSM_DSI_CFG_H__ +#define __MSM_DSI_CFG_H__ + +#include "dsi.h" + +#define MSM_DSI_VER_MAJOR_V2 0x02 +#define MSM_DSI_VER_MAJOR_6G 0x03 +#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000 +#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000 +#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001 +#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000 +#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000 +#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 + +#define DSI_6G_REG_SHIFT 4 + +struct msm_dsi_config { + u32 io_offset; + struct dsi_reg_config reg_cfg; +}; + +struct msm_dsi_cfg_handler { + u32 major; + u32 minor; + const struct msm_dsi_config *cfg; +}; + +const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor); + +#endif /* __MSM_DSI_CFG_H__ */ + diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index de0400923303..8d82973fe9db 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -20,103 +20,15 @@ #include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/of_irq.h> +#include <linux/pinctrl/consumer.h> +#include <linux/of_graph.h> #include <linux/regulator/consumer.h> #include <linux/spinlock.h> #include <video/mipi_display.h> #include "dsi.h" #include "dsi.xml.h" - -#define MSM_DSI_VER_MAJOR_V2 0x02 -#define MSM_DSI_VER_MAJOR_6G 0x03 -#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000 -#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000 -#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001 -#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000 -#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 - -#define DSI_6G_REG_SHIFT 4 - -struct dsi_config { - u32 major; - u32 minor; - u32 io_offset; - struct dsi_reg_config reg_cfg; -}; - -static const struct dsi_config dsi_cfgs[] = { - {MSM_DSI_VER_MAJOR_V2, 0, 0, {0,} }, - { /* 8974 v1 */ - .major = MSM_DSI_VER_MAJOR_6G, - .minor = MSM_DSI_6G_VER_MINOR_V1_0, - .io_offset = DSI_6G_REG_SHIFT, - .reg_cfg = { - .num = 4, - .regs = { - {"gdsc", -1, -1, -1, -1}, - {"vdd", 3000000, 3000000, 150000, 100}, - {"vdda", 1200000, 1200000, 100000, 100}, - {"vddio", 1800000, 1800000, 100000, 100}, - }, - }, - }, - { /* 8974 v2 */ - .major = MSM_DSI_VER_MAJOR_6G, - .minor = MSM_DSI_6G_VER_MINOR_V1_1, - .io_offset = DSI_6G_REG_SHIFT, - .reg_cfg = { - .num = 4, - .regs = { - {"gdsc", -1, -1, -1, -1}, - {"vdd", 3000000, 3000000, 150000, 100}, - {"vdda", 1200000, 1200000, 100000, 100}, - {"vddio", 1800000, 1800000, 100000, 100}, - }, - }, - }, - { /* 8974 v3 */ - .major = MSM_DSI_VER_MAJOR_6G, - .minor = MSM_DSI_6G_VER_MINOR_V1_1_1, - .io_offset = DSI_6G_REG_SHIFT, - .reg_cfg = { - .num = 4, - .regs = { - {"gdsc", -1, -1, -1, -1}, - {"vdd", 3000000, 3000000, 150000, 100}, - {"vdda", 1200000, 1200000, 100000, 100}, - {"vddio", 1800000, 1800000, 100000, 100}, - }, - }, - }, - { /* 8084 */ - .major = MSM_DSI_VER_MAJOR_6G, - .minor = MSM_DSI_6G_VER_MINOR_V1_2, - .io_offset = DSI_6G_REG_SHIFT, - .reg_cfg = { - .num = 4, - .regs = { - {"gdsc", -1, -1, -1, -1}, - {"vdd", 3000000, 3000000, 150000, 100}, - {"vdda", 1200000, 1200000, 100000, 100}, - {"vddio", 1800000, 1800000, 100000, 100}, - }, - }, - }, - { /* 8916 */ - .major = MSM_DSI_VER_MAJOR_6G, - .minor = MSM_DSI_6G_VER_MINOR_V1_3_1, - .io_offset = DSI_6G_REG_SHIFT, - .reg_cfg = { - .num = 4, - .regs = { - {"gdsc", -1, -1, -1, -1}, - {"vdd", 2850000, 2850000, 100000, 100}, - {"vdda", 1200000, 1200000, 100000, 100}, - {"vddio", 1800000, 1800000, 100000, 100}, - }, - }, - }, -}; +#include "dsi_cfg.h" static int dsi_get_version(const void __iomem *base, u32 *major, u32 
*minor) { @@ -194,7 +106,7 @@ struct msm_dsi_host { struct gpio_desc *disp_en_gpio; struct gpio_desc *te_gpio; - const struct dsi_config *cfg; + const struct msm_dsi_cfg_handler *cfg_hnd; struct completion dma_comp; struct completion video_comp; @@ -212,8 +124,8 @@ struct msm_dsi_host { struct drm_display_mode *mode; - /* Panel info */ - struct device_node *panel_node; + /* connected device info */ + struct device_node *device_node; unsigned int channel; unsigned int lanes; enum mipi_dsi_pixel_format format; @@ -239,61 +151,58 @@ static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt) static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg) { - return msm_readl(msm_host->ctrl_base + msm_host->cfg->io_offset + reg); + return msm_readl(msm_host->ctrl_base + reg); } static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data) { - msm_writel(data, msm_host->ctrl_base + msm_host->cfg->io_offset + reg); + msm_writel(data, msm_host->ctrl_base + reg); } static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host); static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host); -static const struct dsi_config *dsi_get_config(struct msm_dsi_host *msm_host) +static const struct msm_dsi_cfg_handler *dsi_get_config( + struct msm_dsi_host *msm_host) { - const struct dsi_config *cfg; + const struct msm_dsi_cfg_handler *cfg_hnd = NULL; struct regulator *gdsc_reg; - int i, ret; + int ret; u32 major = 0, minor = 0; gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc"); if (IS_ERR(gdsc_reg)) { pr_err("%s: cannot get gdsc\n", __func__); - goto fail; + goto exit; } ret = regulator_enable(gdsc_reg); if (ret) { pr_err("%s: unable to enable gdsc\n", __func__); - regulator_put(gdsc_reg); - goto fail; + goto put_gdsc; } ret = clk_prepare_enable(msm_host->ahb_clk); if (ret) { pr_err("%s: unable to enable ahb_clk\n", __func__); - regulator_disable(gdsc_reg); - regulator_put(gdsc_reg); - goto fail; + goto disable_gdsc; } ret = dsi_get_version(msm_host->ctrl_base, &major, &minor); - - clk_disable_unprepare(msm_host->ahb_clk); - regulator_disable(gdsc_reg); - regulator_put(gdsc_reg); if (ret) { pr_err("%s: Invalid version\n", __func__); - goto fail; + goto disable_clks; } - for (i = 0; i < ARRAY_SIZE(dsi_cfgs); i++) { - cfg = dsi_cfgs + i; - if ((cfg->major == major) && (cfg->minor == minor)) - return cfg; - } - pr_err("%s: Version %x:%x not support\n", __func__, major, minor); + cfg_hnd = msm_dsi_cfg_get(major, minor); -fail: - return NULL; + DBG("%s: Version %x:%x\n", __func__, major, minor); + +disable_clks: + clk_disable_unprepare(msm_host->ahb_clk); +disable_gdsc: + regulator_disable(gdsc_reg); +put_gdsc: + regulator_put(gdsc_reg); +exit: + return cfg_hnd; } static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host) @@ -304,8 +213,8 @@ static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host) static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host) { struct regulator_bulk_data *s = msm_host->supplies; - const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs; - int num = msm_host->cfg->reg_cfg.num; + const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs; + int num = msm_host->cfg_hnd->cfg->reg_cfg.num; int i; DBG(""); @@ -320,8 +229,8 @@ static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host) static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host) { struct regulator_bulk_data *s = msm_host->supplies; - const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs; 
- int num = msm_host->cfg->reg_cfg.num; + const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs; + int num = msm_host->cfg_hnd->cfg->reg_cfg.num; int ret, i; DBG(""); @@ -354,8 +263,8 @@ fail: static int dsi_regulator_init(struct msm_dsi_host *msm_host) { struct regulator_bulk_data *s = msm_host->supplies; - const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs; - int num = msm_host->cfg->reg_cfg.num; + const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs; + int num = msm_host->cfg_hnd->cfg->reg_cfg.num; int i, ret; for (i = 0; i < num; i++) @@ -697,6 +606,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, { u32 flags = msm_host->mode_flags; enum mipi_dsi_pixel_format mipi_fmt = msm_host->format; + const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; u32 data = 0; if (!enable) { @@ -750,8 +660,8 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE); data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW); data |= DSI_TRIG_CTRL_STREAM(msm_host->channel); - if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) && - (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_2)) + if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) && + (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2)) data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME; dsi_write(msm_host, REG_DSI_TRIG_CTRL, data); @@ -1257,7 +1167,11 @@ static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host) status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR); - if (status) { + if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC | + DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC | + DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL | + DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 | + DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) { dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status); msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY; } @@ -1359,7 +1273,8 @@ static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host, return PTR_ERR(msm_host->disp_en_gpio); } - msm_host->te_gpio = devm_gpiod_get(panel_device, "disp-te", GPIOD_IN); + msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te", + GPIOD_IN); if (IS_ERR(msm_host->te_gpio)) { DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio)); return PTR_ERR(msm_host->te_gpio); @@ -1379,7 +1294,7 @@ static int dsi_host_attach(struct mipi_dsi_host *host, msm_host->format = dsi->format; msm_host->mode_flags = dsi->mode_flags; - msm_host->panel_node = dsi->dev.of_node; + WARN_ON(dsi->dev.of_node != msm_host->device_node); /* Some gpios defined in panel DT need to be controlled by host */ ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev); @@ -1398,7 +1313,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host, { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); - msm_host->panel_node = NULL; + msm_host->device_node = NULL; DBG("id=%d", msm_host->id); if (msm_host->dev) @@ -1429,6 +1344,48 @@ static struct mipi_dsi_host_ops dsi_host_ops = { .transfer = dsi_host_transfer, }; +static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) +{ + struct device *dev = &msm_host->pdev->dev; + struct device_node *np = dev->of_node; + struct device_node *endpoint, *device_node; + int ret; + + ret = of_property_read_u32(np, "qcom,dsi-host-index", &msm_host->id); + if (ret) { + dev_err(dev, "%s: host index not specified, ret=%d\n", + __func__, ret); + return ret; + } + + /* + * Get the first endpoint node. In our case, dsi has one output port + * to which the panel is connected. 
Don't return an error if a port + * isn't defined. It's possible that there is nothing connected to + * the dsi output. + */ + endpoint = of_graph_get_next_endpoint(np, NULL); + if (!endpoint) { + dev_dbg(dev, "%s: no endpoint\n", __func__); + return 0; + } + + /* Get panel node from the output port's endpoint data */ + device_node = of_graph_get_remote_port_parent(endpoint); + if (!device_node) { + dev_err(dev, "%s: no valid device\n", __func__); + of_node_put(endpoint); + return -ENODEV; + } + + of_node_put(endpoint); + of_node_put(device_node); + + msm_host->device_node = device_node; + + return 0; +} + int msm_dsi_host_init(struct msm_dsi *msm_dsi) { struct msm_dsi_host *msm_host = NULL; @@ -1443,15 +1400,13 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) goto fail; } - ret = of_property_read_u32(pdev->dev.of_node, - "qcom,dsi-host-index", &msm_host->id); + msm_host->pdev = pdev; + + ret = dsi_host_parse_dt(msm_host); if (ret) { - dev_err(&pdev->dev, - "%s: host index not specified, ret=%d\n", - __func__, ret); + pr_err("%s: failed to parse dt\n", __func__); goto fail; } - msm_host->pdev = pdev; ret = dsi_clk_init(msm_host); if (ret) { @@ -1466,13 +1421,16 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) goto fail; } - msm_host->cfg = dsi_get_config(msm_host); - if (!msm_host->cfg) { + msm_host->cfg_hnd = dsi_get_config(msm_host); + if (!msm_host->cfg_hnd) { ret = -EINVAL; pr_err("%s: get config failed\n", __func__); goto fail; } + /* fixup base address by io offset */ + msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset; + ret = dsi_regulator_init(msm_host); if (ret) { pr_err("%s: regulator init failed\n", __func__); @@ -1559,7 +1517,6 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); - struct device_node *node; int ret; /* Register mipi dsi host */ @@ -1577,14 +1534,13 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer) * It makes sure panel is connected when fbcon detects * connector status and gets the proper display mode to * create framebuffer. 
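
The new dsi_host_parse_dt() replaces the hard-coded "panel" child lookup with the standard OF-graph walk: the host's output endpoint points at a remote port, and the parent of that port is whatever device is attached, panel or bridge; a missing endpoint is deliberately not an error. The same walk in isolation (example_find_remote is a hypothetical name; the caller owns the returned reference):

#include <linux/of.h>
#include <linux/of_graph.h>

static struct device_node *example_find_remote(struct device_node *host_np)
{
	struct device_node *ep, *remote;

	ep = of_graph_get_next_endpoint(host_np, NULL);
	if (!ep)
		return NULL;	/* nothing connected to the DSI output */

	/* parent of the remote port is the panel/bridge device node */
	remote = of_graph_get_remote_port_parent(ep);
	of_node_put(ep);

	return remote;
}
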
+ * Don't try to defer if there is nothing connected to the dsi + * output */ - if (check_defer) { - node = of_get_child_by_name(msm_host->pdev->dev.of_node, - "panel"); - if (node) { - if (!of_drm_find_panel(node)) + if (check_defer && msm_host->device_node) { + if (!of_drm_find_panel(msm_host->device_node)) + if (!of_drm_find_bridge(msm_host->device_node)) return -EPROBE_DEFER; - } } } @@ -1663,6 +1619,7 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host, const struct mipi_dsi_msg *msg) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; int data_byte, rx_byte, dlen, end; int short_response, diff, pkt_size, ret = 0; char cmd; @@ -1704,8 +1661,8 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host, return -EINVAL; } - if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) && - (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) { + if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) && + (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) { /* Clear the RDBK_DATA registers */ dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, DSI_RDBK_DATA_CTRL_CLR); @@ -1919,6 +1876,13 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host) goto fail_disable_reg; } + ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev); + if (ret) { + pr_err("%s: failed to set pinctrl default state, %d\n", + __func__, ret); + goto fail_disable_clk; + } + dsi_timing_setup(msm_host); dsi_sw_reset(msm_host); dsi_ctrl_config(msm_host, true, clk_pre, clk_post); @@ -1931,6 +1895,8 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host) return 0; +fail_disable_clk: + dsi_clk_ctrl(msm_host, 0); fail_disable_reg: dsi_host_regulator_disable(msm_host); unlock_ret: @@ -1953,6 +1919,8 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host) if (msm_host->disp_en_gpio) gpiod_set_value(msm_host->disp_en_gpio, 0); + pinctrl_pm_select_sleep_state(&msm_host->pdev->dev); + msm_dsi_manager_phy_disable(msm_host->id); dsi_clk_ctrl(msm_host, 0); @@ -1993,10 +1961,16 @@ struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host, struct msm_dsi_host *msm_host = to_msm_dsi_host(host); struct drm_panel *panel; - panel = of_drm_find_panel(msm_host->panel_node); + panel = of_drm_find_panel(msm_host->device_node); if (panel_flags) *panel_flags = msm_host->mode_flags; return panel; } +struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + return of_drm_find_bridge(msm_host->device_node); +} diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 87ac6612b6f8..0455ff75074a 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -14,19 +14,31 @@ #include "msm_kms.h" #include "dsi.h" +#define DSI_CLOCK_MASTER DSI_0 +#define DSI_CLOCK_SLAVE DSI_1 + +#define DSI_LEFT DSI_0 +#define DSI_RIGHT DSI_1 + +/* According to the current drm framework sequence, take the encoder of + * DSI_1 as master encoder + */ +#define DSI_ENCODER_MASTER DSI_1 +#define DSI_ENCODER_SLAVE DSI_0 + struct msm_dsi_manager { struct msm_dsi *dsi[DSI_MAX]; - bool is_dual_panel; + bool is_dual_dsi; bool is_sync_needed; - int master_panel_id; + int master_dsi_link_id; }; static struct msm_dsi_manager msm_dsim_glb; -#define IS_DUAL_PANEL() (msm_dsim_glb.is_dual_panel) +#define IS_DUAL_DSI() (msm_dsim_glb.is_dual_dsi) #define IS_SYNC_NEEDED() (msm_dsim_glb.is_sync_needed) -#define IS_MASTER_PANEL(id) (msm_dsim_glb.master_panel_id == id) +#define 
IS_MASTER_DSI_LINK(id) (msm_dsim_glb.master_dsi_link_id == id) static inline struct msm_dsi *dsi_mgr_get_dsi(int id) { @@ -38,23 +50,23 @@ static inline struct msm_dsi *dsi_mgr_get_other_dsi(int id) return msm_dsim_glb.dsi[(id + 1) % DSI_MAX]; } -static int dsi_mgr_parse_dual_panel(struct device_node *np, int id) +static int dsi_mgr_parse_dual_dsi(struct device_node *np, int id) { struct msm_dsi_manager *msm_dsim = &msm_dsim_glb; - /* We assume 2 dsi nodes have the same information of dual-panel and + /* We assume 2 dsi nodes have the same information of dual-dsi and * sync-mode, and only one node specifies master in case of dual mode. */ - if (!msm_dsim->is_dual_panel) - msm_dsim->is_dual_panel = of_property_read_bool( - np, "qcom,dual-panel-mode"); + if (!msm_dsim->is_dual_dsi) + msm_dsim->is_dual_dsi = of_property_read_bool( + np, "qcom,dual-dsi-mode"); - if (msm_dsim->is_dual_panel) { - if (of_property_read_bool(np, "qcom,master-panel")) - msm_dsim->master_panel_id = id; + if (msm_dsim->is_dual_dsi) { + if (of_property_read_bool(np, "qcom,master-dsi")) + msm_dsim->master_dsi_link_id = id; if (!msm_dsim->is_sync_needed) msm_dsim->is_sync_needed = of_property_read_bool( - np, "qcom,sync-dual-panel"); + np, "qcom,sync-dual-dsi"); } return 0; @@ -68,7 +80,7 @@ static int dsi_mgr_host_register(int id) struct msm_dsi_pll *src_pll; int ret; - if (!IS_DUAL_PANEL()) { + if (!IS_DUAL_DSI()) { ret = msm_dsi_host_register(msm_dsi->host, true); if (ret) return ret; @@ -78,9 +90,9 @@ static int dsi_mgr_host_register(int id) } else if (!other_dsi) { ret = 0; } else { - struct msm_dsi *mdsi = IS_MASTER_PANEL(id) ? + struct msm_dsi *mdsi = IS_MASTER_DSI_LINK(id) ? msm_dsi : other_dsi; - struct msm_dsi *sdsi = IS_MASTER_PANEL(id) ? + struct msm_dsi *sdsi = IS_MASTER_DSI_LINK(id) ? other_dsi : msm_dsi; /* Register slave host first, so that slave DSI device * has a chance to probe, and do not block the master @@ -144,28 +156,28 @@ static enum drm_connector_status dsi_mgr_connector_detect( DBG("id=%d", id); if (!msm_dsi->panel) { msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host, - &msm_dsi->panel_flags); + &msm_dsi->device_flags); /* There is only 1 panel in the global panel list - * for dual panel mode. Therefore slave dsi should get + * for dual DSI mode. Therefore slave dsi should get * the drm_panel instance from master dsi, and * keep using the panel flags got from the current DSI link. */ - if (!msm_dsi->panel && IS_DUAL_PANEL() && - !IS_MASTER_PANEL(id) && other_dsi) + if (!msm_dsi->panel && IS_DUAL_DSI() && + !IS_MASTER_DSI_LINK(id) && other_dsi) msm_dsi->panel = msm_dsi_host_get_panel( other_dsi->host, NULL); - if (msm_dsi->panel && IS_DUAL_PANEL()) + if (msm_dsi->panel && IS_DUAL_DSI()) drm_object_attach_property(&connector->base, connector->dev->mode_config.tile_property, 0); - /* Set split display info to kms once dual panel is connected - * to both hosts + /* Set split display info to kms once dual DSI panel is + * connected to both hosts. 
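
With the connected node resolved at init time, the deferral test in msm_dsi_host_register() above becomes precise: defer only when something is connected but neither a drm_panel nor a drm_bridge has registered for it yet, instead of deferring forever against a "panel" child node that may never exist. A sketch of that rule (example_check_defer is hypothetical, and the header locations are an assumption based on this kernel era):

#include <linux/errno.h>
#include <linux/of.h>
#include <drm/drm_crtc.h>	/* of_drm_find_bridge() in this era */
#include <drm/drm_panel.h>

static int example_check_defer(struct device_node *device_node)
{
	if (!device_node)
		return 0;	/* unconnected output: do not defer */

	if (!of_drm_find_panel(device_node) &&
	    !of_drm_find_bridge(device_node))
		return -EPROBE_DEFER;	/* device driver not probed yet */

	return 0;
}
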
*/ - if (msm_dsi->panel && IS_DUAL_PANEL() && + if (msm_dsi->panel && IS_DUAL_DSI() && other_dsi && other_dsi->panel) { - bool cmd_mode = !(msm_dsi->panel_flags & + bool cmd_mode = !(msm_dsi->device_flags & MIPI_DSI_MODE_VIDEO); struct drm_encoder *encoder = msm_dsi_get_encoder( dsi_mgr_get_dsi(DSI_ENCODER_MASTER)); @@ -176,7 +188,7 @@ static enum drm_connector_status dsi_mgr_connector_detect( kms->funcs->set_split_display(kms, encoder, slave_enc, cmd_mode); else - pr_err("mdp does not support dual panel\n"); + pr_err("mdp does not support dual DSI\n"); } } @@ -273,7 +285,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector) if (!num) return 0; - if (IS_DUAL_PANEL()) { + if (IS_DUAL_DSI()) { /* report half resolution to user */ dsi_dual_connector_fix_modes(connector); ret = dsi_dual_connector_tile_init(connector, id); @@ -328,11 +340,12 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); struct mipi_dsi_host *host = msm_dsi->host; struct drm_panel *panel = msm_dsi->panel; - bool is_dual_panel = IS_DUAL_PANEL(); + bool is_dual_dsi = IS_DUAL_DSI(); int ret; DBG("id=%d", id); - if (!panel || (is_dual_panel && (DSI_1 == id))) + if (!msm_dsi_device_connected(msm_dsi) || + (is_dual_dsi && (DSI_1 == id))) return; ret = msm_dsi_host_power_on(host); @@ -341,7 +354,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) goto host_on_fail; } - if (is_dual_panel && msm_dsi1) { + if (is_dual_dsi && msm_dsi1) { ret = msm_dsi_host_power_on(msm_dsi1->host); if (ret) { pr_err("%s: power on host1 failed, %d\n", @@ -353,10 +366,13 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) /* Always call panel functions once, because even for dual panels, * there is only one drm_panel instance. 
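
msm_dsi_host_power_on()/_power_off() above also gain pinctrl handling: the DSI pins are moved to the DT-defined "default" state on power-up and parked in "sleep" on power-down, with a failure to select the default state unwinding through the new fail_disable_clk label. The idiom in isolation (example_set_pins is a hypothetical name):

#include <linux/device.h>
#include <linux/pinctrl/consumer.h>

static int example_set_pins(struct device *dev, bool on)
{
	if (on)
		return pinctrl_pm_select_default_state(dev);

	/* the power-off path above ignores failures on the way down */
	return pinctrl_pm_select_sleep_state(dev);
}
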
*/ - ret = drm_panel_prepare(panel); - if (ret) { - pr_err("%s: prepare panel %d failed, %d\n", __func__, id, ret); - goto panel_prep_fail; + if (panel) { + ret = drm_panel_prepare(panel); + if (ret) { + pr_err("%s: prepare panel %d failed, %d\n", __func__, + id, ret); + goto panel_prep_fail; + } } ret = msm_dsi_host_enable(host); @@ -365,7 +381,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) goto host_en_fail; } - if (is_dual_panel && msm_dsi1) { + if (is_dual_dsi && msm_dsi1) { ret = msm_dsi_host_enable(msm_dsi1->host); if (ret) { pr_err("%s: enable host1 failed, %d\n", __func__, ret); @@ -373,23 +389,27 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) } } - ret = drm_panel_enable(panel); - if (ret) { - pr_err("%s: enable panel %d failed, %d\n", __func__, id, ret); - goto panel_en_fail; + if (panel) { + ret = drm_panel_enable(panel); + if (ret) { + pr_err("%s: enable panel %d failed, %d\n", __func__, id, + ret); + goto panel_en_fail; + } } return; panel_en_fail: - if (is_dual_panel && msm_dsi1) + if (is_dual_dsi && msm_dsi1) msm_dsi_host_disable(msm_dsi1->host); host1_en_fail: msm_dsi_host_disable(host); host_en_fail: - drm_panel_unprepare(panel); + if (panel) + drm_panel_unprepare(panel); panel_prep_fail: - if (is_dual_panel && msm_dsi1) + if (is_dual_dsi && msm_dsi1) msm_dsi_host_power_off(msm_dsi1->host); host1_on_fail: msm_dsi_host_power_off(host); @@ -414,37 +434,44 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); struct mipi_dsi_host *host = msm_dsi->host; struct drm_panel *panel = msm_dsi->panel; - bool is_dual_panel = IS_DUAL_PANEL(); + bool is_dual_dsi = IS_DUAL_DSI(); int ret; DBG("id=%d", id); - if (!panel || (is_dual_panel && (DSI_1 == id))) + if (!msm_dsi_device_connected(msm_dsi) || + (is_dual_dsi && (DSI_1 == id))) return; - ret = drm_panel_disable(panel); - if (ret) - pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, ret); + if (panel) { + ret = drm_panel_disable(panel); + if (ret) + pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, + ret); + } ret = msm_dsi_host_disable(host); if (ret) pr_err("%s: host %d disable failed, %d\n", __func__, id, ret); - if (is_dual_panel && msm_dsi1) { + if (is_dual_dsi && msm_dsi1) { ret = msm_dsi_host_disable(msm_dsi1->host); if (ret) pr_err("%s: host1 disable failed, %d\n", __func__, ret); } - ret = drm_panel_unprepare(panel); - if (ret) - pr_err("%s: Panel %d unprepare failed,%d\n", __func__, id, ret); + if (panel) { + ret = drm_panel_unprepare(panel); + if (ret) + pr_err("%s: Panel %d unprepare failed,%d\n", __func__, + id, ret); + } ret = msm_dsi_host_power_off(host); if (ret) pr_err("%s: host %d power off failed,%d\n", __func__, id, ret); - if (is_dual_panel && msm_dsi1) { + if (is_dual_dsi && msm_dsi1) { ret = msm_dsi_host_power_off(msm_dsi1->host); if (ret) pr_err("%s: host1 power off failed, %d\n", @@ -460,7 +487,7 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge, struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id); struct mipi_dsi_host *host = msm_dsi->host; - bool is_dual_panel = IS_DUAL_PANEL(); + bool is_dual_dsi = IS_DUAL_DSI(); DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", mode->base.id, mode->name, @@ -471,11 +498,11 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge, mode->vsync_end, mode->vtotal, mode->type, mode->flags); - if (is_dual_panel && (DSI_1 == id)) + if (is_dual_dsi && (DSI_1 == id)) return; 
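
Guarding every drm_panel call lets one bring-up sequence serve panels and external bridges alike. Reduced to a single link, the order the code above follows, and unwinds in reverse on failure, looks like the sketch below; example_pre_enable is a hypothetical name, and the msm_dsi_host_* calls are this driver's own helpers:

#include <drm/drm_panel.h>

#include "dsi.h"	/* msm_dsi_host_* helpers from this driver */

static int example_pre_enable(struct mipi_dsi_host *host,
			      struct drm_panel *panel /* may be NULL */)
{
	int ret;

	ret = msm_dsi_host_power_on(host);
	if (ret)
		return ret;

	if (panel) {
		ret = drm_panel_prepare(panel);
		if (ret)
			goto power_off;
	}

	ret = msm_dsi_host_enable(host);
	if (ret)
		goto unprepare;

	if (panel) {
		ret = drm_panel_enable(panel);
		if (ret)
			goto host_disable;
	}

	return 0;

host_disable:
	msm_dsi_host_disable(host);
unprepare:
	if (panel)
		drm_panel_unprepare(panel);
power_off:
	msm_dsi_host_power_off(host);
	return ret;
}
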
msm_dsi_host_set_display_mode(host, adjusted_mode); - if (is_dual_panel && other_dsi) + if (is_dual_dsi && other_dsi) msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode); } @@ -503,7 +530,7 @@ static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = { .mode_set = dsi_mgr_bridge_mode_set, }; -/* initialize connector */ +/* initialize connector when we're connected to a drm_panel */ struct drm_connector *msm_dsi_manager_connector_init(u8 id) { struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); @@ -588,6 +615,53 @@ fail: return ERR_PTR(ret); } +struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id) +{ + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct drm_device *dev = msm_dsi->dev; + struct drm_encoder *encoder; + struct drm_bridge *int_bridge, *ext_bridge; + struct drm_connector *connector; + struct list_head *connector_list; + + int_bridge = msm_dsi->bridge; + ext_bridge = msm_dsi->external_bridge = + msm_dsi_host_get_bridge(msm_dsi->host); + + /* + * HACK: we may not know the external DSI bridge device's mode + * flags here. We'll get to know them only when the device + * attaches to the dsi host. For now, assume the bridge supports + * DSI video mode + */ + encoder = msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID]; + + /* link the internal dsi bridge to the external bridge */ + int_bridge->next = ext_bridge; + /* set the external bridge's encoder as dsi's encoder */ + ext_bridge->encoder = encoder; + + drm_bridge_attach(dev, ext_bridge); + + /* + * we need the drm_connector created by the external bridge + * driver (or someone else) to feed it to our driver's + * priv->connector[] list, mainly for msm_fbdev_init() + */ + connector_list = &dev->mode_config.connector_list; + + list_for_each_entry(connector, connector_list, head) { + int i; + + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { + if (connector->encoder_ids[i] == encoder->base.id) + return connector; + } + } + + return ERR_PTR(-ENODEV); +} + void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge) { } @@ -598,12 +672,29 @@ int msm_dsi_manager_phy_enable(int id, { struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct msm_dsi_phy *phy = msm_dsi->phy; + int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id; + struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy); int ret; - ret = msm_dsi_phy_enable(phy, IS_DUAL_PANEL(), bit_rate, esc_rate); + ret = msm_dsi_phy_enable(phy, src_pll_id, bit_rate, esc_rate); if (ret) return ret; + /* + * Reset DSI PHY silently changes its PLL registers to reset status, + * which will confuse clock driver and result in wrong output rate of + * link clocks. Restore PLL status if its PLL is being used as clock + * source. + */ + if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER)) { + ret = msm_dsi_pll_restore_state(pll); + if (ret) { + pr_err("%s: failed to restore pll state\n", __func__); + msm_dsi_phy_disable(phy); + return ret; + } + } + msm_dsi->phy_enabled = true; msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post); @@ -616,13 +707,18 @@ void msm_dsi_manager_phy_disable(int id) struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); struct msm_dsi_phy *phy = msm_dsi->phy; + struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy); + + /* Save PLL status if it is a clock source */ + if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER)) + msm_dsi_pll_save_state(pll); /* disable DSI phy * In dual-dsi configuration, the phy should be disabled for the * first controller only when the second controller is disabled. 
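
The src_pll_id plumbed through msm_dsi_manager_phy_enable() above replaces the old is_dual_panel flag: in dual-DSI both links bit-clock from the DSI_CLOCK_MASTER PLL, otherwise each PHY uses its own. In the PHY rework later in this patch, msm_dsi_phy_set_src_pll() turns that id into a read-modify-write of a GLBL_TEST_CTRL bit driven by a per-generation truth table; the shape of that operation, sketched with generic MMIO accessors (example_set_src_pll is a hypothetical name):

#include <linux/io.h>
#include <linux/types.h>

static void example_set_src_pll(void __iomem *base, u32 reg, u32 bit_mask,
				bool select_bit)
{
	u32 val = readl(base + reg);	/* read-modify-write one bit */

	if (select_bit)
		writel(val | bit_mask, base + reg);
	else
		writel(val & ~bit_mask, base + reg);
}
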
*/ msm_dsi->phy_enabled = false; - if (IS_DUAL_PANEL() && mdsi && sdsi) { + if (IS_DUAL_DSI() && mdsi && sdsi) { if (!mdsi->phy_enabled && !sdsi->phy_enabled) { msm_dsi_phy_disable(sdsi->phy); msm_dsi_phy_disable(mdsi->phy); @@ -713,9 +809,9 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi) msm_dsim->dsi[id] = msm_dsi; - ret = dsi_mgr_parse_dual_panel(msm_dsi->pdev->dev.of_node, id); + ret = dsi_mgr_parse_dual_dsi(msm_dsi->pdev->dev.of_node, id); if (ret) { - pr_err("%s: failed to parse dual panel info\n", __func__); + pr_err("%s: failed to parse dual DSI info\n", __func__); goto fail; } diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h index 728152f3ef48..5de505e627be 100644 --- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h +++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h @@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23) - -Copyright (C) 2013-2014 by the following authors: +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) + +Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy.c 
b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 2d3b33ce1cc5..401ff58d6893 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -12,142 +12,8 @@ */ #include <linux/platform_device.h> -#include <linux/regulator/consumer.h> -#include "dsi.h" -#include "dsi.xml.h" - -#define dsi_phy_read(offset) msm_readl((offset)) -#define dsi_phy_write(offset, data) msm_writel((data), (offset)) - -struct dsi_phy_ops { - int (*enable)(struct msm_dsi_phy *phy, bool is_dual_panel, - const unsigned long bit_rate, const unsigned long esc_rate); - int (*disable)(struct msm_dsi_phy *phy); -}; - -struct dsi_phy_cfg { - enum msm_dsi_phy_type type; - struct dsi_reg_config reg_cfg; - struct dsi_phy_ops ops; -}; - -struct dsi_dphy_timing { - u32 clk_pre; - u32 clk_post; - u32 clk_zero; - u32 clk_trail; - u32 clk_prepare; - u32 hs_exit; - u32 hs_zero; - u32 hs_prepare; - u32 hs_trail; - u32 hs_rqst; - u32 ta_go; - u32 ta_sure; - u32 ta_get; -}; - -struct msm_dsi_phy { - struct platform_device *pdev; - void __iomem *base; - void __iomem *reg_base; - int id; - - struct clk *ahb_clk; - struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX]; - - struct dsi_dphy_timing timing; - const struct dsi_phy_cfg *cfg; - - struct msm_dsi_pll *pll; -}; - -static int dsi_phy_regulator_init(struct msm_dsi_phy *phy) -{ - struct regulator_bulk_data *s = phy->supplies; - const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs; - struct device *dev = &phy->pdev->dev; - int num = phy->cfg->reg_cfg.num; - int i, ret; - - for (i = 0; i < num; i++) - s[i].supply = regs[i].name; - - ret = devm_regulator_bulk_get(&phy->pdev->dev, num, s); - if (ret < 0) { - dev_err(dev, "%s: failed to init regulator, ret=%d\n", - __func__, ret); - return ret; - } - - for (i = 0; i < num; i++) { - if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) { - ret = regulator_set_voltage(s[i].consumer, - regs[i].min_voltage, regs[i].max_voltage); - if (ret < 0) { - dev_err(dev, - "regulator %d set voltage failed, %d\n", - i, ret); - return ret; - } - } - } - - return 0; -} - -static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy) -{ - struct regulator_bulk_data *s = phy->supplies; - const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs; - int num = phy->cfg->reg_cfg.num; - int i; - - DBG(""); - for (i = num - 1; i >= 0; i--) - if (regs[i].disable_load >= 0) - regulator_set_load(s[i].consumer, - regs[i].disable_load); - - regulator_bulk_disable(num, s); -} - -static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy) -{ - struct regulator_bulk_data *s = phy->supplies; - const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs; - struct device *dev = &phy->pdev->dev; - int num = phy->cfg->reg_cfg.num; - int ret, i; - - DBG(""); - for (i = 0; i < num; i++) { - if (regs[i].enable_load >= 0) { - ret = regulator_set_load(s[i].consumer, - regs[i].enable_load); - if (ret < 0) { - dev_err(dev, - "regulator %d set op mode failed, %d\n", - i, ret); - goto fail; - } - } - } - - ret = regulator_bulk_enable(num, s); - if (ret < 0) { - dev_err(dev, "regulator enable failed, %d\n", ret); - goto fail; - } - - return 0; - -fail: - for (i--; i >= 0; i--) - regulator_set_load(s[i].consumer, regs[i].disable_load); - return ret; -} +#include "dsi_phy.h" #define S_DIV_ROUND_UP(n, d) \ (((n) >= 0) ? 
(((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d))) @@ -156,6 +22,7 @@ static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent, s32 min_result, bool even) { s32 v; + v = (tmax - tmin) * percent; v = S_DIV_ROUND_UP(v, 100) + tmin; if (even && (v & 0x1)) @@ -164,7 +31,7 @@ static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent, return max_t(s32, min_result, v); } -static void dsi_dphy_timing_calc_clk_zero(struct dsi_dphy_timing *timing, +static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing, s32 ui, s32 coeff, s32 pcnt) { s32 tmax, tmin, clk_z; @@ -186,7 +53,7 @@ static void dsi_dphy_timing_calc_clk_zero(struct dsi_dphy_timing *timing, timing->clk_zero = clk_z + 8 - temp; } -static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing, +int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, const unsigned long bit_rate, const unsigned long esc_rate) { s32 ui, lpx; @@ -256,9 +123,8 @@ static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing, temp += 8 * ui + lpx; tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1; if (tmin > tmax) { - temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false) >> 1; + temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false); timing->clk_pre = temp >> 1; - temp = (2 * tmax - tmin) * pcnt2; } else { timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false); } @@ -276,130 +142,119 @@ static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing, return 0; } -static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) +void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, + u32 bit_mask) { - void __iomem *base = phy->reg_base; + int phy_id = phy->id; + u32 val; - if (!enable) { - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0); + if ((phy_id >= DSI_MAX) || (pll_id >= DSI_MAX)) return; - } - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0); - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1); - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0); - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0); - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3); - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9); - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7); - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20); + val = dsi_phy_read(phy->base + reg); + + if (phy->cfg->src_pll_truthtable[phy_id][pll_id]) + dsi_phy_write(phy->base + reg, val | bit_mask); + else + dsi_phy_write(phy->base + reg, val & (~bit_mask)); } -static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel, - const unsigned long bit_rate, const unsigned long esc_rate) +static int dsi_phy_regulator_init(struct msm_dsi_phy *phy) { - struct dsi_dphy_timing *timing = &phy->timing; - int i; - void __iomem *base = phy->base; + struct regulator_bulk_data *s = phy->supplies; + const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs; + struct device *dev = &phy->pdev->dev; + int num = phy->cfg->reg_cfg.num; + int i, ret; - DBG(""); + for (i = 0; i < num; i++) + s[i].supply = regs[i].name; - if (dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) { - pr_err("%s: D-PHY timing calculation failed\n", __func__); - return -EINVAL; + ret = devm_regulator_bulk_get(dev, num, s); + if (ret < 0) { + dev_err(dev, "%s: failed to init regulator, ret=%d\n", + __func__, ret); + return ret; } - dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff); - - dsi_28nm_phy_regulator_ctrl(phy, true); - - dsi_phy_write(base + 
REG_DSI_28nm_PHY_LDO_CNTRL, 0x00); - - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0, - DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero)); - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1, - DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail)); - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2, - DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare)); - if (timing->clk_zero & BIT(8)) - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3, - DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8); - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4, - DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit)); - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5, - DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero)); - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6, - DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare)); - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7, - DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail)); - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8, - DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst)); - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9, - DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) | - DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure)); - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10, - DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get)); - dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11, - DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0)); - - dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00); - dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f); - - dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6); - - for (i = 0; i < 4; i++) { - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0); - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0); - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0); - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0); - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0); - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0); - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1); - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97); + for (i = 0; i < num; i++) { + if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) { + ret = regulator_set_voltage(s[i].consumer, + regs[i].min_voltage, regs[i].max_voltage); + if (ret < 0) { + dev_err(dev, + "regulator %d set voltage failed, %d\n", + i, ret); + return ret; + } + } } - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0); - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5); - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa); - dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf); - dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0); - dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1); - dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb); + return 0; +} - dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f); +static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy) +{ + struct regulator_bulk_data *s = phy->supplies; + const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs; + int num = phy->cfg->reg_cfg.num; + int i; - if (is_dual_panel && (phy->id != DSI_CLOCK_MASTER)) - dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x00); - else - dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x01); + DBG(""); + for (i = num - 1; i >= 0; i--) + if (regs[i].disable_load >= 0) + regulator_set_load(s[i].consumer, regs[i].disable_load); - return 0; + regulator_bulk_disable(num, s); } -static int dsi_28nm_phy_disable(struct 
msm_dsi_phy *phy) +static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy) { - dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0); - dsi_28nm_phy_regulator_ctrl(phy, false); + struct regulator_bulk_data *s = phy->supplies; + const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs; + struct device *dev = &phy->pdev->dev; + int num = phy->cfg->reg_cfg.num; + int ret, i; - /* - * Wait for the registers writes to complete in order to - * ensure that the phy is completely disabled - */ - wmb(); + DBG(""); + for (i = 0; i < num; i++) { + if (regs[i].enable_load >= 0) { + ret = regulator_set_load(s[i].consumer, + regs[i].enable_load); + if (ret < 0) { + dev_err(dev, + "regulator %d set op mode failed, %d\n", + i, ret); + goto fail; + } + } + } + + ret = regulator_bulk_enable(num, s); + if (ret < 0) { + dev_err(dev, "regulator enable failed, %d\n", ret); + goto fail; + } return 0; + +fail: + for (i--; i >= 0; i--) + regulator_set_load(s[i].consumer, regs[i].disable_load); + return ret; } static int dsi_phy_enable_resource(struct msm_dsi_phy *phy) { + struct device *dev = &phy->pdev->dev; int ret; - pm_runtime_get_sync(&phy->pdev->dev); + pm_runtime_get_sync(dev); ret = clk_prepare_enable(phy->ahb_clk); if (ret) { - pr_err("%s: can't enable ahb clk, %d\n", __func__, ret); - pm_runtime_put_sync(&phy->pdev->dev); + dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret); + pm_runtime_put_sync(dev); } return ret; @@ -411,92 +266,74 @@ static void dsi_phy_disable_resource(struct msm_dsi_phy *phy) pm_runtime_put_sync(&phy->pdev->dev); } -static const struct dsi_phy_cfg dsi_phy_cfgs[MSM_DSI_PHY_MAX] = { - [MSM_DSI_PHY_28NM_HPM] = { - .type = MSM_DSI_PHY_28NM_HPM, - .reg_cfg = { - .num = 1, - .regs = { - {"vddio", 1800000, 1800000, 100000, 100}, - }, - }, - .ops = { - .enable = dsi_28nm_phy_enable, - .disable = dsi_28nm_phy_disable, - } - }, - [MSM_DSI_PHY_28NM_LP] = { - .type = MSM_DSI_PHY_28NM_LP, - .reg_cfg = { - .num = 1, - .regs = { - {"vddio", 1800000, 1800000, 100000, 100}, - }, - }, - .ops = { - .enable = dsi_28nm_phy_enable, - .disable = dsi_28nm_phy_disable, - } - }, -}; - static const struct of_device_id dsi_phy_dt_match[] = { +#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY { .compatible = "qcom,dsi-phy-28nm-hpm", - .data = &dsi_phy_cfgs[MSM_DSI_PHY_28NM_HPM],}, + .data = &dsi_phy_28nm_hpm_cfgs }, { .compatible = "qcom,dsi-phy-28nm-lp", - .data = &dsi_phy_cfgs[MSM_DSI_PHY_28NM_LP],}, + .data = &dsi_phy_28nm_lp_cfgs }, +#endif +#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY + { .compatible = "qcom,dsi-phy-20nm", + .data = &dsi_phy_20nm_cfgs }, +#endif {} }; static int dsi_phy_driver_probe(struct platform_device *pdev) { struct msm_dsi_phy *phy; + struct device *dev = &pdev->dev; const struct of_device_id *match; int ret; - phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL); + phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); if (!phy) return -ENOMEM; - match = of_match_node(dsi_phy_dt_match, pdev->dev.of_node); + match = of_match_node(dsi_phy_dt_match, dev->of_node); if (!match) return -ENODEV; phy->cfg = match->data; phy->pdev = pdev; - ret = of_property_read_u32(pdev->dev.of_node, + ret = of_property_read_u32(dev->of_node, "qcom,dsi-phy-index", &phy->id); if (ret) { - dev_err(&pdev->dev, - "%s: PHY index not specified, ret=%d\n", + dev_err(dev, "%s: PHY index not specified, %d\n", __func__, ret); goto fail; } + phy->regulator_ldo_mode = of_property_read_bool(dev->of_node, + "qcom,dsi-phy-regulator-ldo-mode"); + phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY"); if (IS_ERR(phy->base)) { - 
dev_err(&pdev->dev, "%s: failed to map phy base\n", __func__); + dev_err(dev, "%s: failed to map phy base\n", __func__); ret = -ENOMEM; goto fail; } - phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", "DSI_PHY_REG"); + + phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", + "DSI_PHY_REG"); if (IS_ERR(phy->reg_base)) { - dev_err(&pdev->dev, - "%s: failed to map phy regulator base\n", __func__); + dev_err(dev, "%s: failed to map phy regulator base\n", + __func__); ret = -ENOMEM; goto fail; } ret = dsi_phy_regulator_init(phy); if (ret) { - dev_err(&pdev->dev, "%s: failed to init regulator\n", __func__); + dev_err(dev, "%s: failed to init regulator\n", __func__); goto fail; } - phy->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk"); + phy->ahb_clk = devm_clk_get(dev, "iface_clk"); if (IS_ERR(phy->ahb_clk)) { - pr_err("%s: Unable to get ahb clk\n", __func__); + dev_err(dev, "%s: Unable to get ahb clk\n", __func__); ret = PTR_ERR(phy->ahb_clk); goto fail; } @@ -510,7 +347,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev) phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id); if (!phy->pll) - dev_info(&pdev->dev, + dev_info(dev, "%s: pll init failed, need separate pll clk driver\n", __func__); @@ -557,9 +394,10 @@ void __exit msm_dsi_phy_driver_unregister(void) platform_driver_unregister(&dsi_phy_platform_driver); } -int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel, +int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, const unsigned long bit_rate, const unsigned long esc_rate) { + struct device *dev = &phy->pdev->dev; int ret; if (!phy || !phy->cfg->ops.enable) @@ -567,30 +405,37 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel, ret = dsi_phy_regulator_enable(phy); if (ret) { - dev_err(&phy->pdev->dev, "%s: regulator enable failed, %d\n", + dev_err(dev, "%s: regulator enable failed, %d\n", __func__, ret); return ret; } - return phy->cfg->ops.enable(phy, is_dual_panel, bit_rate, esc_rate); + ret = phy->cfg->ops.enable(phy, src_pll_id, bit_rate, esc_rate); + if (ret) { + dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret); + dsi_phy_regulator_disable(phy); + return ret; + } + + return 0; } -int msm_dsi_phy_disable(struct msm_dsi_phy *phy) +void msm_dsi_phy_disable(struct msm_dsi_phy *phy) { if (!phy || !phy->cfg->ops.disable) - return -EINVAL; + return; phy->cfg->ops.disable(phy); - dsi_phy_regulator_disable(phy); - return 0; + dsi_phy_regulator_disable(phy); } void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy, - u32 *clk_pre, u32 *clk_post) + u32 *clk_pre, u32 *clk_post) { if (!phy) return; + if (clk_pre) *clk_pre = phy->timing.clk_pre; if (clk_post) diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h new file mode 100644 index 000000000000..0456b253239f --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
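
msm_dsi_phy_enable() above also stops leaking an enabled supply: now that the per-generation ops.enable() callback can fail, its error path switches the just-enabled regulators back off before returning. A distilled restatement of that flow (example_phy_enable is a hypothetical name; the two regulator helpers are the statics moved into this file):

#include "dsi_phy.h"

static int example_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
			      unsigned long bit_rate, unsigned long esc_rate)
{
	int ret;

	ret = dsi_phy_regulator_enable(phy);
	if (ret)
		return ret;

	ret = phy->cfg->ops.enable(phy, src_pll_id, bit_rate, esc_rate);
	if (ret) {
		dsi_phy_regulator_disable(phy);	/* unwind on failure */
		return ret;
	}

	return 0;
}
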
+ */ + +#ifndef __DSI_PHY_H__ +#define __DSI_PHY_H__ + +#include <linux/regulator/consumer.h> + +#include "dsi.h" + +#define dsi_phy_read(offset) msm_readl((offset)) +#define dsi_phy_write(offset, data) msm_writel((data), (offset)) + +struct msm_dsi_phy_ops { + int (*enable)(struct msm_dsi_phy *phy, int src_pll_id, + const unsigned long bit_rate, const unsigned long esc_rate); + void (*disable)(struct msm_dsi_phy *phy); +}; + +struct msm_dsi_phy_cfg { + enum msm_dsi_phy_type type; + struct dsi_reg_config reg_cfg; + struct msm_dsi_phy_ops ops; + + /* + * Each cell {phy_id, pll_id} of the truth table indicates + * if the source PLL selection bit should be set for each PHY. + * Fill default H/W values in illegal cells, eg. cell {0, 1}. + */ + bool src_pll_truthtable[DSI_MAX][DSI_MAX]; +}; + +extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs; + +struct msm_dsi_dphy_timing { + u32 clk_pre; + u32 clk_post; + u32 clk_zero; + u32 clk_trail; + u32 clk_prepare; + u32 hs_exit; + u32 hs_zero; + u32 hs_prepare; + u32 hs_trail; + u32 hs_rqst; + u32 ta_go; + u32 ta_sure; + u32 ta_get; +}; + +struct msm_dsi_phy { + struct platform_device *pdev; + void __iomem *base; + void __iomem *reg_base; + int id; + + struct clk *ahb_clk; + struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX]; + + struct msm_dsi_dphy_timing timing; + const struct msm_dsi_phy_cfg *cfg; + + bool regulator_ldo_mode; + + struct msm_dsi_pll *pll; +}; + +/* + * PHY internal functions + */ +int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, + const unsigned long bit_rate, const unsigned long esc_rate); +void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, + u32 bit_mask); + +#endif /* __DSI_PHY_H__ */ + diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c new file mode 100644 index 000000000000..2e9ba118d50a --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "dsi_phy.h" +#include "dsi.xml.h" + +static void dsi_20nm_dphy_set_timing(struct msm_dsi_phy *phy, + struct msm_dsi_dphy_timing *timing) +{ + void __iomem *base = phy->base; + + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_0, + DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero)); + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_1, + DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail)); + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_2, + DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare)); + if (timing->clk_zero & BIT(8)) + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_3, + DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8); + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_4, + DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit)); + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_5, + DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero)); + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_6, + DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare)); + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_7, + DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail)); + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_8, + DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst)); + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_9, + DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) | + DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure)); + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_10, + DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get)); + dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_11, + DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0)); +} + +static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) +{ + void __iomem *base = phy->reg_base; + + if (!enable) { + dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0); + return; + } + + if (phy->regulator_ldo_mode) { + dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x1d); + return; + } + + /* non LDO mode */ + dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_1, 0x03); + dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_2, 0x03); + dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_3, 0x00); + dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_4, 0x20); + dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0x01); + dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x00); + dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_0, 0x03); +} + +static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, + const unsigned long bit_rate, const unsigned long esc_rate) +{ + struct msm_dsi_dphy_timing *timing = &phy->timing; + int i; + void __iomem *base = phy->base; + u32 cfg_4[4] = {0x20, 0x40, 0x20, 0x00}; + + DBG(""); + + if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) { + dev_err(&phy->pdev->dev, + "%s: D-PHY timing calculation failed\n", __func__); + return -EINVAL; + } + + dsi_20nm_phy_regulator_ctrl(phy, true); + + dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_0, 0xff); + + msm_dsi_phy_set_src_pll(phy, src_pll_id, + REG_DSI_20nm_PHY_GLBL_TEST_CTRL, + DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL); + + for (i = 0; i < 4; i++) { + dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_3(i), + (i >> 1) * 0x40); + dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_0(i), 0x01); + dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_1(i), 0x46); + dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_0(i), 0x02); + dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_1(i), 0xa0); + dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_4(i), cfg_4[i]); + } + + 
dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_3, 0x80); + dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR0, 0x01); + dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR1, 0x46); + dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_0, 0x00); + dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_1, 0xa0); + dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_2, 0x00); + dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_4, 0x00); + + dsi_20nm_dphy_set_timing(phy, timing); + + dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_1, 0x00); + + dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_1, 0x06); + + /* make sure everything is written before enable */ + wmb(); + dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_0, 0x7f); + + return 0; +} + +static void dsi_20nm_phy_disable(struct msm_dsi_phy *phy) +{ + dsi_phy_write(phy->base + REG_DSI_20nm_PHY_CTRL_0, 0); + dsi_20nm_phy_regulator_ctrl(phy, false); +} + +const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = { + .type = MSM_DSI_PHY_20NM, + .src_pll_truthtable = { {false, true}, {false, true} }, + .reg_cfg = { + .num = 2, + .regs = { + {"vddio", 1800000, 1800000, 100000, 100}, + {"vcca", 1000000, 1000000, 10000, 100}, + }, + }, + .ops = { + .enable = dsi_20nm_phy_enable, + .disable = dsi_20nm_phy_disable, + } +}; + diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c new file mode 100644 index 000000000000..f1a7c7b46420 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c @@ -0,0 +1,166 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "dsi_phy.h" +#include "dsi.xml.h" + +static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy, + struct msm_dsi_dphy_timing *timing) +{ + void __iomem *base = phy->base; + + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0, + DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1, + DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2, + DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare)); + if (timing->clk_zero & BIT(8)) + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3, + DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4, + DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5, + DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6, + DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7, + DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8, + DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9, + DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) | + DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10, + DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11, + DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0)); +} + +static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) +{ + void __iomem *base = phy->reg_base; + + if (!enable) { + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0); + return; + } + + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20); +} + +static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, + const unsigned long bit_rate, const unsigned long esc_rate) +{ + struct msm_dsi_dphy_timing *timing = &phy->timing; + int i; + void __iomem *base = phy->base; + + DBG(""); + + if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) { + dev_err(&phy->pdev->dev, + "%s: D-PHY timing calculation failed\n", __func__); + return -EINVAL; + } + + dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff); + + dsi_28nm_phy_regulator_ctrl(phy, true); + + dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00); + + dsi_28nm_dphy_set_timing(phy, timing); + + dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00); + dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f); + + dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6); + + for (i = 0; i < 4; i++) { + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0); + dsi_phy_write(base + 
REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97); + } + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf); + + dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1); + dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb); + + dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f); + + msm_dsi_phy_set_src_pll(phy, src_pll_id, + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, + DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL); + + return 0; +} + +static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy) +{ + dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0); + dsi_28nm_phy_regulator_ctrl(phy, false); + + /* + * Wait for the registers writes to complete in order to + * ensure that the phy is completely disabled + */ + wmb(); +} + +const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = { + .type = MSM_DSI_PHY_28NM_HPM, + .src_pll_truthtable = { {true, true}, {false, true} }, + .reg_cfg = { + .num = 1, + .regs = { + {"vddio", 1800000, 1800000, 100000, 100}, + }, + }, + .ops = { + .enable = dsi_28nm_phy_enable, + .disable = dsi_28nm_phy_disable, + }, +}; + +const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = { + .type = MSM_DSI_PHY_28NM_LP, + .src_pll_truthtable = { {true, true}, {true, true} }, + .reg_cfg = { + .num = 1, + .regs = { + {"vddio", 1800000, 1800000, 100000, 100}, + }, + }, + .ops = { + .enable = dsi_28nm_phy_enable, + .disable = dsi_28nm_phy_disable, + }, +}; + diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c index 509376fdd112..5104fc9f9a53 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c @@ -72,31 +72,14 @@ long msm_dsi_pll_helper_clk_round_rate(struct clk_hw *hw, int msm_dsi_pll_helper_clk_prepare(struct clk_hw *hw) { struct msm_dsi_pll *pll = hw_clk_to_pll(hw); - int ret; - - /* - * Certain PLLs need to update the same VCO rate and registers - * after resume in suspend/resume scenario. 
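
The PLL save/restore moves out of the clock framework's prepare/unprepare hooks here and into explicit calls from the DSI manager code earlier in this patch, guarded by the new state_saved flag added just below so a restore only happens after a matching save. The intended call order around a PHY power cycle, sketched with the helpers this patch introduces (example_phy_cycle is a hypothetical name):

#include "dsi.h"	/* msm_dsi_phy_* / msm_dsi_pll_* from this driver */

static int example_phy_cycle(struct msm_dsi_phy *phy, struct msm_dsi_pll *pll,
			     int src_pll_id, unsigned long bit_rate,
			     unsigned long esc_rate)
{
	int ret;

	/* save before the disable: the PHY reset silently clears
	 * the PLL registers behind the clock framework's back */
	msm_dsi_pll_save_state(pll);
	msm_dsi_phy_disable(phy);

	ret = msm_dsi_phy_enable(phy, src_pll_id, bit_rate, esc_rate);
	if (ret)
		return ret;

	/* re-seed the PLL registers only if a save actually happened */
	return msm_dsi_pll_restore_state(pll);
}
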
- */ - if (pll->restore_state) { - ret = pll->restore_state(pll); - if (ret) - goto error; - } - ret = dsi_pll_enable(pll); - -error: - return ret; + return dsi_pll_enable(pll); } void msm_dsi_pll_helper_clk_unprepare(struct clk_hw *hw) { struct msm_dsi_pll *pll = hw_clk_to_pll(hw); - if (pll->save_state) - pll->save_state(pll); - dsi_pll_disable(pll); } @@ -134,6 +117,29 @@ void msm_dsi_pll_destroy(struct msm_dsi_pll *pll) pll->destroy(pll); } +void msm_dsi_pll_save_state(struct msm_dsi_pll *pll) +{ + if (pll->save_state) { + pll->save_state(pll); + pll->state_saved = true; + } +} + +int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll) +{ + int ret; + + if (pll->restore_state && pll->state_saved) { + ret = pll->restore_state(pll); + if (ret) + return ret; + + pll->state_saved = false; + } + + return 0; +} + struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, enum msm_dsi_phy_type type, int id) { diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h index 5a3bb241c039..063caa2c5740 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h @@ -27,6 +27,7 @@ struct msm_dsi_pll { struct clk_hw clk_hw; bool pll_on; + bool state_saved; unsigned long min_rate; unsigned long max_rate; @@ -82,8 +83,16 @@ void msm_dsi_pll_helper_unregister_clks(struct platform_device *pdev, /* * Initialization for Each PLL Type */ +#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev, enum msm_dsi_phy_type type, int id); +#else +static inline struct msm_dsi_pll *msm_dsi_pll_28nm_init( + struct platform_device *pdev, enum msm_dsi_phy_type type, int id) +{ + return ERR_PTR(-ENODEV); +} +#endif #endif /* __DSI_PLL_H__ */ diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c index eb8ac3097ff5..1912cfcca48c 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c @@ -465,26 +465,21 @@ static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll) void __iomem *base = pll_28nm->mmio; int ret; - if ((cached_state->vco_rate != 0) && - (cached_state->vco_rate == __clk_get_rate(pll->clk_hw.clk))) { - ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw, - cached_state->vco_rate, 0); - if (ret) { - dev_err(&pll_28nm->pdev->dev, - "restore vco rate failed. ret=%d\n", ret); - return ret; - } - - pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG, - cached_state->postdiv3); - pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG, - cached_state->postdiv1); - pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG, - cached_state->byte_mux); - - cached_state->vco_rate = 0; + ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw, + cached_state->vco_rate, 0); + if (ret) { + dev_err(&pll_28nm->pdev->dev, + "restore vco rate failed. 
ret=%d\n", ret); + return ret; } + pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG, + cached_state->postdiv3); + pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG, + cached_state->postdiv1); + pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG, + cached_state->byte_mux); + return 0; } diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h index 26f268e2dd3d..06cbddfc914f 100644 --- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h +++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h @@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23) - -Copyright (C) 2013 by the following authors: +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) + +Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h index f9c71dceb5e2..bef1d65fe28c 100644 --- a/drivers/gpu/drm/msm/edp/edp.xml.h +++ b/drivers/gpu/drm/msm/edp/edp.xml.h @@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- 
/home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c index 7991069dd492..81200e9be382 100644 --- a/drivers/gpu/drm/msm/edp/edp_ctrl.c +++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c @@ -373,7 +373,7 @@ static int edp_gpio_config(struct edp_ctrl *ctrl) struct device *dev = &ctrl->pdev->dev; int ret; - ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd"); + ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd", GPIOD_IN); if (IS_ERR(ctrl->panel_hpd_gpio)) { ret = PTR_ERR(ctrl->panel_hpd_gpio); ctrl->panel_hpd_gpio = NULL; @@ -381,13 +381,7 @@ static int edp_gpio_config(struct edp_ctrl *ctrl) return ret; } - ret = gpiod_direction_input(ctrl->panel_hpd_gpio); - if (ret) { - pr_err("%s: Set direction for hpd failed, %d\n", __func__, ret); - return ret; - } - - ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en"); + ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en", GPIOD_OUT_LOW); if (IS_ERR(ctrl->panel_en_gpio)) { ret = PTR_ERR(ctrl->panel_en_gpio); ctrl->panel_en_gpio = NULL; @@ -395,13 +389,6 @@ static int edp_gpio_config(struct edp_ctrl *ctrl) return ret; } - ret = gpiod_direction_output(ctrl->panel_en_gpio, 0); - if (ret) { - pr_err("%s: Set direction 
for panel_en failed, %d\n", - __func__, ret); - return ret; - } - DBG("gpio on"); return 0; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 814536202efe..101b324cdeef 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -22,7 +22,9 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on) { uint32_t ctrl = 0; + unsigned long flags; + spin_lock_irqsave(&hdmi->reg_lock, flags); if (power_on) { ctrl |= HDMI_CTRL_ENABLE; if (!hdmi->hdmi_mode) { @@ -37,6 +39,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on) } hdmi_write(hdmi, REG_HDMI_CTRL, ctrl); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); DBG("HDMI Core: %s, HDMI_CTRL=0x%08x", power_on ? "Enable" : "Disable", ctrl); } @@ -51,6 +54,10 @@ static irqreturn_t hdmi_irq(int irq, void *dev_id) /* Process DDC: */ hdmi_i2c_irq(hdmi->i2c); + /* Process HDCP: */ + if (hdmi->hdcp_ctrl) + hdmi_hdcp_irq(hdmi->hdcp_ctrl); + /* TODO audio.. */ return IRQ_HANDLED; @@ -60,6 +67,15 @@ static void hdmi_destroy(struct hdmi *hdmi) { struct hdmi_phy *phy = hdmi->phy; + /* + * at this point, hpd has been disabled, + * after flush workq, it's safe to deinit hdcp + */ + if (hdmi->workq) { + flush_workqueue(hdmi->workq); + destroy_workqueue(hdmi->workq); + } + hdmi_hdcp_destroy(hdmi); if (phy) phy->funcs->destroy(phy); @@ -77,6 +93,7 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) { struct hdmi_platform_config *config = pdev->dev.platform_data; struct hdmi *hdmi = NULL; + struct resource *res; int i, ret; hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); @@ -87,18 +104,18 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) hdmi->pdev = pdev; hdmi->config = config; + spin_lock_init(&hdmi->reg_lock); /* not sure about which phy maps to which msm.. 
probably I miss some */ - if (config->phy_init) + if (config->phy_init) { hdmi->phy = config->phy_init(hdmi); - else - hdmi->phy = ERR_PTR(-ENXIO); - if (IS_ERR(hdmi->phy)) { - ret = PTR_ERR(hdmi->phy); - dev_err(&pdev->dev, "failed to load phy: %d\n", ret); - hdmi->phy = NULL; - goto fail; + if (IS_ERR(hdmi->phy)) { + ret = PTR_ERR(hdmi->phy); + dev_err(&pdev->dev, "failed to load phy: %d\n", ret); + hdmi->phy = NULL; + goto fail; + } } hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI"); @@ -107,6 +124,18 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) goto fail; } + /* HDCP needs physical address of hdmi register */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + config->mmio_name); + hdmi->mmio_phy_addr = res->start; + + hdmi->qfprom_mmio = msm_ioremap(pdev, + config->qfprom_mmio_name, "HDMI_QFPROM"); + if (IS_ERR(hdmi->qfprom_mmio)) { + dev_info(&pdev->dev, "can't find qfprom resource\n"); + hdmi->qfprom_mmio = NULL; + } + hdmi->hpd_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_regs[0]) * config->hpd_reg_cnt, GFP_KERNEL); if (!hdmi->hpd_regs) { @@ -189,6 +218,8 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) hdmi->pwr_clks[i] = clk; } + hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0); + hdmi->i2c = hdmi_i2c_init(hdmi); if (IS_ERR(hdmi->i2c)) { ret = PTR_ERR(hdmi->i2c); @@ -197,6 +228,12 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) goto fail; } + hdmi->hdcp_ctrl = hdmi_hdcp_init(hdmi); + if (IS_ERR(hdmi->hdcp_ctrl)) { + dev_warn(&pdev->dev, "failed to init hdcp: disabled\n"); + hdmi->hdcp_ctrl = NULL; + } + return hdmi; fail: @@ -310,7 +347,7 @@ static const char *pwr_clk_names_8x74[] = {"extp_clk", "alt_iface_clk"}; static const char *hpd_clk_names_8x74[] = {"iface_clk", "core_clk", "mdp_core_clk"}; static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0}; -static struct hdmi_platform_config hdmi_tx_8074_config = { +static struct hdmi_platform_config hdmi_tx_8974_config = { .phy_init = hdmi_phy_8x74_init, HDMI_CFG(pwr_reg, 8x74), HDMI_CFG(hpd_reg, 8x74), @@ -330,9 +367,21 @@ static struct hdmi_platform_config hdmi_tx_8084_config = { .hpd_freq = hpd_clk_freq_8x74, }; +static const char *hpd_reg_names_8x94[] = {}; + +static struct hdmi_platform_config hdmi_tx_8994_config = { + .phy_init = NULL, /* nothing to do for this HDMI PHY 20nm */ + HDMI_CFG(pwr_reg, 8x74), + HDMI_CFG(hpd_reg, 8x94), + HDMI_CFG(pwr_clk, 8x74), + HDMI_CFG(hpd_clk, 8x74), + .hpd_freq = hpd_clk_freq_8x74, +}; + static const struct of_device_id dt_match[] = { + { .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config }, { .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config }, - { .compatible = "qcom,hdmi-tx-8074", .data = &hdmi_tx_8074_config }, + { .compatible = "qcom,hdmi-tx-8974", .data = &hdmi_tx_8974_config }, { .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config }, { .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8660_config }, {} @@ -347,8 +396,7 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char snprintf(name2, sizeof(name2), "%s-gpio", name); gpio = of_get_named_gpio(of_node, name2, 0); if (gpio < 0) { - dev_err(dev, "failed to get gpio: %s (%d)\n", - name, gpio); + DBG("failed to get gpio: %s (%d)", name, gpio); gpio = -1; } } @@ -376,6 +424,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) } hdmi_cfg->mmio_name = "core_physical"; + hdmi_cfg->qfprom_mmio_name = "qfprom_physical"; hdmi_cfg->ddc_clk_gpio = get_gpio(dev, of_node, 
"qcom,hdmi-tx-ddc-clk"); hdmi_cfg->ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data"); hdmi_cfg->hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd"); @@ -391,7 +440,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) if (cpu_is_apq8064()) { static const char *hpd_reg_names[] = {"8921_hdmi_mvs"}; config.phy_init = hdmi_phy_8960_init; - config.mmio_name = "hdmi_msm_hdmi_addr"; config.hpd_reg_names = hpd_reg_names; config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); config.hpd_clk_names = hpd_clk_names; @@ -404,7 +452,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) } else if (cpu_is_msm8960() || cpu_is_msm8960ab()) { static const char *hpd_reg_names[] = {"8921_hdmi_mvs"}; config.phy_init = hdmi_phy_8960_init; - config.mmio_name = "hdmi_msm_hdmi_addr"; config.hpd_reg_names = hpd_reg_names; config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); config.hpd_clk_names = hpd_clk_names; @@ -419,7 +466,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) "8901_hdmi_mvs", "8901_mpp0" }; config.phy_init = hdmi_phy_8x60_init; - config.mmio_name = "hdmi_msm_hdmi_addr"; config.hpd_reg_names = hpd_reg_names; config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); config.hpd_clk_names = hpd_clk_names; @@ -430,6 +476,9 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) config.mux_en_gpio = -1; config.mux_sel_gpio = -1; } + config.mmio_name = "hdmi_msm_hdmi_addr"; + config.qfprom_mmio_name = "hdmi_msm_qfprom_addr"; + hdmi_cfg = &config; #endif dev->platform_data = hdmi_cfg; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index 68fdfb3622a5..d0e663192d01 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -37,6 +37,8 @@ struct hdmi_audio { int rate; }; +struct hdmi_hdcp_ctrl; + struct hdmi { struct drm_device *dev; struct platform_device *pdev; @@ -51,6 +53,8 @@ struct hdmi { unsigned long int pixclock; void __iomem *mmio; + void __iomem *qfprom_mmio; + phys_addr_t mmio_phy_addr; struct regulator **hpd_regs; struct regulator **pwr_regs; @@ -68,12 +72,25 @@ struct hdmi { bool hdmi_mode; /* are we in hdmi mode? */ int irq; + struct workqueue_struct *workq; + + struct hdmi_hdcp_ctrl *hdcp_ctrl; + + /* + * spinlock to protect registers shared by different execution + * REG_HDMI_CTRL + * REG_HDMI_DDC_ARBITRATION + * REG_HDMI_HDCP_INT_CTRL + * REG_HDMI_HPD_CTRL + */ + spinlock_t reg_lock; }; /* platform config data (ie. 
from DT, or pdata) */ struct hdmi_platform_config { struct hdmi_phy *(*phy_init)(struct hdmi *hdmi); const char *mmio_name; + const char *qfprom_mmio_name; /* regulators that need to be on for hpd: */ const char **hpd_reg_names; @@ -109,6 +126,11 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg) return msm_readl(hdmi->mmio + reg); } +static inline u32 hdmi_qfprom_read(struct hdmi *hdmi, u32 reg) +{ + return msm_readl(hdmi->qfprom_mmio + reg); +} + /* * The phy appears to be different, for example between 8960 and 8x60, * so split the phy related functions out and load the correct one at @@ -117,7 +139,6 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg) struct hdmi_phy_funcs { void (*destroy)(struct hdmi_phy *phy); - void (*reset)(struct hdmi_phy *phy); void (*powerup)(struct hdmi_phy *phy, unsigned long int pixclock); void (*powerdown)(struct hdmi_phy *phy); }; @@ -163,4 +184,13 @@ void hdmi_i2c_irq(struct i2c_adapter *i2c); void hdmi_i2c_destroy(struct i2c_adapter *i2c); struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi); +/* + * hdcp + */ +struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi); +void hdmi_hdcp_destroy(struct hdmi *hdmi); +void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl); +void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl); +void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl); + #endif /* __HDMI_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h index e6f034808371..0b1b5586ff35 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h @@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57) +- 
/home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) @@ -441,6 +441,12 @@ static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val) #define REG_HDMI_HDCP_SW_LOWER_AKSV 0x00000288 +#define REG_HDMI_CEC_CTRL 0x0000028c + +#define REG_HDMI_CEC_WR_DATA 0x00000290 + +#define REG_HDMI_CEC_CEC_RETRANSMIT 0x00000294 + #define REG_HDMI_CEC_STATUS 0x00000298 #define REG_HDMI_CEC_INT 0x0000029c diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c index 872485f60134..df232e20c13e 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c @@ -203,7 +203,6 @@ int hdmi_audio_update(struct hdmi *hdmi) audio_config |= HDMI_AUDIO_CFG_FIFO_WATERMARK(4); audio_config |= HDMI_AUDIO_CFG_ENGINE_ENABLE; } else { - hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE); acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_CONT; acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_SEND; vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_ENABLE; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index a7a1d8267cf0..92b69ae8caf9 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c @@ -100,8 +100,13 @@ static void hdmi_bridge_pre_enable(struct drm_bridge *bridge) hdmi_audio_update(hdmi); } - phy->funcs->powerup(phy, hdmi->pixclock); + if (phy) + phy->funcs->powerup(phy, hdmi->pixclock); + hdmi_set_mode(hdmi, true); + + if (hdmi->hdcp_ctrl) + hdmi_hdcp_on(hdmi->hdcp_ctrl); } static void hdmi_bridge_enable(struct drm_bridge *bridge) @@ -118,9 +123,14 @@ static void hdmi_bridge_post_disable(struct drm_bridge *bridge) struct hdmi *hdmi = hdmi_bridge->hdmi; struct hdmi_phy *phy = hdmi->phy; + if (hdmi->hdcp_ctrl) + hdmi_hdcp_off(hdmi->hdcp_ctrl); + DBG("power down"); hdmi_set_mode(hdmi, false); - phy->funcs->powerdown(phy); + + if (phy) + phy->funcs->powerdown(phy); if (hdmi->power_on) { power_off(bridge); @@ -142,8 +152,6 @@ static void hdmi_bridge_mode_set(struct drm_bridge *bridge, hdmi->pixclock = mode->clock * 1000; - hdmi->hdmi_mode = drm_match_cea_mode(mode) > 1; - hstart = mode->htotal - mode->hsync_start; hend = mode->htotal - mode->hsync_start + mode->hdisplay; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index 54aa93ff5473..a3b05ae52dae 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -28,6 +28,55 @@ struct hdmi_connector { }; #define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base) +static void hdmi_phy_reset(struct hdmi *hdmi) +{ + unsigned int val; + + val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL); + + if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { + /* pull low */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val & ~HDMI_PHY_CTRL_SW_RESET); + } else { + /* pull high */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val | HDMI_PHY_CTRL_SW_RESET); + } + + if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) { + /* pull low */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val & 
~HDMI_PHY_CTRL_SW_RESET_PLL); + } else { + /* pull high */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val | HDMI_PHY_CTRL_SW_RESET_PLL); + } + + msleep(100); + + if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { + /* pull high */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val | HDMI_PHY_CTRL_SW_RESET); + } else { + /* pull low */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val & ~HDMI_PHY_CTRL_SW_RESET); + } + + if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) { + /* pull high */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val | HDMI_PHY_CTRL_SW_RESET_PLL); + } else { + /* pull low */ + hdmi_write(hdmi, REG_HDMI_PHY_CTRL, + val & ~HDMI_PHY_CTRL_SW_RESET_PLL); + } +} + static int gpio_config(struct hdmi *hdmi, bool on) { struct device *dev = &hdmi->pdev->dev; @@ -35,21 +84,25 @@ static int gpio_config(struct hdmi *hdmi, bool on) int ret; if (on) { - ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK"); - if (ret) { - dev_err(dev, "'%s'(%d) gpio_request failed: %d\n", - "HDMI_DDC_CLK", config->ddc_clk_gpio, ret); - goto error1; + if (config->ddc_clk_gpio != -1) { + ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK"); + if (ret) { + dev_err(dev, "'%s'(%d) gpio_request failed: %d\n", + "HDMI_DDC_CLK", config->ddc_clk_gpio, ret); + goto error1; + } + gpio_set_value_cansleep(config->ddc_clk_gpio, 1); } - gpio_set_value_cansleep(config->ddc_clk_gpio, 1); - ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA"); - if (ret) { - dev_err(dev, "'%s'(%d) gpio_request failed: %d\n", - "HDMI_DDC_DATA", config->ddc_data_gpio, ret); - goto error2; + if (config->ddc_data_gpio != -1) { + ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA"); + if (ret) { + dev_err(dev, "'%s'(%d) gpio_request failed: %d\n", + "HDMI_DDC_DATA", config->ddc_data_gpio, ret); + goto error2; + } + gpio_set_value_cansleep(config->ddc_data_gpio, 1); } - gpio_set_value_cansleep(config->ddc_data_gpio, 1); ret = gpio_request(config->hpd_gpio, "HDMI_HPD"); if (ret) { @@ -94,8 +147,12 @@ static int gpio_config(struct hdmi *hdmi, bool on) } DBG("gpio on"); } else { - gpio_free(config->ddc_clk_gpio); - gpio_free(config->ddc_data_gpio); + if (config->ddc_clk_gpio != -1) + gpio_free(config->ddc_clk_gpio); + + if (config->ddc_data_gpio != -1) + gpio_free(config->ddc_data_gpio); + gpio_free(config->hpd_gpio); if (config->mux_en_gpio != -1) { @@ -126,9 +183,11 @@ error5: error4: gpio_free(config->hpd_gpio); error3: - gpio_free(config->ddc_data_gpio); + if (config->ddc_data_gpio != -1) + gpio_free(config->ddc_data_gpio); error2: - gpio_free(config->ddc_clk_gpio); + if (config->ddc_clk_gpio != -1) + gpio_free(config->ddc_clk_gpio); error1: return ret; } @@ -138,9 +197,9 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector) struct hdmi *hdmi = hdmi_connector->hdmi; const struct hdmi_platform_config *config = hdmi->config; struct device *dev = &hdmi->pdev->dev; - struct hdmi_phy *phy = hdmi->phy; uint32_t hpd_ctrl; int i, ret; + unsigned long flags; for (i = 0; i < config->hpd_reg_cnt; i++) { ret = regulator_enable(hdmi->hpd_regs[i]); @@ -181,7 +240,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector) } hdmi_set_mode(hdmi, false); - phy->funcs->reset(phy); + hdmi_phy_reset(hdmi); hdmi_set_mode(hdmi, true); hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b); @@ -192,6 +251,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector) HDMI_HPD_INT_CTRL_INT_EN); /* set timeout to 4.1ms (max) for hardware debounce */ + spin_lock_irqsave(&hdmi->reg_lock, flags); hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL); hpd_ctrl |= 
HDMI_HPD_CTRL_TIMEOUT(0x1fff); @@ -200,6 +260,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector) ~HDMI_HPD_CTRL_ENABLE & hpd_ctrl); hdmi_write(hdmi, REG_HDMI_HPD_CTRL, HDMI_HPD_CTRL_ENABLE | hpd_ctrl); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); return 0; @@ -250,7 +311,6 @@ hotplug_work(struct work_struct *work) void hdmi_connector_irq(struct drm_connector *connector) { struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); - struct msm_drm_private *priv = connector->dev->dev_private; struct hdmi *hdmi = hdmi_connector->hdmi; uint32_t hpd_int_status, hpd_int_ctrl; @@ -274,7 +334,7 @@ void hdmi_connector_irq(struct drm_connector *connector) hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT; hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl); - queue_work(priv->wq, &hdmi_connector->hpd_work); + queue_work(hdmi->workq, &hdmi_connector->hpd_work); } } @@ -350,6 +410,7 @@ static int hdmi_connector_get_modes(struct drm_connector *connector) hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl); + hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid); drm_mode_connector_update_edid_property(connector, edid); if (edid) { diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c new file mode 100644 index 000000000000..1dc9c34eb0df --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c @@ -0,0 +1,1437 @@ +/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include "hdmi.h" +#include <linux/qcom_scm.h> + +#define HDCP_REG_ENABLE 0x01 +#define HDCP_REG_DISABLE 0x00 +#define HDCP_PORT_ADDR 0x74 + +#define HDCP_INT_STATUS_MASK ( \ + HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT | \ + HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT | \ + HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_INT | \ + HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_INT) + +#define AUTH_WORK_RETRIES_TIME 100 +#define AUTH_RETRIES_TIME 30 + +/* QFPROM Registers for HDMI/HDCP */ +#define QFPROM_RAW_FEAT_CONFIG_ROW0_LSB 0x000000F8 +#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB 0x000000FC +#define HDCP_KSV_LSB 0x000060D8 +#define HDCP_KSV_MSB 0x000060DC + +enum DS_TYPE { /* type of downstream device */ + DS_UNKNOWN, + DS_RECEIVER, + DS_REPEATER, +}; + +enum hdmi_hdcp_state { + HDCP_STATE_NO_AKSV, + HDCP_STATE_INACTIVE, + HDCP_STATE_AUTHENTICATING, + HDCP_STATE_AUTHENTICATED, + HDCP_STATE_AUTH_FAILED +}; + +struct hdmi_hdcp_reg_data { + u32 reg_id; + u32 off; + char *name; + u32 reg_val; +}; + +struct hdmi_hdcp_ctrl { + struct hdmi *hdmi; + u32 auth_retries; + bool tz_hdcp; + enum hdmi_hdcp_state hdcp_state; + struct work_struct hdcp_auth_work; + struct work_struct hdcp_reauth_work; + +#define AUTH_ABORT_EV 1 +#define AUTH_RESULT_RDY_EV 2 + unsigned long auth_event; + wait_queue_head_t auth_event_queue; + + u32 ksv_fifo_w_index; + /* + * store aksv from qfprom + */ + u32 aksv_lsb; + u32 aksv_msb; + bool aksv_valid; + u32 ds_type; + u32 bksv_lsb; + u32 bksv_msb; + u8 dev_count; + u8 depth; + u8 ksv_list[5 * 127]; + bool max_cascade_exceeded; + bool max_dev_exceeded; +}; + +static int hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset, + u8 *data, u16 data_len) +{ + int rc; + int retry = 5; + struct i2c_msg msgs[] = { + { + .addr = addr >> 1, + .flags = 0, + .len = 1, + .buf = &offset, + }, { + .addr = addr >> 1, + .flags = I2C_M_RD, + .len = data_len, + .buf = data, + } + }; + + DBG("Start DDC read"); +retry: + rc = i2c_transfer(hdmi->i2c, msgs, 2); + + retry--; + if (rc == 2) + rc = 0; + else if (retry > 0) + goto retry; + else + rc = -EIO; + + DBG("End DDC read %d", rc); + + return rc; +} + +#define HDCP_DDC_WRITE_MAX_BYTE_NUM 32 + +static int hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset, + u8 *data, u16 data_len) +{ + int rc; + int retry = 10; + u8 buf[HDCP_DDC_WRITE_MAX_BYTE_NUM]; + struct i2c_msg msgs[] = { + { + .addr = addr >> 1, + .flags = 0, + .len = 1, + } + }; + + DBG("Start DDC write"); + if (data_len > (HDCP_DDC_WRITE_MAX_BYTE_NUM - 1)) { + pr_err("%s: write size too big\n", __func__); + return -ERANGE; + } + + buf[0] = offset; + memcpy(&buf[1], data, data_len); + msgs[0].buf = buf; + msgs[0].len = data_len + 1; +retry: + rc = i2c_transfer(hdmi->i2c, msgs, 1); + + retry--; + if (rc == 1) + rc = 0; + else if (retry > 0) + goto retry; + else + rc = -EIO; + + DBG("End DDC write %d", rc); + + return rc; +} + +static int hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg, + u32 *pdata, u32 count) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + struct qcom_scm_hdcp_req scm_buf[QCOM_SCM_HDCP_MAX_REQ_CNT]; + u32 resp, phy_addr, idx = 0; + int i, ret = 0; + + WARN_ON(!pdata || !preg || (count == 0)); + + if (hdcp_ctrl->tz_hdcp) { + phy_addr = (u32)hdmi->mmio_phy_addr; + + while (count) { + memset(scm_buf, 0, sizeof(scm_buf)); + for (i = 0; i < count && i < QCOM_SCM_HDCP_MAX_REQ_CNT; + i++) { + scm_buf[i].addr = phy_addr + preg[idx]; + scm_buf[i].val = pdata[idx]; + idx++; + } + ret = qcom_scm_hdcp_req(scm_buf, i, &resp); + + if (ret || resp) { + pr_err("%s: error: scm_call ret=%d resp=%u\n", + 
__func__, ret, resp); + ret = -EINVAL; + break; + } + + count -= i; + } + } else { + for (i = 0; i < count; i++) + hdmi_write(hdmi, preg[i], pdata[i]); + } + + return ret; +} + +void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 reg_val, hdcp_int_status; + unsigned long flags; + + spin_lock_irqsave(&hdmi->reg_lock, flags); + reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_INT_CTRL); + hdcp_int_status = reg_val & HDCP_INT_STATUS_MASK; + if (!hdcp_int_status) { + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + return; + } + /* Clear Interrupts */ + reg_val |= hdcp_int_status << 1; + /* Clear AUTH_FAIL_INFO as well */ + if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT) + reg_val |= HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK; + hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, reg_val); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + DBG("hdcp irq %x", hdcp_int_status); + + if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT) { + pr_info("%s:AUTH_SUCCESS_INT received\n", __func__); + if (HDCP_STATE_AUTHENTICATING == hdcp_ctrl->hdcp_state) { + set_bit(AUTH_RESULT_RDY_EV, &hdcp_ctrl->auth_event); + wake_up_all(&hdcp_ctrl->auth_event_queue); + } + } + + if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT) { + reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS); + pr_info("%s: AUTH_FAIL_INT rcvd, LINK0_STATUS=0x%08x\n", + __func__, reg_val); + if (HDCP_STATE_AUTHENTICATED == hdcp_ctrl->hdcp_state) + queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work); + else if (HDCP_STATE_AUTHENTICATING == + hdcp_ctrl->hdcp_state) { + set_bit(AUTH_RESULT_RDY_EV, &hdcp_ctrl->auth_event); + wake_up_all(&hdcp_ctrl->auth_event_queue); + } + } +} + +static int hdmi_hdcp_msleep(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 ms, u32 ev) +{ + int rc; + + rc = wait_event_timeout(hdcp_ctrl->auth_event_queue, + !!test_bit(ev, &hdcp_ctrl->auth_event), + msecs_to_jiffies(ms)); + if (rc) { + pr_info("%s: msleep is canceled by event %d\n", + __func__, ev); + clear_bit(ev, &hdcp_ctrl->auth_event); + return -ECANCELED; + } + + return 0; +} + +static int hdmi_hdcp_read_validate_aksv(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + + /* Fetch aksv from QFPROM, this info should be public. */ + hdcp_ctrl->aksv_lsb = hdmi_qfprom_read(hdmi, HDCP_KSV_LSB); + hdcp_ctrl->aksv_msb = hdmi_qfprom_read(hdmi, HDCP_KSV_MSB); + + /* check there are 20 ones in AKSV */ + if ((hweight32(hdcp_ctrl->aksv_lsb) + hweight32(hdcp_ctrl->aksv_msb)) + != 20) { + pr_err("%s: AKSV QFPROM doesn't have 20 1's, 20 0's\n", + __func__); + pr_err("%s: QFPROM AKSV chk failed (AKSV=%02x%08x)\n", + __func__, hdcp_ctrl->aksv_msb, + hdcp_ctrl->aksv_lsb); + return -EINVAL; + } + DBG("AKSV=%02x%08x", hdcp_ctrl->aksv_msb, hdcp_ctrl->aksv_lsb); + + return 0; +} + +static int reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 reg_val, failure, nack0; + int rc = 0; + + /* Check for any DDC transfer failures */ + reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS); + failure = reg_val & HDMI_HDCP_DDC_STATUS_FAILED; + nack0 = reg_val & HDMI_HDCP_DDC_STATUS_NACK0; + DBG("HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d", + reg_val, failure, nack0); + + if (failure) { + /* + * Indicates that the last HDCP HW DDC transfer failed. + * This occurs when a transfer is attempted with HDCP DDC + * disabled (HDCP_DDC_DISABLE=1) or the number of retries + * matches HDCP_DDC_RETRY_CNT. + * Failure occurred, let's clear it. 
+ */ + DBG("DDC failure detected"); + + /* First, Disable DDC */ + hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_0, + HDMI_HDCP_DDC_CTRL_0_DISABLE); + + /* ACK the Failure to Clear it */ + reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_CTRL_1); + reg_val |= HDMI_HDCP_DDC_CTRL_1_FAILED_ACK; + hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_1, reg_val); + + /* Check if the FAILURE got Cleared */ + reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS); + if (reg_val & HDMI_HDCP_DDC_STATUS_FAILED) + pr_info("%s: Unable to clear HDCP DDC Failure\n", + __func__); + + /* Re-Enable HDCP DDC */ + hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_0, 0); + } + + if (nack0) { + DBG("Before: HDMI_DDC_SW_STATUS=0x%08x", + hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS)); + /* Reset HDMI DDC software status */ + reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL); + reg_val |= HDMI_DDC_CTRL_SW_STATUS_RESET; + hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val); + + rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); + + reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL); + reg_val &= ~HDMI_DDC_CTRL_SW_STATUS_RESET; + hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val); + + /* Reset HDMI DDC Controller */ + reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL); + reg_val |= HDMI_DDC_CTRL_SOFT_RESET; + hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val); + + /* If previous msleep is aborted, skip this msleep */ + if (!rc) + rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); + + reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL); + reg_val &= ~HDMI_DDC_CTRL_SOFT_RESET; + hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val); + DBG("After: HDMI_DDC_SW_STATUS=0x%08x", + hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS)); + } + + return rc; +} + +static int hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + int rc; + u32 hdcp_ddc_status, ddc_hw_status; + u32 xfer_done, xfer_req, hw_done; + bool hw_not_ready; + u32 timeout_count; + struct hdmi *hdmi = hdcp_ctrl->hdmi; + + if (hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS) == 0) + return 0; + + /* Wait to be clean on DDC HW engine */ + timeout_count = 100; + do { + hdcp_ddc_status = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS); + ddc_hw_status = hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS); + + xfer_done = hdcp_ddc_status & HDMI_HDCP_DDC_STATUS_XFER_DONE; + xfer_req = hdcp_ddc_status & HDMI_HDCP_DDC_STATUS_XFER_REQ; + hw_done = ddc_hw_status & HDMI_DDC_HW_STATUS_DONE; + hw_not_ready = !xfer_done || xfer_req || !hw_done; + + if (hw_not_ready) + break; + + timeout_count--; + if (!timeout_count) { + pr_warn("%s: hw_ddc_clean failed\n", __func__); + return -ETIMEDOUT; + } + + rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); + if (rc) + return rc; + } while (1); + + return 0; +} + +static void hdmi_hdcp_reauth_work(struct work_struct *work) +{ + struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work, + struct hdmi_hdcp_ctrl, hdcp_reauth_work); + struct hdmi *hdmi = hdcp_ctrl->hdmi; + unsigned long flags; + u32 reg_val; + + DBG("HDCP REAUTH WORK"); + /* + * Disable HPD circuitry. 
+ * This is needed to reset the HDCP cipher engine so that when we + * attempt a re-authentication, HW would clear the AN0_READY and + * AN1_READY bits in HDMI_HDCP_LINK0_STATUS register + */ + spin_lock_irqsave(&hdmi->reg_lock, flags); + reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL); + reg_val &= ~HDMI_HPD_CTRL_ENABLE; + hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val); + + /* Disable HDCP interrupts */ + hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, 0); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + hdmi_write(hdmi, REG_HDMI_HDCP_RESET, + HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE); + + /* Wait to be clean on DDC HW engine */ + if (hdmi_hdcp_hw_ddc_clean(hdcp_ctrl)) { + pr_info("%s: reauth work aborted\n", __func__); + return; + } + + /* Disable encryption and disable the HDCP block */ + hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, 0); + + /* Enable HPD circuitry */ + spin_lock_irqsave(&hdmi->reg_lock, flags); + reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL); + reg_val |= HDMI_HPD_CTRL_ENABLE; + hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + /* + * Only retry defined times then abort current authenticating process + */ + if (++hdcp_ctrl->auth_retries == AUTH_RETRIES_TIME) { + hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE; + hdcp_ctrl->auth_retries = 0; + pr_info("%s: abort reauthentication!\n", __func__); + + return; + } + + DBG("Queue AUTH WORK"); + hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING; + queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work); +} + +static int hdmi_hdcp_auth_prepare(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 link0_status; + u32 reg_val; + unsigned long flags; + int rc; + + if (!hdcp_ctrl->aksv_valid) { + rc = hdmi_hdcp_read_validate_aksv(hdcp_ctrl); + if (rc) { + pr_err("%s: ASKV validation failed\n", __func__); + hdcp_ctrl->hdcp_state = HDCP_STATE_NO_AKSV; + return -ENOTSUPP; + } + hdcp_ctrl->aksv_valid = true; + } + + spin_lock_irqsave(&hdmi->reg_lock, flags); + /* disable HDMI Encrypt */ + reg_val = hdmi_read(hdmi, REG_HDMI_CTRL); + reg_val &= ~HDMI_CTRL_ENCRYPTED; + hdmi_write(hdmi, REG_HDMI_CTRL, reg_val); + + /* Enabling Software DDC */ + reg_val = hdmi_read(hdmi, REG_HDMI_DDC_ARBITRATION); + reg_val &= ~HDMI_DDC_ARBITRATION_HW_ARBITRATION; + hdmi_write(hdmi, REG_HDMI_DDC_ARBITRATION, reg_val); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + /* + * Write AKSV read from QFPROM to the HDCP registers. + * This step is needed for HDCP authentication and must be + * written before enabling HDCP. + */ + hdmi_write(hdmi, REG_HDMI_HDCP_SW_LOWER_AKSV, hdcp_ctrl->aksv_lsb); + hdmi_write(hdmi, REG_HDMI_HDCP_SW_UPPER_AKSV, hdcp_ctrl->aksv_msb); + + /* + * HDCP setup prior to enabling HDCP_CTRL. + * Setup seed values for random number An. 
+ */ + hdmi_write(hdmi, REG_HDMI_HDCP_ENTROPY_CTRL0, 0xB1FFB0FF); + hdmi_write(hdmi, REG_HDMI_HDCP_ENTROPY_CTRL1, 0xF00DFACE); + + /* Disable the RngCipher state */ + reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DEBUG_CTRL); + reg_val &= ~HDMI_HDCP_DEBUG_CTRL_RNG_CIPHER; + hdmi_write(hdmi, REG_HDMI_HDCP_DEBUG_CTRL, reg_val); + DBG("HDCP_DEBUG_CTRL=0x%08x", + hdmi_read(hdmi, REG_HDMI_HDCP_DEBUG_CTRL)); + + /* + * Ensure that all register writes are completed before + * enabling HDCP cipher + */ + wmb(); + + /* + * Enable HDCP + * This needs to be done as early as possible in order for the + * hardware to make An available to read + */ + hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, HDMI_HDCP_CTRL_ENABLE); + + /* + * If we had stale values for the An ready bit, it should most + * likely be cleared now after enabling HDCP cipher + */ + link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS); + DBG("After enabling HDCP Link0_Status=0x%08x", link0_status); + if (!(link0_status & + (HDMI_HDCP_LINK0_STATUS_AN_0_READY | + HDMI_HDCP_LINK0_STATUS_AN_1_READY))) + DBG("An not ready after enabling HDCP"); + + /* Clear any DDC failures from previous tries before enable HDCP*/ + rc = reset_hdcp_ddc_failures(hdcp_ctrl); + + return rc; +} + +static void hdmi_hdcp_auth_fail(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 reg_val; + unsigned long flags; + + DBG("hdcp auth failed, queue reauth work"); + /* clear HDMI Encrypt */ + spin_lock_irqsave(&hdmi->reg_lock, flags); + reg_val = hdmi_read(hdmi, REG_HDMI_CTRL); + reg_val &= ~HDMI_CTRL_ENCRYPTED; + hdmi_write(hdmi, REG_HDMI_CTRL, reg_val); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + hdcp_ctrl->hdcp_state = HDCP_STATE_AUTH_FAILED; + queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work); +} + +static void hdmi_hdcp_auth_done(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 reg_val; + unsigned long flags; + + /* + * Disable software DDC before going into part3 to make sure + * there is no Arbitration between software and hardware for DDC + */ + spin_lock_irqsave(&hdmi->reg_lock, flags); + reg_val = hdmi_read(hdmi, REG_HDMI_DDC_ARBITRATION); + reg_val |= HDMI_DDC_ARBITRATION_HW_ARBITRATION; + hdmi_write(hdmi, REG_HDMI_DDC_ARBITRATION, reg_val); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + /* enable HDMI Encrypt */ + spin_lock_irqsave(&hdmi->reg_lock, flags); + reg_val = hdmi_read(hdmi, REG_HDMI_CTRL); + reg_val |= HDMI_CTRL_ENCRYPTED; + hdmi_write(hdmi, REG_HDMI_CTRL, reg_val); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATED; + hdcp_ctrl->auth_retries = 0; +} + +/* + * hdcp authenticating part 1 + * Wait Key/An ready + * Read BCAPS from sink + * Write BCAPS and AKSV into HDCP engine + * Write An and AKSV to sink + * Read BKSV from sink and write into HDCP engine + */ +static int hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + int rc; + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 link0_status, keys_state; + u32 timeout_count; + bool an_ready; + + /* Wait for HDCP keys to be checked and validated */ + timeout_count = 100; + do { + link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS); + keys_state = (link0_status >> 28) & 0x7; + if (keys_state == HDCP_KEYS_STATE_VALID) + break; + + DBG("Keys not ready(%d). 
s=%d, l0=%0x08x", + timeout_count, keys_state, link0_status); + + timeout_count--; + if (!timeout_count) { + pr_err("%s: Wait key state timedout", __func__); + return -ETIMEDOUT; + } + + rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); + if (rc) + return rc; + } while (1); + + timeout_count = 100; + do { + link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS); + an_ready = (link0_status & HDMI_HDCP_LINK0_STATUS_AN_0_READY) + && (link0_status & HDMI_HDCP_LINK0_STATUS_AN_1_READY); + if (an_ready) + break; + + DBG("An not ready(%d). l0_status=0x%08x", + timeout_count, link0_status); + + timeout_count--; + if (!timeout_count) { + pr_err("%s: Wait An timedout", __func__); + return -ETIMEDOUT; + } + + rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); + if (rc) + return rc; + } while (1); + + return 0; +} + +static int hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + int rc = 0; + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 link0_aksv_0, link0_aksv_1; + u32 link0_an[2]; + u8 aksv[5]; + + /* Read An0 and An1 */ + link0_an[0] = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA5); + link0_an[1] = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA6); + + /* Read AKSV */ + link0_aksv_0 = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA3); + link0_aksv_1 = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4); + + DBG("Link ASKV=%08x%08x", link0_aksv_0, link0_aksv_1); + /* Copy An and AKSV to byte arrays for transmission */ + aksv[0] = link0_aksv_0 & 0xFF; + aksv[1] = (link0_aksv_0 >> 8) & 0xFF; + aksv[2] = (link0_aksv_0 >> 16) & 0xFF; + aksv[3] = (link0_aksv_0 >> 24) & 0xFF; + aksv[4] = link0_aksv_1 & 0xFF; + + /* Write An to offset 0x18 */ + rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x18, (u8 *)link0_an, + (u16)sizeof(link0_an)); + if (rc) { + pr_err("%s:An write failed\n", __func__); + return rc; + } + DBG("Link0-An=%08x%08x", link0_an[0], link0_an[1]); + + /* Write AKSV to offset 0x10 */ + rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x10, aksv, 5); + if (rc) { + pr_err("%s:AKSV write failed\n", __func__); + return rc; + } + DBG("Link0-AKSV=%02x%08x", link0_aksv_1 & 0xFF, link0_aksv_0); + + return 0; +} + +static int hdmi_hdcp_recv_bksv(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + int rc = 0; + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u8 bksv[5]; + u32 reg[2], data[2]; + + /* Read BKSV at offset 0x00 */ + rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x00, bksv, 5); + if (rc) { + pr_err("%s:BKSV read failed\n", __func__); + return rc; + } + + hdcp_ctrl->bksv_lsb = bksv[0] | (bksv[1] << 8) | + (bksv[2] << 16) | (bksv[3] << 24); + hdcp_ctrl->bksv_msb = bksv[4]; + DBG(":BKSV=%02x%08x", hdcp_ctrl->bksv_msb, hdcp_ctrl->bksv_lsb); + + /* check there are 20 ones in BKSV */ + if ((hweight32(hdcp_ctrl->bksv_lsb) + hweight32(hdcp_ctrl->bksv_msb)) + != 20) { + pr_err(": BKSV doesn't have 20 1's and 20 0's\n"); + pr_err(": BKSV chk fail. 
BKSV=%02x%02x%02x%02x%02x\n", + bksv[4], bksv[3], bksv[2], bksv[1], bksv[0]); + return -EINVAL; + } + + /* Write BKSV read from sink to HDCP registers */ + reg[0] = REG_HDMI_HDCP_RCVPORT_DATA0; + data[0] = hdcp_ctrl->bksv_lsb; + reg[1] = REG_HDMI_HDCP_RCVPORT_DATA1; + data[1] = hdcp_ctrl->bksv_msb; + rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2); + + return rc; +} + +static int hdmi_hdcp_recv_bcaps(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + int rc = 0; + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 reg, data; + u8 bcaps; + + rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1); + if (rc) { + pr_err("%s:BCAPS read failed\n", __func__); + return rc; + } + DBG("BCAPS=%02x", bcaps); + + /* receiver (0), repeater (1) */ + hdcp_ctrl->ds_type = (bcaps & BIT(6)) ? DS_REPEATER : DS_RECEIVER; + + /* Write BCAPS to the hardware */ + reg = REG_HDMI_HDCP_RCVPORT_DATA12; + data = (u32)bcaps; + rc = hdmi_hdcp_scm_wr(hdcp_ctrl, ®, &data, 1); + + return rc; +} + +static int hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + unsigned long flags; + int rc; + + /* Wait for AKSV key and An ready */ + rc = hdmi_hdcp_wait_key_an_ready(hdcp_ctrl); + if (rc) { + pr_err("%s: wait key and an ready failed\n", __func__); + return rc; + }; + + /* Read BCAPS and send to HDCP engine */ + rc = hdmi_hdcp_recv_bcaps(hdcp_ctrl); + if (rc) { + pr_err("%s: read bcaps error, abort\n", __func__); + return rc; + } + + /* + * 1.1_Features turned off by default. + * No need to write AInfo since 1.1_Features is disabled. + */ + hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4, 0); + + /* Send AKSV and An to sink */ + rc = hdmi_hdcp_send_aksv_an(hdcp_ctrl); + if (rc) { + pr_err("%s:An/Aksv write failed\n", __func__); + return rc; + } + + /* Read BKSV and send to HDCP engine*/ + rc = hdmi_hdcp_recv_bksv(hdcp_ctrl); + if (rc) { + pr_err("%s:BKSV Process failed\n", __func__); + return rc; + } + + /* Enable HDCP interrupts and ack/clear any stale interrupts */ + spin_lock_irqsave(&hdmi->reg_lock, flags); + hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, + HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_ACK | + HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_MASK | + HDMI_HDCP_INT_CTRL_AUTH_FAIL_ACK | + HDMI_HDCP_INT_CTRL_AUTH_FAIL_MASK | + HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + return 0; +} + +/* read R0' from sink and pass it to HDCP engine */ +static int hdmi_hdcp_auth_part1_recv_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + int rc = 0; + u8 buf[2]; + + /* + * HDCP Compliance Test case 1A-01: + * Wait here at least 100ms before reading R0' + */ + rc = hdmi_hdcp_msleep(hdcp_ctrl, 125, AUTH_ABORT_EV); + if (rc) + return rc; + + /* Read R0' at offset 0x08 */ + rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x08, buf, 2); + if (rc) { + pr_err("%s:R0' read failed\n", __func__); + return rc; + } + DBG("R0'=%02x%02x", buf[1], buf[0]); + + /* Write R0' to HDCP registers and check to see if it is a match */ + hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA2_0, + (((u32)buf[1]) << 8) | buf[0]); + + return 0; +} + +/* Wait for authenticating result: R0/R0' are matched or not */ +static int hdmi_hdcp_auth_part1_verify_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 link0_status; + int rc; + + /* wait for hdcp irq, 10 sec should be long enough */ + rc = hdmi_hdcp_msleep(hdcp_ctrl, 10000, AUTH_RESULT_RDY_EV); + if (!rc) { + pr_err("%s: Wait Auth IRQ timeout\n", __func__); + return -ETIMEDOUT; + } + + link0_status = 
hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS); + if (!(link0_status & HDMI_HDCP_LINK0_STATUS_RI_MATCHES)) { + pr_err("%s: Authentication Part I failed\n", __func__); + return -EINVAL; + } + + /* Enable HDCP Encryption */ + hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, + HDMI_HDCP_CTRL_ENABLE | + HDMI_HDCP_CTRL_ENCRYPTION_ENABLE); + + return 0; +} + +static int hdmi_hdcp_recv_check_bstatus(struct hdmi_hdcp_ctrl *hdcp_ctrl, + u16 *pbstatus) +{ + int rc; + struct hdmi *hdmi = hdcp_ctrl->hdmi; + bool max_devs_exceeded = false, max_cascade_exceeded = false; + u32 repeater_cascade_depth = 0, down_stream_devices = 0; + u16 bstatus; + u8 buf[2]; + + /* Read BSTATUS at offset 0x41 */ + rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x41, buf, 2); + if (rc) { + pr_err("%s: BSTATUS read failed\n", __func__); + goto error; + } + *pbstatus = bstatus = (buf[1] << 8) | buf[0]; + + + down_stream_devices = bstatus & 0x7F; + repeater_cascade_depth = (bstatus >> 8) & 0x7; + max_devs_exceeded = (bstatus & BIT(7)) ? true : false; + max_cascade_exceeded = (bstatus & BIT(11)) ? true : false; + + if (down_stream_devices == 0) { + /* + * If no downstream devices are attached to the repeater + * then part II fails. + * todo: The other approach would be to continue PART II. + */ + pr_err("%s: No downstream devices\n", __func__); + rc = -EINVAL; + goto error; + } + + /* + * HDCP Compliance 1B-05: + * Check if no. of devices connected to repeater + * exceed max_devices_connected from bit 7 of Bstatus. + */ + if (max_devs_exceeded) { + pr_err("%s: no. of devs connected exceeds max allowed", + __func__); + rc = -EINVAL; + goto error; + } + + /* + * HDCP Compliance 1B-06: + * Check if no. of cascade connected to repeater + * exceed max_cascade_connected from bit 11 of Bstatus. + */ + if (max_cascade_exceeded) { + pr_err("%s: no. of cascade conn exceeds max allowed", + __func__); + rc = -EINVAL; + goto error; + } + +error: + hdcp_ctrl->dev_count = down_stream_devices; + hdcp_ctrl->max_cascade_exceeded = max_cascade_exceeded; + hdcp_ctrl->max_dev_exceeded = max_devs_exceeded; + hdcp_ctrl->depth = repeater_cascade_depth; + return rc; +} + +static int hdmi_hdcp_auth_part2_wait_ksv_fifo_ready( + struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + int rc; + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 reg, data; + u32 timeout_count; + u16 bstatus; + u8 bcaps; + + /* + * Wait until READY bit is set in BCAPS, as per HDCP specifications + * maximum permitted time to check for READY bit is five seconds. 
+ */
+	timeout_count = 100;
+	do {
+		/* Read BCAPS at offset 0x40 */
+		rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
+		if (rc) {
+			pr_err("%s: BCAPS read failed\n", __func__);
+			return rc;
+		}
+
+		if (bcaps & BIT(5))
+			break;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Wait KSV fifo ready timedout", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	rc = hdmi_hdcp_recv_check_bstatus(hdcp_ctrl, &bstatus);
+	if (rc) {
+		pr_err("%s: bstatus error\n", __func__);
+		return rc;
+	}
+
+	/* Write BSTATUS and BCAPS to HDCP registers */
+	reg = REG_HDMI_HDCP_RCVPORT_DATA12;
+	data = bcaps | (bstatus << 8);
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+	if (rc) {
+		pr_err("%s: BSTATUS write failed\n", __func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * hdcp authenticating part 2: 2nd
+ * read ksv fifo from sink
+ * transfer V' from sink to HDCP engine
+ * reset SHA engine
+ */
+static int hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	int rc = 0;
+	struct hdmi_hdcp_reg_data reg_data[] = {
+		{REG_HDMI_HDCP_RCVPORT_DATA7, 0x20, "V' H0"},
+		{REG_HDMI_HDCP_RCVPORT_DATA8, 0x24, "V' H1"},
+		{REG_HDMI_HDCP_RCVPORT_DATA9, 0x28, "V' H2"},
+		{REG_HDMI_HDCP_RCVPORT_DATA10, 0x2C, "V' H3"},
+		{REG_HDMI_HDCP_RCVPORT_DATA11, 0x30, "V' H4"},
+	};
+	struct hdmi_hdcp_reg_data *rd;
+	u32 size = ARRAY_SIZE(reg_data);
+	u32 reg[ARRAY_SIZE(reg_data)];
+	u32 data[ARRAY_SIZE(reg_data)];
+	int i;
+
+	for (i = 0; i < size; i++) {
+		rd = &reg_data[i];
+		rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR,
+			rd->off, (u8 *)&data[i], (u16)sizeof(data[i]));
+		if (rc) {
+			pr_err("%s: Read %s failed\n", __func__, rd->name);
+			goto error;
+		}
+
+		DBG("%s =%x", rd->name, data[i]);
+		reg[i] = reg_data[i].reg_id;
+	}
+
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, size);
+
+error:
+	return rc;
+}
+
+static int hdmi_hdcp_recv_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 ksv_bytes;
+
+	ksv_bytes = 5 * hdcp_ctrl->dev_count;
+
+	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x43,
+		hdcp_ctrl->ksv_list, ksv_bytes);
+	if (rc)
+		pr_err("%s: KSV FIFO read failed\n", __func__);
+
+	return rc;
+}
+
+static int hdmi_hdcp_reset_sha_engine(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	u32 reg[2], data[2];
+	u32 rc = 0;
+
+	reg[0] = REG_HDMI_HDCP_SHA_CTRL;
+	data[0] = HDCP_REG_ENABLE;
+	reg[1] = REG_HDMI_HDCP_SHA_CTRL;
+	data[1] = HDCP_REG_DISABLE;
+
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
+
+	return rc;
+}
+
+static int hdmi_hdcp_auth_part2_recv_ksv_fifo(
+	struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	u32 timeout_count;
+
+	/*
+	 * Read KSV FIFO over DDC
+	 * Key Selection vector FIFO Used to pull downstream KSVs
+	 * from HDCP Repeaters.
+	 * All bytes (DEVICE_COUNT * 5) must be read in a single,
+	 * auto incrementing access.
+	 * All bytes read as 0x00 for HDCP Receivers that are not
+	 * HDCP Repeaters (REPEATER == 0). 
+ */
+	timeout_count = 100;
+	do {
+		rc = hdmi_hdcp_recv_ksv_fifo(hdcp_ctrl);
+		if (!rc)
+			break;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Recv ksv fifo timedout", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 25, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	rc = hdmi_hdcp_transfer_v_h(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: transfer V failed\n", __func__);
+		return rc;
+	}
+
+	/* reset SHA engine before write ksv fifo */
+	rc = hdmi_hdcp_reset_sha_engine(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: fail to reset sha engine\n", __func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * Write KSV FIFO to HDCP_SHA_DATA.
+ * This is done 1 byte at time starting with the LSB.
+ * Once 64 bytes have been written, we need to poll for
+ * HDCP_SHA_BLOCK_DONE before writing any further
+ * If the last byte is written, we need to poll for
+ * HDCP_SHA_COMP_DONE to wait until HW finish
+ */
+static int hdmi_hdcp_write_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int i;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 ksv_bytes, last_byte = 0;
+	u8 *ksv_fifo = NULL;
+	u32 reg_val, data, reg;
+	u32 rc = 0;
+
+	ksv_bytes = 5 * hdcp_ctrl->dev_count;
+
+	/* Check if need to wait for HW completion */
+	if (hdcp_ctrl->ksv_fifo_w_index) {
+		reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_SHA_STATUS);
+		DBG("HDCP_SHA_STATUS=%08x", reg_val);
+		if (hdcp_ctrl->ksv_fifo_w_index == ksv_bytes) {
+			/* check COMP_DONE if last write */
+			if (reg_val & HDMI_HDCP_SHA_STATUS_COMP_DONE) {
+				DBG("COMP_DONE");
+				return 0;
+			} else {
+				return -EAGAIN;
+			}
+		} else {
+			/* check BLOCK_DONE if not last write */
+			if (!(reg_val & HDMI_HDCP_SHA_STATUS_BLOCK_DONE))
+				return -EAGAIN;
+
+			DBG("BLOCK_DONE");
+		}
+	}
+
+	ksv_bytes -= hdcp_ctrl->ksv_fifo_w_index;
+	if (ksv_bytes <= 64)
+		last_byte = 1;
+	else
+		ksv_bytes = 64;
+
+	ksv_fifo = hdcp_ctrl->ksv_list;
+	ksv_fifo += hdcp_ctrl->ksv_fifo_w_index;
+
+	for (i = 0; i < ksv_bytes; i++) {
+		/* Write KSV byte and set DONE bit[0] for last byte */
+		reg_val = ksv_fifo[i] << 16;
+		if ((i == (ksv_bytes - 1)) && last_byte)
+			reg_val |= HDMI_HDCP_SHA_DATA_DONE;
+
+		reg = REG_HDMI_HDCP_SHA_DATA;
+		data = reg_val;
+		rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+
+		if (rc)
+			return rc;
+	}
+
+	hdcp_ctrl->ksv_fifo_w_index += ksv_bytes;
+
+	/*
+	 * return -EAGAIN to notify caller to wait for COMP_DONE or BLOCK_DONE
+	 */
+	return -EAGAIN;
+}
+
+/* write ksv fifo into HDCP engine */
+static int hdmi_hdcp_auth_part2_write_ksv_fifo(
+	struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	u32 timeout_count;
+
+	hdcp_ctrl->ksv_fifo_w_index = 0;
+	timeout_count = 100;
+	do {
+		rc = hdmi_hdcp_write_ksv_fifo(hdcp_ctrl);
+		if (!rc)
+			break;
+
+		if (rc != -EAGAIN)
+			return rc;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Write KSV fifo timedout", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	return 0;
+}
+
+static int hdmi_hdcp_auth_part2_check_v_match(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc = 0;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 link0_status;
+	u32 timeout_count = 100;
+
+	do {
+		link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+		if (link0_status & HDMI_HDCP_LINK0_STATUS_V_MATCHES)
+			break;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: HDCP V Match timedout", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	return 0;
+}
+
+static void 
hdmi_hdcp_auth_work(struct work_struct *work) +{ + struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work, + struct hdmi_hdcp_ctrl, hdcp_auth_work); + int rc; + + rc = hdmi_hdcp_auth_prepare(hdcp_ctrl); + if (rc) { + pr_err("%s: auth prepare failed %d\n", __func__, rc); + goto end; + } + + /* HDCP PartI */ + rc = hdmi_hdcp_auth_part1_key_exchange(hdcp_ctrl); + if (rc) { + pr_err("%s: key exchange failed %d\n", __func__, rc); + goto end; + } + + rc = hdmi_hdcp_auth_part1_recv_r0(hdcp_ctrl); + if (rc) { + pr_err("%s: receive r0 failed %d\n", __func__, rc); + goto end; + } + + rc = hdmi_hdcp_auth_part1_verify_r0(hdcp_ctrl); + if (rc) { + pr_err("%s: verify r0 failed %d\n", __func__, rc); + goto end; + } + pr_info("%s: Authentication Part I successful\n", __func__); + if (hdcp_ctrl->ds_type == DS_RECEIVER) + goto end; + + /* HDCP PartII */ + rc = hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(hdcp_ctrl); + if (rc) { + pr_err("%s: wait ksv fifo ready failed %d\n", __func__, rc); + goto end; + } + + rc = hdmi_hdcp_auth_part2_recv_ksv_fifo(hdcp_ctrl); + if (rc) { + pr_err("%s: recv ksv fifo failed %d\n", __func__, rc); + goto end; + } + + rc = hdmi_hdcp_auth_part2_write_ksv_fifo(hdcp_ctrl); + if (rc) { + pr_err("%s: write ksv fifo failed %d\n", __func__, rc); + goto end; + } + + rc = hdmi_hdcp_auth_part2_check_v_match(hdcp_ctrl); + if (rc) + pr_err("%s: check v match failed %d\n", __func__, rc); + +end: + if (rc == -ECANCELED) { + pr_info("%s: hdcp authentication canceled\n", __func__); + } else if (rc == -ENOTSUPP) { + pr_info("%s: hdcp is not supported\n", __func__); + } else if (rc) { + pr_err("%s: hdcp authentication failed\n", __func__); + hdmi_hdcp_auth_fail(hdcp_ctrl); + } else { + hdmi_hdcp_auth_done(hdcp_ctrl); + } +} + +void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + u32 reg_val; + unsigned long flags; + + if ((HDCP_STATE_INACTIVE != hdcp_ctrl->hdcp_state) || + (HDCP_STATE_NO_AKSV == hdcp_ctrl->hdcp_state)) { + DBG("still active or activating or no askv. returning"); + return; + } + + /* clear HDMI Encrypt */ + spin_lock_irqsave(&hdmi->reg_lock, flags); + reg_val = hdmi_read(hdmi, REG_HDMI_CTRL); + reg_val &= ~HDMI_CTRL_ENCRYPTED; + hdmi_write(hdmi, REG_HDMI_CTRL, reg_val); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + hdcp_ctrl->auth_event = 0; + hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING; + hdcp_ctrl->auth_retries = 0; + queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work); +} + +void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl) +{ + struct hdmi *hdmi = hdcp_ctrl->hdmi; + unsigned long flags; + u32 reg_val; + + if ((HDCP_STATE_INACTIVE == hdcp_ctrl->hdcp_state) || + (HDCP_STATE_NO_AKSV == hdcp_ctrl->hdcp_state)) { + DBG("hdcp inactive or no aksv. returning"); + return; + } + + /* + * Disable HPD circuitry. + * This is needed to reset the HDCP cipher engine so that when we + * attempt a re-authentication, HW would clear the AN0_READY and + * AN1_READY bits in HDMI_HDCP_LINK0_STATUS register + */ + spin_lock_irqsave(&hdmi->reg_lock, flags); + reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL); + reg_val &= ~HDMI_HPD_CTRL_ENABLE; + hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val); + + /* + * Disable HDCP interrupts. + * Also, need to set the state to inactive here so that any ongoing + * reauth works will know that the HDCP session has been turned off. + */ + hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, 0); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + /* + * Cancel any pending auth/reauth attempts. 
+ * If one is ongoing, this will wait for it to finish. + * No more reauthentication attempts will be scheduled since we + * set the current state to inactive. + */ + set_bit(AUTH_ABORT_EV, &hdcp_ctrl->auth_event); + wake_up_all(&hdcp_ctrl->auth_event_queue); + cancel_work_sync(&hdcp_ctrl->hdcp_auth_work); + cancel_work_sync(&hdcp_ctrl->hdcp_reauth_work); + + hdmi_write(hdmi, REG_HDMI_HDCP_RESET, + HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE); + + /* Disable encryption and disable the HDCP block */ + hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, 0); + + spin_lock_irqsave(&hdmi->reg_lock, flags); + reg_val = hdmi_read(hdmi, REG_HDMI_CTRL); + reg_val &= ~HDMI_CTRL_ENCRYPTED; + hdmi_write(hdmi, REG_HDMI_CTRL, reg_val); + + /* Enable HPD circuitry */ + reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL); + reg_val |= HDMI_HPD_CTRL_ENABLE; + hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val); + spin_unlock_irqrestore(&hdmi->reg_lock, flags); + + hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE; + + DBG("HDCP: Off"); +} + +struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi) +{ + struct hdmi_hdcp_ctrl *hdcp_ctrl = NULL; + + if (!hdmi->qfprom_mmio) { + pr_err("%s: HDCP is not supported without qfprom\n", + __func__); + return ERR_PTR(-EINVAL); + } + + hdcp_ctrl = kzalloc(sizeof(*hdcp_ctrl), GFP_KERNEL); + if (!hdcp_ctrl) + return ERR_PTR(-ENOMEM); + + INIT_WORK(&hdcp_ctrl->hdcp_auth_work, hdmi_hdcp_auth_work); + INIT_WORK(&hdcp_ctrl->hdcp_reauth_work, hdmi_hdcp_reauth_work); + init_waitqueue_head(&hdcp_ctrl->auth_event_queue); + hdcp_ctrl->hdmi = hdmi; + hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE; + hdcp_ctrl->aksv_valid = false; + + if (qcom_scm_hdcp_available()) + hdcp_ctrl->tz_hdcp = true; + else + hdcp_ctrl->tz_hdcp = false; + + return hdcp_ctrl; +} + +void hdmi_hdcp_destroy(struct hdmi *hdmi) +{ + if (hdmi && hdmi->hdcp_ctrl) { + kfree(hdmi->hdcp_ctrl); + hdmi->hdcp_ctrl = NULL; + } +} diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c index 6997ec636c6d..3a01cb5051e2 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c @@ -426,57 +426,6 @@ static void hdmi_phy_8960_destroy(struct hdmi_phy *phy) kfree(phy_8960); } -static void hdmi_phy_8960_reset(struct hdmi_phy *phy) -{ - struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy); - struct hdmi *hdmi = phy_8960->hdmi; - unsigned int val; - - val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL); - - if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { - /* pull low */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val & ~HDMI_PHY_CTRL_SW_RESET); - } else { - /* pull high */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val | HDMI_PHY_CTRL_SW_RESET); - } - - if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) { - /* pull low */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val & ~HDMI_PHY_CTRL_SW_RESET_PLL); - } else { - /* pull high */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val | HDMI_PHY_CTRL_SW_RESET_PLL); - } - - msleep(100); - - if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { - /* pull high */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val | HDMI_PHY_CTRL_SW_RESET); - } else { - /* pull low */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val & ~HDMI_PHY_CTRL_SW_RESET); - } - - if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) { - /* pull high */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val | HDMI_PHY_CTRL_SW_RESET_PLL); - } else { - /* pull low */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val & ~HDMI_PHY_CTRL_SW_RESET_PLL); - } -} - static void hdmi_phy_8960_powerup(struct hdmi_phy *phy, unsigned long int pixclock) { @@ -511,7 +460,6 @@ static void 
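/*
 * Editorial note, not part of the patch: hdmi_hdcp_init() in the
 * hdmi_hdcp.c hunk above returns an ERR_PTR (-EINVAL without qfprom,
 * -ENOMEM on allocation failure), so the caller is expected to check it
 * with IS_ERR(). Since HDCP is an optional feature, the HDMI core
 * presumably degrades gracefully rather than failing probe; a hedged
 * sketch of such a call site:
 *
 *	hdmi->hdcp_ctrl = hdmi_hdcp_init(hdmi);
 *	if (IS_ERR(hdmi->hdcp_ctrl)) {
 *		dev_warn(dev, "continuing without HDCP support\n");
 *		hdmi->hdcp_ctrl = NULL;
 *	}
 */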
hdmi_phy_8960_powerdown(struct hdmi_phy *phy) static const struct hdmi_phy_funcs hdmi_phy_8960_funcs = { .destroy = hdmi_phy_8960_destroy, - .reset = hdmi_phy_8960_reset, .powerup = hdmi_phy_8960_powerup, .powerdown = hdmi_phy_8960_powerdown, }; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c index 391433c1af7c..cb01421ae1e4 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c @@ -29,37 +29,6 @@ static void hdmi_phy_8x60_destroy(struct hdmi_phy *phy) kfree(phy_8x60); } -static void hdmi_phy_8x60_reset(struct hdmi_phy *phy) -{ - struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy); - struct hdmi *hdmi = phy_8x60->hdmi; - unsigned int val; - - val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL); - - if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { - /* pull low */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val & ~HDMI_PHY_CTRL_SW_RESET); - } else { - /* pull high */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val | HDMI_PHY_CTRL_SW_RESET); - } - - msleep(100); - - if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { - /* pull high */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val | HDMI_PHY_CTRL_SW_RESET); - } else { - /* pull low */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val & ~HDMI_PHY_CTRL_SW_RESET); - } -} - static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy, unsigned long int pixclock) { @@ -182,7 +151,6 @@ static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy) static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = { .destroy = hdmi_phy_8x60_destroy, - .reset = hdmi_phy_8x60_reset, .powerup = hdmi_phy_8x60_powerup, .powerdown = hdmi_phy_8x60_powerdown, }; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c index 59fa6cdacb2a..56ab8917ee9a 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c @@ -19,7 +19,6 @@ struct hdmi_phy_8x74 { struct hdmi_phy base; - struct hdmi *hdmi; void __iomem *mmio; }; #define to_hdmi_phy_8x74(x) container_of(x, struct hdmi_phy_8x74, base) @@ -41,59 +40,6 @@ static void hdmi_phy_8x74_destroy(struct hdmi_phy *phy) kfree(phy_8x74); } -static void hdmi_phy_8x74_reset(struct hdmi_phy *phy) -{ - struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy); - struct hdmi *hdmi = phy_8x74->hdmi; - unsigned int val; - - /* NOTE that HDMI_PHY_CTL is in core mmio, not phy mmio: */ - - val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL); - - if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { - /* pull low */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val & ~HDMI_PHY_CTRL_SW_RESET); - } else { - /* pull high */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val | HDMI_PHY_CTRL_SW_RESET); - } - - if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) { - /* pull low */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val & ~HDMI_PHY_CTRL_SW_RESET_PLL); - } else { - /* pull high */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val | HDMI_PHY_CTRL_SW_RESET_PLL); - } - - msleep(100); - - if (val & HDMI_PHY_CTRL_SW_RESET_LOW) { - /* pull high */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val | HDMI_PHY_CTRL_SW_RESET); - } else { - /* pull low */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val & ~HDMI_PHY_CTRL_SW_RESET); - } - - if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) { - /* pull high */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val | HDMI_PHY_CTRL_SW_RESET_PLL); - } else { - /* pull low */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - val & ~HDMI_PHY_CTRL_SW_RESET_PLL); - } -} - static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy, unsigned long int pixclock) { @@ -117,7 +63,6 @@ static void 
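/*
 * Editorial note, not part of the patch: the ->reset() implementations
 * removed from the 8960, 8x60 and 8x74 PHYs above were near-identical
 * copies of the same polarity-aware toggle of REG_HDMI_PHY_CTRL:
 *
 *	if (val & HDMI_PHY_CTRL_SW_RESET_LOW)
 *		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
 *				val & ~HDMI_PHY_CTRL_SW_RESET);
 *	else
 *		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
 *				val | HDMI_PHY_CTRL_SW_RESET);
 *
 * As the deleted 8x74 comment noted, REG_HDMI_PHY_CTRL lives in the HDMI
 * core mmio rather than the PHY mmio, so the toggle presumably now lives
 * once in the core code instead of being duplicated per PHY; that also
 * lets the 8x74 variant drop its back-pointer to struct hdmi entirely.
 */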
hdmi_phy_8x74_powerdown(struct hdmi_phy *phy) static const struct hdmi_phy_funcs hdmi_phy_8x74_funcs = { .destroy = hdmi_phy_8x74_destroy, - .reset = hdmi_phy_8x74_reset, .powerup = hdmi_phy_8x74_powerup, .powerdown = hdmi_phy_8x74_powerdown, }; @@ -138,8 +83,6 @@ struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi) phy->funcs = &hdmi_phy_8x74_funcs; - phy_8x74->hdmi = hdmi; - /* for 8x74, the phy mmio is mapped separately: */ phy_8x74->mmio = msm_ioremap(hdmi->pdev, "phy_physical", "HDMI_8x74"); diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h index 978c3f70872a..2aa23b98f8aa 100644 --- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h +++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h @@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23) - -Copyright (C) 2013 by the following authors: +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) + +Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h index 153fc487d683..74b86734fef5 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h 
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h @@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 4dc158ed2e95..6ac9aa165768 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -682,7 +682,5 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs); plane->crtc = crtc; - mdp4_plane_install_properties(plane, &crtc->base); - return crtc; } diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c index 7369ee7f0c55..5ed38cf548a1 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c @@ -19,8 +19,11 @@ #include "msm_drv.h" #include "mdp4_kms.h" -void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask) +void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask) { + mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_CLEAR, + irqmask ^ (irqmask & 
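/* Editorial note, not part of the patch: irqmask ^ (irqmask & old_irqmask)
 * reduces to irqmask & ~old_irqmask, i.e. exactly the interrupts being
 * newly enabled. Writing that set to REG_MDP4_INTR_CLEAR first discards
 * any status bits that latched while those interrupts were masked, so a
 * stale event cannot fire the instant INTR_ENABLE is written on the next
 * line. Example: old = 0x5, new = 0x7 gives 0x7 ^ (0x7 & 0x5) = 0x2, so
 * only the newly enabled bit 1 is cleared. */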
old_irqmask)); mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask); } @@ -68,9 +71,10 @@ irqreturn_t mdp4_irq(struct msm_kms *kms) struct drm_device *dev = mdp4_kms->dev; struct msm_drm_private *priv = dev->dev_private; unsigned int id; - uint32_t status; + uint32_t status, enable; - status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS); + enable = mdp4_read(mdp4_kms, REG_MDP4_INTR_ENABLE); + status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS) & enable; mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status); VERB("status=%08x", status); @@ -86,13 +90,22 @@ irqreturn_t mdp4_irq(struct msm_kms *kms) int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) { + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + + mdp4_enable(mdp4_kms); mdp_update_vblank_mask(to_mdp_kms(kms), mdp4_crtc_vblank(crtc), true); + mdp4_disable(mdp4_kms); + return 0; } void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) { + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + + mdp4_enable(mdp4_kms); mdp_update_vblank_mask(to_mdp_kms(kms), mdp4_crtc_vblank(crtc), false); + mdp4_disable(mdp4_kms); } diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index 531e4acc2a87..077f7521a971 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -241,22 +241,37 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms) } #ifdef CONFIG_OF -static struct drm_panel *detect_panel(struct drm_device *dev, const char *name) +static struct drm_panel *detect_panel(struct drm_device *dev) { - struct device_node *n; + struct device_node *endpoint, *panel_node; + struct device_node *np = dev->dev->of_node; struct drm_panel *panel = NULL; - n = of_parse_phandle(dev->dev->of_node, name, 0); - if (n) { - panel = of_drm_find_panel(n); - if (!panel) - panel = ERR_PTR(-EPROBE_DEFER); + endpoint = of_graph_get_next_endpoint(np, NULL); + if (!endpoint) { + dev_err(dev->dev, "no valid endpoint\n"); + return ERR_PTR(-ENODEV); + } + + panel_node = of_graph_get_remote_port_parent(endpoint); + if (!panel_node) { + dev_err(dev->dev, "no valid panel node\n"); + of_node_put(endpoint); + return ERR_PTR(-ENODEV); + } + + of_node_put(endpoint); + + panel = of_drm_find_panel(panel_node); + if (!panel) { + of_node_put(panel_node); + return ERR_PTR(-EPROBE_DEFER); } return panel; } #else -static struct drm_panel *detect_panel(struct drm_device *dev, const char *name) +static struct drm_panel *detect_panel(struct drm_device *dev) { // ??? maybe use a module param to specify which panel is attached? 
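// (Editorial note, not part of the patch: this non-OF stub still falls off
// the end of a function returning struct drm_panel *, so its return value
// is indeterminate; presumably tolerated only because the MDP4 LVDS path
// is in practice DT-only. A safer stub would return ERR_PTR(-ENODEV).)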
} @@ -294,7 +309,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms) * Setup the LCDC/LVDS path: RGB2 -> DMA_P -> LCDC -> LVDS: */ - panel = detect_panel(dev, "qcom,lvds-panel"); + panel = detect_panel(dev); if (IS_ERR(panel)) { ret = PTR_ERR(panel); dev_err(dev->dev, "failed to detect LVDS panel: %d\n", ret); @@ -527,6 +542,11 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) goto fail; } + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.max_width = 2048; + dev->mode_config.max_height = 2048; + return kms; fail: diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h index c1ecb9d6bdef..8a7f6e1e2bca 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h @@ -167,7 +167,8 @@ static inline uint32_t mixercfg(uint32_t mixer_cfg, int mixer, int mdp4_disable(struct mdp4_kms *mdp4_kms); int mdp4_enable(struct mdp4_kms *mdp4_kms); -void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask); +void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask); void mdp4_irq_preinstall(struct msm_kms *kms); int mdp4_irq_postinstall(struct msm_kms *kms); void mdp4_irq_uninstall(struct msm_kms *kms); @@ -175,29 +176,24 @@ irqreturn_t mdp4_irq(struct msm_kms *kms); int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); -static inline bool pipe_supports_yuv(enum mdp4_pipe pipe) +static inline uint32_t mdp4_pipe_caps(enum mdp4_pipe pipe) { switch (pipe) { case VG1: case VG2: case VG3: case VG4: - return true; + return MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC; + case RGB1: + case RGB2: + case RGB3: + return MDP_PIPE_CAP_SCALE; default: - return false; + return 0; } } -static inline -uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats, - uint32_t max_formats) -{ - return mdp_get_formats(pixel_formats, max_formats, - !pipe_supports_yuv(pipe_id)); -} - -void mdp4_plane_install_properties(struct drm_plane *plane, - struct drm_mode_object *obj); enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane); struct drm_plane *mdp4_plane_init(struct drm_device *dev, enum mdp4_pipe pipe_id, bool private_plane); diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c index c04843376c54..4cd6e721aa0a 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c @@ -346,8 +346,10 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder) mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0); - if (panel) + if (panel) { drm_panel_disable(panel); + drm_panel_unprepare(panel); + } /* * Wait for a vsync so we know the ENABLE=0 latched before @@ -412,8 +414,10 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder) if (ret) dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret); - if (panel) + if (panel) { + drm_panel_prepare(panel); drm_panel_enable(panel); + } setup_phy(encoder); diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c index 247a424445f7..e9dee367b597 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c @@ -26,6 +26,7 @@ struct mdp4_plane { enum mdp4_pipe pipe; + uint32_t caps; uint32_t nformats; uint32_t formats[32]; @@ -74,7 +75,7 @@ static void mdp4_plane_destroy(struct drm_plane *plane) 
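/*
 * Editorial note, not part of the patch: the detect_panel() rework above
 * replaces the driver-private "qcom,lvds-panel" phandle lookup with the
 * generic of_graph binding: of_graph_get_next_endpoint() takes the first
 * endpoint under the MDP4 node, of_graph_get_remote_port_parent() resolves
 * it to the panel's device node, and of_drm_find_panel() matches that node
 * against registered drm_panel instances, returning -EPROBE_DEFER until
 * the panel driver has probed. The LCDC encoder hunks likewise adopt the
 * full drm_panel state model, pairing drm_panel_prepare()/drm_panel_enable()
 * on enable with drm_panel_disable()/drm_panel_unprepare() on disable.
 */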
} /* helper to install properties which are common to planes and crtcs */ -void mdp4_plane_install_properties(struct drm_plane *plane, +static void mdp4_plane_install_properties(struct drm_plane *plane, struct drm_mode_object *obj) { // XXX @@ -382,9 +383,11 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev, mdp4_plane->pipe = pipe_id; mdp4_plane->name = pipe_names[pipe_id]; + mdp4_plane->caps = mdp4_pipe_caps(pipe_id); - mdp4_plane->nformats = mdp4_get_formats(pipe_id, mdp4_plane->formats, - ARRAY_SIZE(mdp4_plane->formats)); + mdp4_plane->nformats = mdp_get_formats(mdp4_plane->formats, + ARRAY_SIZE(mdp4_plane->formats), + !pipe_supports_yuv(mdp4_plane->caps)); type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs, diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h index 50e17527e2e5..3469f50d5590 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h @@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) Copyright (C) 2013-2015 by the following authors: - Rob Clark 
<robdclark@gmail.com> (robclark) @@ -381,49 +381,49 @@ static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x0 static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); } #define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007 #define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0 -static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val) +static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(uint32_t val) { return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK; } #define MDP5_CTL_LAYER_REG_VIG1__MASK 0x00000038 #define MDP5_CTL_LAYER_REG_VIG1__SHIFT 3 -static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(enum mdp_mixer_stage_id val) +static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(uint32_t val) { return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK; } #define MDP5_CTL_LAYER_REG_VIG2__MASK 0x000001c0 #define MDP5_CTL_LAYER_REG_VIG2__SHIFT 6 -static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(enum mdp_mixer_stage_id val) +static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(uint32_t val) { return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK; } #define MDP5_CTL_LAYER_REG_RGB0__MASK 0x00000e00 #define MDP5_CTL_LAYER_REG_RGB0__SHIFT 9 -static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(enum mdp_mixer_stage_id val) +static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(uint32_t val) { return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK; } #define MDP5_CTL_LAYER_REG_RGB1__MASK 0x00007000 #define MDP5_CTL_LAYER_REG_RGB1__SHIFT 12 -static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(enum mdp_mixer_stage_id val) +static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(uint32_t val) { return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK; } #define MDP5_CTL_LAYER_REG_RGB2__MASK 0x00038000 #define MDP5_CTL_LAYER_REG_RGB2__SHIFT 15 -static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(enum mdp_mixer_stage_id val) +static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(uint32_t val) { return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK; } #define MDP5_CTL_LAYER_REG_DMA0__MASK 0x001c0000 #define MDP5_CTL_LAYER_REG_DMA0__SHIFT 18 -static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(enum mdp_mixer_stage_id val) +static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(uint32_t val) { return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK; } #define MDP5_CTL_LAYER_REG_DMA1__MASK 0x00e00000 #define MDP5_CTL_LAYER_REG_DMA1__SHIFT 21 -static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val) +static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(uint32_t val) { return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK; } @@ -431,13 +431,13 @@ static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val) #define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000 #define MDP5_CTL_LAYER_REG_VIG3__MASK 0x1c000000 #define MDP5_CTL_LAYER_REG_VIG3__SHIFT 26 -static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(enum mdp_mixer_stage_id val) +static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(uint32_t val) { return ((val) << MDP5_CTL_LAYER_REG_VIG3__SHIFT) & MDP5_CTL_LAYER_REG_VIG3__MASK; } #define MDP5_CTL_LAYER_REG_RGB3__MASK 0xe0000000 #define MDP5_CTL_LAYER_REG_RGB3__SHIFT 29 -static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(enum mdp_mixer_stage_id val) +static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(uint32_t val) { return ((val) << MDP5_CTL_LAYER_REG_RGB3__SHIFT) & 
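/* Editorial note, not part of the patch: these CTL_LAYER_REG accessors are
 * regenerated to take a plain uint32_t instead of enum mdp_mixer_stage_id,
 * presumably because each field is only 3 bits wide: with the LAYER_EXT
 * registers added below, stages from STAGE6 upward carry their fourth bit
 * in the per-pipe *_BIT3 flags of LAYER_EXT_REG, so only the low bits of
 * the stage value belong here. */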
MDP5_CTL_LAYER_REG_RGB3__MASK; } @@ -499,6 +499,44 @@ static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __o static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000020 + __offset_CTL(i0); } +static inline uint32_t __offset_LAYER_EXT(uint32_t idx) +{ + switch (idx) { + case 0: return 0x00000040; + case 1: return 0x00000044; + case 2: return 0x00000048; + case 3: return 0x0000004c; + case 4: return 0x00000050; + case 5: return 0x00000054; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_CTL_LAYER_EXT(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); } + +static inline uint32_t REG_MDP5_CTL_LAYER_EXT_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); } +#define MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3 0x00000001 +#define MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3 0x00000004 +#define MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3 0x00000010 +#define MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3 0x00000040 +#define MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3 0x00000100 +#define MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3 0x00000400 +#define MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3 0x00001000 +#define MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3 0x00004000 +#define MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3 0x00010000 +#define MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3 0x00040000 +#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK 0x00f00000 +#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT 20 +static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR0(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK; +} +#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK 0x3c000000 +#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT 26 +static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR1(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK; +} + static inline uint32_t __offset_PIPE(enum mdp5_pipe idx) { switch (idx) { @@ -803,11 +841,11 @@ static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val) } #define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000 #define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000 -#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK 0x00180000 -#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT 19 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(enum mdp_fetch_type val) +#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK 0x00180000 +#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT 19 +static inline uint32_t MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(enum mdp_fetch_type val) { - return ((val) << MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT) & MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK; + return ((val) << MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT) & MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK; } #define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x01800000 #define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 23 @@ -897,41 +935,41 @@ static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val) static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); } #define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001 #define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002 -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK 0x00000300 -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT 8 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(enum mdp5_scale_filter val) +#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK 0x00000300 +#define 
MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT 8 +static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(enum mdp5_scale_filter val) { - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK; + return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK; } -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK 0x00000c00 -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT 10 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(enum mdp5_scale_filter val) +#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK 0x00000c00 +#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT 10 +static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(enum mdp5_scale_filter val) { - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK; + return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK; } -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK 0x00003000 -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT 12 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(enum mdp5_scale_filter val) +#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK 0x00003000 +#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT 12 +static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(enum mdp5_scale_filter val) { - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK; + return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK; } -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK 0x0000c000 -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT 14 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(enum mdp5_scale_filter val) +#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK 0x0000c000 +#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT 14 +static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(enum mdp5_scale_filter val) { - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK; + return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK; } -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK 0x00030000 -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT 16 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(enum mdp5_scale_filter val) +#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK 0x00030000 +#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT 16 +static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(enum mdp5_scale_filter val) { - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK; + return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK; } -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK 0x000c0000 -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT 18 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(enum mdp5_scale_filter val) +#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK 0x000c0000 +#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT 18 +static inline 
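/* Editorial note, not part of the patch: the SCALE_CONFIG fields are
 * renamed from direction-based selects (MIN/CR/MAX filter) to
 * per-component ones (FILTER_COMP_0, _1_2, _3), reflecting that the
 * hardware chooses a scale filter per plane component, presumably
 * component 0, the chroma pair 1/2, and component 3 (alpha), rather than
 * per scaling direction. */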
uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(enum mdp5_scale_filter val) { - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK; + return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK; } static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000210 + __offset_PIPE(i0); } @@ -984,9 +1022,22 @@ static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x000000 static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00000010 + __offset_LM(i0); } -static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t __offset_BLEND(uint32_t idx) +{ + switch (idx) { + case 0: return 0x00000020; + case 1: return 0x00000050; + case 2: return 0x00000080; + case 3: return 0x000000b0; + case 4: return 0x00000230; + case 5: return 0x00000260; + case 6: return 0x00000290; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); } -static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); } #define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003 #define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0 static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val) @@ -1008,25 +1059,25 @@ static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val) #define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000 #define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000 -static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_LM(i0) + __offset_BLEND(i1); } -static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_LM(i0) + __offset_BLEND(i1); } -static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000002c + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_LM(i0) + __offset_BLEND(i1); } -static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000030 + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_LM(i0) + __offset_BLEND(i1); } -static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000034 + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_LM(i0) + __offset_BLEND(i1); } -static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000038 + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_LM(i0) + __offset_BLEND(i1); } -static inline 
uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000003c + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000001c + __offset_LM(i0) + __offset_BLEND(i1); } -static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000040 + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + __offset_BLEND(i1); } -static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000044 + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + __offset_BLEND(i1); } -static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000048 + __offset_LM(i0) + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + __offset_BLEND(i1); } static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); } #define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK 0x0000ffff @@ -1260,6 +1311,13 @@ static inline uint32_t REG_MDP5_PP_FBC_LOSSY_MODE(uint32_t i0) { return 0x000000 static inline uint32_t __offset_WB(uint32_t idx) { switch (idx) { +#if 0 /* TEMPORARY until patch that adds wb.base[] is merged */ + case 0: return (mdp5_cfg->wb.base[0]); + case 1: return (mdp5_cfg->wb.base[1]); + case 2: return (mdp5_cfg->wb.base[2]); + case 3: return (mdp5_cfg->wb.base[3]); + case 4: return (mdp5_cfg->wb.base[4]); +#endif default: return INVALID_IDX(idx); } } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c index 8b9a7931b162..a1e26f23c7cc 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c @@ -22,7 +22,76 @@ struct mdp5_cfg_handler { /* mdp5_cfg must be exposed (used in mdp5.xml.h) */ const struct mdp5_cfg_hw *mdp5_cfg = NULL; -const struct mdp5_cfg_hw msm8x74_config = { +const struct mdp5_cfg_hw msm8x74v1_config = { + .name = "msm8x74v1", + .mdp = { + .count = 1, + .base = { 0x00100 }, + }, + .smp = { + .mmb_count = 22, + .mmb_size = 4096, + .clients = { + [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7, + [SSPP_DMA0] = 10, [SSPP_DMA1] = 13, + [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18, + }, + }, + .ctl = { + .count = 5, + .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 }, + .flush_hw_mask = 0x0003ffff, + }, + .pipe_vig = { + .count = 3, + .base = { 0x01200, 0x01600, 0x01a00 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_CSC | + 0, + }, + .pipe_rgb = { + .count = 3, + .base = { 0x01e00, 0x02200, 0x02600 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + 0, + }, + .pipe_dma = { + .count = 2, + .base = { 0x02a00, 0x02e00 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + 0, + }, + .lm = { + .count = 5, + .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 }, + .nb_stages = 5, + }, + .dspp = { + .count = 3, + .base = { 0x04600, 0x04a00, 0x04e00 }, + }, + .pp = { + .count = 3, + .base = { 0x21b00, 0x21c00, 0x21d00 }, + }, + .intf = { + .base = { 0x21100, 0x21300, 0x21500, 0x21700 }, + .connect = { + [0] = INTF_eDP, + [1] = INTF_DSI, + [2] = INTF_DSI, + [3] = INTF_HDMI, + }, + }, + .max_clk = 200000000, +}; + +const struct mdp5_cfg_hw 
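/* Editorial note, not part of the patch: msm8x74v1_config above is a new
 * table for MDP5 v1.0 parts (hardware revision 0 in cfg_handlers below),
 * split out from the previously shared msm8x74 entry. The visible v1
 * differences: no .ad block at all, which is why the old
 * "NOTE: no ad in v1.0" remark is dropped from the v2 table, and no
 * MDP_PIPE_CAP_DECIMATION in any of the pipe capability masks. */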
msm8x74v2_config = { .name = "msm8x74", .mdp = { .count = 1, @@ -45,19 +114,27 @@ const struct mdp5_cfg_hw msm8x74_config = { .pipe_vig = { .count = 3, .base = { 0x01200, 0x01600, 0x01a00 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION, }, .pipe_rgb = { .count = 3, .base = { 0x01e00, 0x02200, 0x02600 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, }, .pipe_dma = { .count = 2, .base = { 0x02a00, 0x02e00 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, }, .lm = { .count = 5, .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 }, .nb_stages = 5, + .max_width = 2048, + .max_height = 0xFFFF, }, .dspp = { .count = 3, @@ -65,7 +142,7 @@ const struct mdp5_cfg_hw msm8x74_config = { }, .ad = { .count = 2, - .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */ + .base = { 0x13100, 0x13300 }, }, .pp = { .count = 3, @@ -113,19 +190,27 @@ const struct mdp5_cfg_hw apq8084_config = { .pipe_vig = { .count = 4, .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION, }, .pipe_rgb = { .count = 4, .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, }, .pipe_dma = { .count = 2, .base = { 0x03200, 0x03600 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, }, .lm = { .count = 6, .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 }, .nb_stages = 5, + .max_width = 2048, + .max_height = 0xFFFF, }, .dspp = { .count = 4, @@ -174,19 +259,27 @@ const struct mdp5_cfg_hw msm8x16_config = { .pipe_vig = { .count = 1, .base = { 0x05000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION, }, .pipe_rgb = { .count = 2, .base = { 0x15000, 0x17000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, }, .pipe_dma = { .count = 1, .base = { 0x25000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, }, .lm = { .count = 2, /* LM0 and LM3 */ .base = { 0x45000, 0x48000 }, .nb_stages = 5, + .max_width = 2048, + .max_height = 0xFFFF, }, .dspp = { .count = 1, @@ -203,14 +296,91 @@ const struct mdp5_cfg_hw msm8x16_config = { .max_clk = 320000000, }; +const struct mdp5_cfg_hw msm8x94_config = { + .name = "msm8x94", + .mdp = { + .count = 1, + .base = { 0x01000 }, + }, + .smp = { + .mmb_count = 44, + .mmb_size = 8192, + .clients = { + [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, + [SSPP_VIG2] = 7, [SSPP_VIG3] = 19, + [SSPP_DMA0] = 10, [SSPP_DMA1] = 13, + [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, + [SSPP_RGB2] = 18, [SSPP_RGB3] = 22, + }, + .reserved_state[0] = GENMASK(23, 0), /* first 24 MMBs */ + .reserved = { + [1] = 1, [4] = 1, [7] = 1, [19] = 1, + [16] = 5, [17] = 5, [18] = 5, [22] = 5, + }, + }, + .ctl = { + .count = 5, + .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 }, + .flush_hw_mask = 0xf0ffffff, + }, + .pipe_vig = { + .count = 4, + .base = { 0x05000, 0x07000, 0x09000, 0x0b000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION, + }, + .pipe_rgb = { + .count = 4, + .base = { 0x15000, 0x17000, 0x19000, 0x1b000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, + }, + .pipe_dma = { + .count = 2, + .base = { 0x25000, 0x27000 }, + .caps = 
MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, + }, + .lm = { + .count = 6, + .base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 }, + .nb_stages = 8, + .max_width = 2048, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 4, + .base = { 0x55000, 0x57000, 0x59000, 0x5b000 }, + + }, + .ad = { + .count = 3, + .base = { 0x79000, 0x79800, 0x7a000 }, + }, + .pp = { + .count = 4, + .base = { 0x71000, 0x71800, 0x72000, 0x72800 }, + }, + .intf = { + .base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 }, + .connect = { + [0] = INTF_DISABLED, + [1] = INTF_DSI, + [2] = INTF_DSI, + [3] = INTF_HDMI, + }, + }, + .max_clk = 320000000, +}; + static const struct mdp5_cfg_handler cfg_handlers[] = { - { .revision = 0, .config = { .hw = &msm8x74_config } }, - { .revision = 2, .config = { .hw = &msm8x74_config } }, + { .revision = 0, .config = { .hw = &msm8x74v1_config } }, + { .revision = 2, .config = { .hw = &msm8x74v2_config } }, { .revision = 3, .config = { .hw = &apq8084_config } }, { .revision = 6, .config = { .hw = &msm8x16_config } }, + { .revision = 9, .config = { .hw = &msm8x94_config } }, }; - static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev); const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h index 69349abe59f2..efb918d9f68b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h @@ -42,6 +42,13 @@ struct mdp5_sub_block { struct mdp5_lm_block { MDP5_SUB_BLOCK_DEFINITION; uint32_t nb_stages; /* number of stages per blender */ + uint32_t max_width; /* Maximum output resolution */ + uint32_t max_height; +}; + +struct mdp5_pipe_block { + MDP5_SUB_BLOCK_DEFINITION; + uint32_t caps; /* pipe capabilities */ }; struct mdp5_ctl_block { @@ -70,9 +77,9 @@ struct mdp5_cfg_hw { struct mdp5_sub_block mdp; struct mdp5_smp_block smp; struct mdp5_ctl_block ctl; - struct mdp5_sub_block pipe_vig; - struct mdp5_sub_block pipe_rgb; - struct mdp5_sub_block pipe_dma; + struct mdp5_pipe_block pipe_vig; + struct mdp5_pipe_block pipe_rgb; + struct mdp5_pipe_block pipe_dma; struct mdp5_lm_block lm; struct mdp5_sub_block dspp; struct mdp5_sub_block ad; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c index ee31b16fe7ea..8e6c9b598a57 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c @@ -21,6 +21,8 @@ struct mdp5_cmd_encoder { struct mdp5_interface intf; bool enabled; uint32_t bsc; + + struct mdp5_ctl *ctl; }; #define to_mdp5_cmd_encoder(x) container_of(x, struct mdp5_cmd_encoder, base) @@ -210,13 +212,14 @@ static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, mode->vsync_end, mode->vtotal, mode->type, mode->flags); pingpong_tearcheck_setup(encoder, mode); - mdp5_crtc_set_intf(encoder->crtc, &mdp5_cmd_enc->intf); + mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_cmd_enc->intf, + mdp5_cmd_enc->ctl); } static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) { struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); - struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc); + struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; struct mdp5_interface *intf = &mdp5_cmd_enc->intf; if (WARN_ON(!mdp5_cmd_enc->enabled)) @@ -235,7 +238,7 @@ static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) { struct 
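/* Editorial note, not part of the patch: these encoder hunks are part of
 * moving CTL ownership out of the CRTC. The command-mode encoder now
 * caches the struct mdp5_ctl it was created with (mdp5_cmd_enc->ctl,
 * assigned in mdp5_cmd_encoder_init()) and hands it to the CRTC through
 * mdp5_crtc_set_pipeline(), instead of fetching it back via the deleted
 * mdp5_crtc_get_ctl(). This matches the reworked allocation model in
 * mdp5_ctl.c below, where CTLs are tied to display interfaces rather than
 * requested on demand per CRTC. */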
mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); - struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc); + struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; struct mdp5_interface *intf = &mdp5_cmd_enc->intf; if (WARN_ON(mdp5_cmd_enc->enabled)) @@ -300,7 +303,7 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, /* initialize command mode encoder */ struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, - struct mdp5_interface *intf) + struct mdp5_interface *intf, struct mdp5_ctl *ctl) { struct drm_encoder *encoder = NULL; struct mdp5_cmd_encoder *mdp5_cmd_enc; @@ -320,6 +323,7 @@ struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, memcpy(&mdp5_cmd_enc->intf, intf, sizeof(mdp5_cmd_enc->intf)); encoder = &mdp5_cmd_enc->base; + mdp5_cmd_enc->ctl = ctl; drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs, DRM_MODE_ENCODER_DSI); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 4c1df4e6e5bc..7f9f4ac88029 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -160,8 +160,7 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) if (mdp5_crtc->ctl && !crtc->state->enable) { /* set STAGE_UNUSED for all layers */ - mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000); - mdp5_ctl_release(mdp5_crtc->ctl); + mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0); mdp5_crtc->ctl = NULL; } } @@ -196,13 +195,9 @@ static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc, /* * blend_setup() - blend all the planes of a CRTC * - * When border is enabled, the border color will ALWAYS be the base layer. - * Therefore, the first plane (private RGB pipe) will start at STAGE0. - * If disabled, the first plane starts at STAGE_BASE. - * - * Note: - * Border is not enabled here because the private plane is exactly - * the CRTC resolution. + * If no base layer is available, border will be enabled as the base layer. + * Otherwise all layers will be blended based on their stage calculated + * in mdp5_crtc_atomic_check. */ static void blend_setup(struct drm_crtc *crtc) { @@ -210,9 +205,14 @@ static void blend_setup(struct drm_crtc *crtc) struct mdp5_kms *mdp5_kms = get_kms(crtc); struct drm_plane *plane; const struct mdp5_cfg_hw *hw_cfg; - uint32_t lm = mdp5_crtc->lm, blend_cfg = 0; + struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL}; + const struct mdp_format *format; + uint32_t lm = mdp5_crtc->lm; + uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0; unsigned long flags; -#define blender(stage) ((stage) - STAGE_BASE) + uint8_t stage[STAGE_MAX + 1]; + int i, plane_cnt = 0; +#define blender(stage) ((stage) - STAGE0) hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); @@ -222,33 +222,73 @@ static void blend_setup(struct drm_crtc *crtc) if (!mdp5_crtc->ctl) goto out; + /* Collect all plane information */ drm_atomic_crtc_for_each_plane(plane, crtc) { - enum mdp_mixer_stage_id stage = - to_mdp5_plane_state(plane->state)->stage; + pstate = to_mdp5_plane_state(plane->state); + pstates[pstate->stage] = pstate; + stage[pstate->stage] = mdp5_plane_pipe(plane); + plane_cnt++; + } - /* - * Note: This cannot happen with current implementation but - * we need to check this condition once z property is added - */ - BUG_ON(stage > hw_cfg->lm.nb_stages); + /* + * If there is no base layer, enable border color. + * Although it's not possbile in current blend logic, + * put it here as a reminder. 
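 * (Editorial note, not part of the patch: the per-stage programming below
 * encodes the usual compositing rules. For a premultiplied format at full
 * plane alpha (fg_alpha == 0xff) it selects
 * FG_ALPHA(FG_CONST) | BG_ALPHA(FG_PIXEL) | BG_INV_ALPHA, i.e.
 *	out = fg + (1 - fg.alpha) * bg,
 * while a reduced plane alpha additionally folds the constant in through
 * the MOD_ALPHA variants instead of a separate multiply.)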
+ */ + if (!pstates[STAGE_BASE] && plane_cnt) { + ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT; + DBG("Border Color is enabled"); + } - /* LM */ - mdp5_write(mdp5_kms, - REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)), - MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) | - MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST)); + /* The reset for blending */ + for (i = STAGE0; i <= STAGE_MAX; i++) { + if (!pstates[i]) + continue; + + format = to_mdp_format( + msm_framebuffer_format(pstates[i]->base.fb)); + plane = pstates[i]->base.plane; + blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) | + MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST); + fg_alpha = pstates[i]->alpha; + bg_alpha = 0xFF - pstates[i]->alpha; + DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha); + + if (format->alpha_enable && pstates[i]->premultiplied) { + blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) | + MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL); + if (fg_alpha != 0xff) { + bg_alpha = fg_alpha; + blend_op |= + MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA | + MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA; + } else { + blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA; + } + } else if (format->alpha_enable) { + blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) | + MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL); + if (fg_alpha != 0xff) { + bg_alpha = fg_alpha; + blend_op |= + MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA | + MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA | + MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA | + MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA; + } else { + blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA; + } + } + + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm, + blender(i)), blend_op); mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm, - blender(stage)), 0xff); + blender(i)), fg_alpha); mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm, - blender(stage)), 0x00); - /* CTL */ - blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage); - DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name, - pipe2name(mdp5_plane_pipe(plane)), stage); + blender(i)), bg_alpha); } - DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg); - mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg); + mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags); out: spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags); @@ -339,25 +379,19 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, struct mdp5_kms *mdp5_kms = get_kms(crtc); struct drm_plane *plane; struct drm_device *dev = crtc->dev; - struct plane_state pstates[STAGE3 + 1]; + struct plane_state pstates[STAGE_MAX + 1]; + const struct mdp5_cfg_hw *hw_cfg; int cnt = 0, i; DBG("%s: check", mdp5_crtc->name); - /* request a free CTL, if none is already allocated for this CRTC */ - if (state->enable && !mdp5_crtc->ctl) { - mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc); - if (WARN_ON(!mdp5_crtc->ctl)) - return -EINVAL; - } - /* verify that there are not too many planes attached to crtc * and that we don't have conflicting mixer stages: */ + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); drm_atomic_crtc_state_for_each_plane(plane, state) { struct drm_plane_state *pstate; - - if (cnt >= ARRAY_SIZE(pstates)) { + if (cnt >= (hw_cfg->lm.nb_stages)) { dev_err(dev->dev, "too many planes!\n"); return -EINVAL; } @@ -369,13 +403,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, */ if (!pstate) pstate = plane->state; - pstates[cnt].plane = plane; pstates[cnt].state = to_mdp5_plane_state(pstate); cnt++; } + /* assign a stage based on sorted zpos property */ sort(pstates, cnt, 
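/* Editorial note, not part of the patch: atomic_check sorts the attached
 * planes by their zpos property and then presumably assigns them
 * consecutive mixer stages starting at STAGE_BASE. That ordering is what
 * allows blend_setup() above to index its pstates[] array directly by
 * stage, and the new nb_stages bound taken from the hw config replaces
 * the old fixed STAGE3 limit. */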
sizeof(pstates[0]), pstate_cmp, NULL); for (i = 0; i < cnt; i++) { @@ -693,8 +727,8 @@ void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file) complete_flip(crtc, file); } -/* set interface for routing crtc->encoder: */ -void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf) +void mdp5_crtc_set_pipeline(struct drm_crtc *crtc, + struct mdp5_interface *intf, struct mdp5_ctl *ctl) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct mdp5_kms *mdp5_kms = get_kms(crtc); @@ -717,7 +751,8 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf) mdp_irq_update(&mdp5_kms->base); - mdp5_ctl_set_intf(mdp5_crtc->ctl, intf); + mdp5_crtc->ctl = ctl; + mdp5_ctl_set_pipeline(ctl, intf, lm); } int mdp5_crtc_get_lm(struct drm_crtc *crtc) @@ -726,12 +761,6 @@ int mdp5_crtc_get_lm(struct drm_crtc *crtc) return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm; } -struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc) -{ - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - return WARN_ON(!crtc) ? NULL : mdp5_crtc->ctl; -} - void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); @@ -776,7 +805,5 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs); plane->crtc = crtc; - mdp5_plane_install_properties(plane, &crtc->base); - return crtc; } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c index f2530f224a76..4e81ca4f964a 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c @@ -17,7 +17,7 @@ /* * CTL - MDP Control Pool Manager * - * Controls are shared between all CRTCs. + * Controls are shared between all display interfaces. * * They are intended to be used for data path configuration. * The top level register programming describes the complete data path for @@ -27,12 +27,11 @@ * * In certain use cases (high-resolution dual pipe), one single CTL can be * shared across multiple CRTCs. - * - * Because the number of CTLs can be less than the number of CRTCs, - * CTLs are dynamically allocated from a pool of CTLs, only once a CRTC is - * requested by the client (in mdp5_crtc_mode_set()). */ +#define CTL_STAT_BUSY 0x1 +#define CTL_STAT_BOOKED 0x2 + struct op_mode { struct mdp5_interface intf; @@ -46,8 +45,8 @@ struct mdp5_ctl { u32 id; int lm; - /* whether this CTL has been allocated or not: */ - bool busy; + /* CTL status bitmask */ + u32 status; /* Operation Mode Configuration for the Pipeline */ struct op_mode pipeline; @@ -61,7 +60,10 @@ struct mdp5_ctl { bool cursor_on; - struct drm_crtc *crtc; + /* True if the current CTL has FLUSH bits pending for single FLUSH. 
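 * (Editorial note, not part of the patch: "single FLUSH" covers dual-pipe
 * setups where two paired CTLs drive one logical display. Each half
 * accumulates its flush bits in single_flush_pending_mask rather than
 * writing its own FLUSH register; once both halves are pending,
 * fix_for_single_flush() below writes the combined mask to the FLUSH
 * register of the lower-numbered CTL, min_t(u32, ctl->id, ctl->pair->id),
 * so both pipes latch their new configuration on one register write.)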
*/ + bool flush_pending; + + struct mdp5_ctl *pair; /* Paired CTL to be flushed together */ }; struct mdp5_ctl_manager { @@ -74,6 +76,10 @@ struct mdp5_ctl_manager { /* to filter out non-present bits in the current hardware config */ u32 flush_hw_mask; + /* status for single FLUSH */ + bool single_flush_supported; + u32 single_flush_pending_mask; + /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */ spinlock_t pool_lock; struct mdp5_ctl ctls[MAX_CTL]; @@ -168,11 +174,21 @@ static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf) spin_unlock_irqrestore(&ctl->hw_lock, flags); } -int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf) +int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, + struct mdp5_interface *intf, int lm) { struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr); + if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) { + dev_err(mdp5_kms->dev->dev, + "CTL %d is allocated by INTF %d, but used by INTF %d\n", + ctl->id, ctl->pipeline.intf.num, intf->num); + return -EINVAL; + } + + ctl->lm = lm; + memcpy(&ctl->pipeline.intf, intf, sizeof(*intf)); ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) | @@ -287,29 +303,85 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable) blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT; ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg); + ctl->cursor_on = enable; spin_unlock_irqrestore(&ctl->hw_lock, flags); ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id); - ctl->cursor_on = enable; return 0; } -int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg) +static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe, + enum mdp_mixer_stage_id stage) +{ + switch (pipe) { + case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage); + case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage); + case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage); + case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage); + case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage); + case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage); + case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage); + case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage); + case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage); + case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage); + default: return 0; + } +} + +static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe, + enum mdp_mixer_stage_id stage) +{ + if (stage < STAGE6) + return 0; + + switch (pipe) { + case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3; + case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3; + case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3; + case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3; + case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3; + case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3; + case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3; + case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3; + case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3; + case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3; + default: return 0; + } +} + +int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt, + u32 ctl_blend_op_flags) { unsigned long flags; + u32 blend_cfg = 0, blend_ext_cfg = 0; + int i, start_stage; + + if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) { + start_stage = STAGE0; + blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR; + } else { + start_stage = STAGE_BASE; + } + for (i = start_stage; i < start_stage + 
stage_cnt; i++) { + blend_cfg |= mdp_ctl_blend_mask(stage[i], i); + blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i); + } + + spin_lock_irqsave(&ctl->hw_lock, flags); if (ctl->cursor_on) blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT; - else - blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT; - spin_lock_irqsave(&ctl->hw_lock, flags); - ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg); + ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg); + ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg); spin_unlock_irqrestore(&ctl->hw_lock, flags); - ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm); + ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm); + + DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm, + blend_cfg, blend_ext_cfg); return 0; } @@ -379,6 +451,31 @@ static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask) return sw_mask; } +static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask, + u32 *flush_id) +{ + struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; + + if (ctl->pair) { + DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask); + ctl->flush_pending = true; + ctl_mgr->single_flush_pending_mask |= (*flush_mask); + *flush_mask = 0; + + if (ctl->pair->flush_pending) { + *flush_id = min_t(u32, ctl->id, ctl->pair->id); + *flush_mask = ctl_mgr->single_flush_pending_mask; + + ctl->flush_pending = false; + ctl->pair->flush_pending = false; + ctl_mgr->single_flush_pending_mask = 0; + + DBG("Single FLUSH mask %x,ID %d", *flush_mask, + *flush_id); + } + } +} + /** * mdp5_ctl_commit() - Register Flush * @@ -400,6 +497,8 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask) struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; struct op_mode *pipeline = &ctl->pipeline; unsigned long flags; + u32 flush_id = ctl->id; + u32 curr_ctl_flush_mask; pipeline->start_mask &= ~flush_mask; @@ -415,9 +514,13 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask) flush_mask &= ctl_mgr->flush_hw_mask; + curr_ctl_flush_mask = flush_mask; + + fix_for_single_flush(ctl, &flush_mask, &flush_id); + if (flush_mask) { spin_lock_irqsave(&ctl->hw_lock, flags); - ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask); + ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask); spin_unlock_irqrestore(&ctl->hw_lock, flags); } @@ -426,7 +529,7 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask) refill_start_mask(ctl); } - return flush_mask; + return curr_ctl_flush_mask; } u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl) @@ -434,59 +537,85 @@ u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl) return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id)); } -void mdp5_ctl_release(struct mdp5_ctl *ctl) +int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl) { - struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; - unsigned long flags; + return WARN_ON(!ctl) ? 
-EINVAL : ctl->id; +} - if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) { - dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)", - ctl->id, ctl->busy); - return; +/* + * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH + */ +int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable) +{ + struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm; + struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr); + + /* silently do nothing if the hw doesn't support single FLUSH */ + if (!ctl_mgr->single_flush_supported) + return 0; + + if (!enable) { + ctlx->pair = NULL; + ctly->pair = NULL; + mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 0); + return 0; + } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) { + dev_err(ctl_mgr->dev->dev, "CTLs already paired\n"); + return -EINVAL; + } else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) { + dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n"); + return -EINVAL; } - spin_lock_irqsave(&ctl_mgr->pool_lock, flags); - ctl->busy = false; - spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); + ctlx->pair = ctly; + ctly->pair = ctlx; - DBG("CTL %d released", ctl->id); -} + mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), + MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN); -int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl) -{ - return WARN_ON(!ctl) ? -EINVAL : ctl->id; + return 0; } /* - * mdp5_ctl_request() - CTL dynamic allocation + * mdp5_ctl_request() - CTL allocation * - * Note: Current implementation considers that we can only have one CRTC per CTL + * Try to return a booked CTL when @intf_num is 1 or 2, and an unbooked one + * for other INTFs. If no CTL is available in the preferred category, allocate + * from the other one. * - * @return first free CTL + * @return NULL if no CTL is available. */ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr, - struct drm_crtc *crtc) + int intf_num) { struct mdp5_ctl *ctl = NULL; + const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED; + u32 match = ((intf_num == 1) || (intf_num == 2)) ?
CTL_STAT_BOOKED : 0; unsigned long flags; int c; spin_lock_irqsave(&ctl_mgr->pool_lock, flags); + /* search the preferred category first */ for (c = 0; c < ctl_mgr->nctl; c++) - if (!ctl_mgr->ctls[c].busy) - break; + if ((ctl_mgr->ctls[c].status & checkm) == match) + goto found; - if (unlikely(c >= ctl_mgr->nctl)) { - dev_err(ctl_mgr->dev->dev, "No more CTL available!"); - goto unlock; - } + dev_warn(ctl_mgr->dev->dev, + "fall back to the other CTL category for INTF %d!\n", intf_num); - ctl = &ctl_mgr->ctls[c]; + match ^= CTL_STAT_BOOKED; + for (c = 0; c < ctl_mgr->nctl; c++) + if ((ctl_mgr->ctls[c].status & checkm) == match) + goto found; - ctl->lm = mdp5_crtc_get_lm(crtc); - ctl->crtc = crtc; - ctl->busy = true; + dev_err(ctl_mgr->dev->dev, "No more CTL available!"); + goto unlock; + +found: + ctl = &ctl_mgr->ctls[c]; + ctl->pipeline.intf.num = intf_num; + ctl->lm = -1; + ctl->status |= CTL_STAT_BUSY; ctl->pending_ctl_trigger = 0; DBG("CTL %d allocated", ctl->id); @@ -515,9 +644,11 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr) } struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, - void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg) + void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd) { struct mdp5_ctl_manager *ctl_mgr; + const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd); + int rev = mdp5_cfg_get_hw_rev(cfg_hnd); const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl; unsigned long flags; int c, ret; @@ -551,14 +682,28 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, if (WARN_ON(!ctl_cfg->base[c])) { dev_err(dev->dev, "CTL_%d: base is null!\n", c); ret = -EINVAL; + spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); goto fail; } ctl->ctlm = ctl_mgr; ctl->id = c; ctl->reg_offset = ctl_cfg->base[c]; - ctl->busy = false; + ctl->status = 0; spin_lock_init(&ctl->hw_lock); } + + /* + * In the dual DSI case, CTL0 and CTL1 are always assigned to the two + * DSI interfaces to support the single FLUSH feature (CTL0 and CTL1 + * are flushed together by a single write to CTL0's FLUSH register), + * which keeps the two DSI pipes in sync. Single FLUSH is supported + * from hw rev v3.0. + */ + if (rev >= 3) { + ctl_mgr->single_flush_supported = true; + /* Reserve CTL0/1 for INTF1/2 */ + ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED; + ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED; + } spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); DBG("Pool of %d CTLs created.", ctl_mgr->nctl); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h index 4678228c4f14..96148c6f863c 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h @@ -23,7 +23,7 @@ */ struct mdp5_ctl_manager; struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, - void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg); + void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd); void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm); void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm); @@ -32,49 +32,32 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm); * mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler, * which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
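 *
 * An illustrative (not exhaustive) sequence for driving one interface
 * with these entry points:
 *
 *   ctl = mdp5_ctlm_request(ctlm, intf_num);
 *   mdp5_ctl_set_pipeline(ctl, intf, lm);
 *   mdp5_ctl_blend(ctl, stage, stage_cnt, ctl_blend_op_flags);
 *   mdp5_ctl_commit(ctl, flush_mask);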
*/ -struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc); +struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num); + int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl); struct mdp5_interface; -int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf); +int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf, + int lm); int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled); int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable); - -/* - * blend_cfg (LM blender config): - * - * The function below allows the caller of mdp5_ctl_blend() to specify how pipes - * are being blended according to their stage (z-order), through @blend_cfg arg. - */ -static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe, - enum mdp_mixer_stage_id stage) -{ - switch (pipe) { - case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage); - case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage); - case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage); - case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage); - case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage); - case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage); - case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage); - case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage); - case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage); - case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage); - default: return 0; - } -} +int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable); /* * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM) * - * @blend_cfg: see LM blender config definition below + * @stage: array to contain the pipe num for each stage + * @stage_cnt: valid stage number in stage array + * @ctl_blend_op_flags: blender operation mode flags * * Note: * CTL registers need to be flushed after calling this function * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask) */ -int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg); +#define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0) +int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt, + u32 ctl_blend_op_flags); /** * mdp_ctl_flush_mask...() - Register FLUSH masks @@ -91,8 +74,6 @@ u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf); u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask); u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl); -void mdp5_ctl_release(struct mdp5_ctl *ctl); - #endif /* __MDP5_CTL_H__ */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index de97c08f3f1f..c9e32b08a7a0 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c @@ -27,6 +27,8 @@ struct mdp5_encoder { spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */ bool enabled; uint32_t bsc; + + struct mdp5_ctl *ctl; }; #define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base) @@ -222,14 +224,15 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); - mdp5_crtc_set_intf(encoder->crtc, &mdp5_encoder->intf); + mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_encoder->intf, + mdp5_encoder->ctl); } static void mdp5_encoder_disable(struct drm_encoder *encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder); - struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc); + struct mdp5_ctl *ctl = mdp5_encoder->ctl; int lm 
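/* the CTL now travels with the encoder (assigned in mdp5_encoder_init())
 * rather than being looked up from the CRTC via the removed
 * mdp5_crtc_get_ctl() */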
= mdp5_crtc_get_lm(encoder->crtc); struct mdp5_interface *intf = &mdp5_encoder->intf; int intfn = mdp5_encoder->intf.num; @@ -264,7 +267,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder); - struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc); + struct mdp5_ctl *ctl = mdp5_encoder->ctl; struct mdp5_interface *intf = &mdp5_encoder->intf; int intfn = mdp5_encoder->intf.num; unsigned long flags; @@ -294,6 +297,7 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder, struct drm_encoder *slave_encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder); struct mdp5_kms *mdp5_kms; int intf_num; u32 data = 0; @@ -316,12 +320,13 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder, /* Make sure clocks are on when connectors call this function. */ mdp5_enable(mdp5_kms); - mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), - MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN); /* Dumb Panel, Sync mode */ mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), 0); mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), data); mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1); + + mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true); + mdp5_disable(mdp5_kms); return 0; @@ -329,7 +334,7 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder, /* initialize encoder */ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, - struct mdp5_interface *intf) + struct mdp5_interface *intf, struct mdp5_ctl *ctl) { struct drm_encoder *encoder = NULL; struct mdp5_encoder *mdp5_encoder; @@ -345,6 +350,7 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf)); encoder = &mdp5_encoder->base; + mdp5_encoder->ctl = ctl; spin_lock_init(&mdp5_encoder->intf_lock); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c index 33bd4c6160dd..b1f73bee1368 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c @@ -21,8 +21,11 @@ #include "msm_drv.h" #include "mdp5_kms.h" -void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask) +void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask) { + mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_CLEAR(0), + irqmask ^ (irqmask & old_irqmask)); mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask); } @@ -71,9 +74,10 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) struct drm_device *dev = mdp5_kms->dev; struct msm_drm_private *priv = dev->dev_private; unsigned int id; - uint32_t status; + uint32_t status, enable; - status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)); + enable = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_EN(0)); + status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)) & enable; mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status); VERB("status=%08x", status); @@ -112,15 +116,24 @@ irqreturn_t mdp5_irq(struct msm_kms *kms) int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) { + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + + mdp5_enable(mdp5_kms); mdp_update_vblank_mask(to_mdp_kms(kms), mdp5_crtc_vblank(crtc), true); + mdp5_disable(mdp5_kms); + return 0; } void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) { + struct mdp5_kms *mdp5_kms =
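/* as in mdp5_enable_vblank() above, the vblank mask update is bracketed
 * with mdp5_enable()/mdp5_disable(), presumably so that the interrupt
 * register write happens with the MDP clocks running */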
to_mdp5_kms(to_mdp_kms(kms)); + + mdp5_enable(mdp5_kms); mdp_update_vblank_mask(to_mdp_kms(kms), mdp5_crtc_vblank(crtc), false); + mdp5_disable(mdp5_kms); } /* diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index e253db5de5aa..047cb0433ccb 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -177,7 +177,8 @@ int mdp5_disable(struct mdp5_kms *mdp5_kms) clk_disable_unprepare(mdp5_kms->ahb_clk); clk_disable_unprepare(mdp5_kms->axi_clk); clk_disable_unprepare(mdp5_kms->core_clk); - clk_disable_unprepare(mdp5_kms->lut_clk); + if (mdp5_kms->lut_clk) + clk_disable_unprepare(mdp5_kms->lut_clk); return 0; } @@ -189,14 +190,15 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms) clk_prepare_enable(mdp5_kms->ahb_clk); clk_prepare_enable(mdp5_kms->axi_clk); clk_prepare_enable(mdp5_kms->core_clk); - clk_prepare_enable(mdp5_kms->lut_clk); + if (mdp5_kms->lut_clk) + clk_prepare_enable(mdp5_kms->lut_clk); return 0; } static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, enum mdp5_intf_type intf_type, int intf_num, - enum mdp5_intf_mode intf_mode) + enum mdp5_intf_mode intf_mode, struct mdp5_ctl *ctl) { struct drm_device *dev = mdp5_kms->dev; struct msm_drm_private *priv = dev->dev_private; @@ -209,9 +211,9 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, if ((intf_type == INTF_DSI) && (intf_mode == MDP5_INTF_DSI_MODE_COMMAND)) - encoder = mdp5_cmd_encoder_init(dev, &intf); + encoder = mdp5_cmd_encoder_init(dev, &intf, ctl); else - encoder = mdp5_encoder_init(dev, &intf); + encoder = mdp5_encoder_init(dev, &intf, ctl); if (IS_ERR(encoder)) { dev_err(dev->dev, "failed to construct encoder\n"); @@ -249,6 +251,8 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num]; + struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm; + struct mdp5_ctl *ctl; struct drm_encoder *encoder; int ret = 0; @@ -259,8 +263,14 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) if (!priv->edp) break; + ctl = mdp5_ctlm_request(ctlm, intf_num); + if (!ctl) { + ret = -EINVAL; + break; + } + encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, - MDP5_INTF_MODE_NONE); + MDP5_INTF_MODE_NONE, ctl); if (IS_ERR(encoder)) { ret = PTR_ERR(encoder); break; @@ -272,8 +282,14 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) if (!priv->hdmi) break; + ctl = mdp5_ctlm_request(ctlm, intf_num); + if (!ctl) { + ret = -EINVAL; + break; + } + encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, - MDP5_INTF_MODE_NONE); + MDP5_INTF_MODE_NONE, ctl); if (IS_ERR(encoder)) { ret = PTR_ERR(encoder); break; @@ -298,14 +314,20 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) if (!priv->dsi[dsi_id]) break; + ctl = mdp5_ctlm_request(ctlm, intf_num); + if (!ctl) { + ret = -EINVAL; + break; + } + for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) { mode = (i == MSM_DSI_CMD_ENCODER_ID) ? 
MDP5_INTF_DSI_MODE_COMMAND : MDP5_INTF_DSI_MODE_VIDEO; dsi_encs[i] = construct_encoder(mdp5_kms, INTF_DSI, - intf_num, mode); - if (IS_ERR(dsi_encs)) { - ret = PTR_ERR(dsi_encs); + intf_num, mode, ctl); + if (IS_ERR(dsi_encs[i])) { + ret = PTR_ERR(dsi_encs[i]); break; } } @@ -327,9 +349,12 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) static const enum mdp5_pipe crtcs[] = { SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3, }; - static const enum mdp5_pipe pub_planes[] = { + static const enum mdp5_pipe vig_planes[] = { SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3, }; + static const enum mdp5_pipe dma_planes[] = { + SSPP_DMA0, SSPP_DMA1, + }; struct drm_device *dev = mdp5_kms->dev; struct msm_drm_private *priv = dev->dev_private; const struct mdp5_cfg_hw *hw_cfg; @@ -350,7 +375,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) struct drm_crtc *crtc; plane = mdp5_plane_init(dev, crtcs[i], true, - hw_cfg->pipe_rgb.base[i]); + hw_cfg->pipe_rgb.base[i], hw_cfg->pipe_rgb.caps); if (IS_ERR(plane)) { ret = PTR_ERR(plane); dev_err(dev->dev, "failed to construct plane for %s (%d)\n", @@ -368,16 +393,30 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) priv->crtcs[priv->num_crtcs++] = crtc; } - /* Construct public planes: */ + /* Construct video planes: */ for (i = 0; i < hw_cfg->pipe_vig.count; i++) { struct drm_plane *plane; - plane = mdp5_plane_init(dev, pub_planes[i], false, - hw_cfg->pipe_vig.base[i]); + plane = mdp5_plane_init(dev, vig_planes[i], false, + hw_cfg->pipe_vig.base[i], hw_cfg->pipe_vig.caps); + if (IS_ERR(plane)) { + ret = PTR_ERR(plane); + dev_err(dev->dev, "failed to construct %s plane: %d\n", + pipe2name(vig_planes[i]), ret); + goto fail; + } + } + + /* DMA planes */ + for (i = 0; i < hw_cfg->pipe_dma.count; i++) { + struct drm_plane *plane; + + plane = mdp5_plane_init(dev, dma_planes[i], false, + hw_cfg->pipe_dma.base[i], hw_cfg->pipe_dma.caps); if (IS_ERR(plane)) { ret = PTR_ERR(plane); dev_err(dev->dev, "failed to construct %s plane: %d\n", - pipe2name(pub_planes[i]), ret); + pipe2name(dma_planes[i]), ret); goto fail; } } @@ -489,7 +528,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) goto fail; ret = get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk"); if (ret) - goto fail; + DBG("failed to get (optional) lut_clk clock"); ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk"); if (ret) goto fail; @@ -521,7 +560,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) goto fail; } - mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw); + mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg); if (IS_ERR(mdp5_kms->ctlm)) { ret = PTR_ERR(mdp5_kms->ctlm); mdp5_kms->ctlm = NULL; @@ -577,6 +616,11 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) goto fail; } + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.max_width = config->hw->lm.max_width; + dev->mode_config.max_height = config->hw->lm.max_height; + return kms; fail: diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index e79ac09b7216..0bb62423586e 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -70,18 +70,12 @@ struct mdp5_kms { struct mdp5_plane_state { struct drm_plane_state base; - /* "virtual" zpos.. we calculate actual mixer-stage at runtime - * by sorting the attached planes by zpos and then assigning - * mixer stage lowest to highest. 
Private planes get default - * zpos of zero, and public planes a unique value that is - * greater than zero. This way, things work out if a naive - * userspace assigns planes to a crtc without setting zpos. - */ - int zpos; + /* aligned with property */ + uint8_t premultiplied; + uint8_t zpos; + uint8_t alpha; - /* the actual mixer stage, calculated in crtc->atomic_check() - * NOTE: this should move to mdp5_crtc_state, when that exists - */ + /* assigned by crtc blender */ enum mdp_mixer_stage_id stage; /* some additional transactional status to help us know in the @@ -192,7 +186,8 @@ static inline uint32_t lm2ppdone(int lm) int mdp5_disable(struct mdp5_kms *mdp5_kms); int mdp5_enable(struct mdp5_kms *mdp5_kms); -void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask); +void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask); void mdp5_irq_preinstall(struct msm_kms *kms); int mdp5_irq_postinstall(struct msm_kms *kms); void mdp5_irq_uninstall(struct msm_kms *kms); @@ -202,60 +197,38 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms); void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); -static inline bool pipe_supports_yuv(enum mdp5_pipe pipe) -{ - switch (pipe) { - case SSPP_VIG0: - case SSPP_VIG1: - case SSPP_VIG2: - case SSPP_VIG3: - return true; - default: - return false; - } -} - -static inline -uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats, - uint32_t max_formats) -{ - return mdp_get_formats(pixel_formats, max_formats, - !pipe_supports_yuv(pipe)); -} - -void mdp5_plane_install_properties(struct drm_plane *plane, - struct drm_mode_object *obj); uint32_t mdp5_plane_get_flush(struct drm_plane *plane); void mdp5_plane_complete_flip(struct drm_plane *plane); void mdp5_plane_complete_commit(struct drm_plane *plane, struct drm_plane_state *state); enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); struct drm_plane *mdp5_plane_init(struct drm_device *dev, - enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset); + enum mdp5_pipe pipe, bool private_plane, + uint32_t reg_offset, uint32_t caps); uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); int mdp5_crtc_get_lm(struct drm_crtc *crtc); -struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc); void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file); -void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf); +void mdp5_crtc_set_pipeline(struct drm_crtc *crtc, + struct mdp5_interface *intf, struct mdp5_ctl *ctl); void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc); struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, struct drm_plane *plane, int id); struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, - struct mdp5_interface *intf); + struct mdp5_interface *intf, struct mdp5_ctl *ctl); int mdp5_encoder_set_split_display(struct drm_encoder *encoder, struct drm_encoder *slave_encoder); #ifdef CONFIG_DRM_MSM_DSI struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, - struct mdp5_interface *intf); + struct mdp5_interface *intf, struct mdp5_ctl *ctl); int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, struct drm_encoder *slave_encoder); #else -static inline struct drm_encoder *mdp5_cmd_encoder_init( - struct drm_device *dev, struct mdp5_interface *intf) +static inline struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, + struct mdp5_interface *intf, struct mdp5_ctl *ctl) { return 
ERR_PTR(-EINVAL); } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 22275568ab8b..07fb62fea6dc 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 The Linux Foundation. All rights reserved. + * Copyright (C) 2014-2015 The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -26,6 +26,7 @@ struct mdp5_plane { spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */ uint32_t reg_offset; + uint32_t caps; uint32_t flush_mask; /* used to commit pipe registers */ @@ -40,6 +41,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, unsigned int crtc_w, unsigned int crtc_h, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h); + static void set_scanout_locked(struct drm_plane *plane, struct drm_framebuffer *fb); @@ -64,18 +66,122 @@ static void mdp5_plane_destroy(struct drm_plane *plane) kfree(mdp5_plane); } +static void mdp5_plane_install_rotation_property(struct drm_device *dev, + struct drm_plane *plane) +{ + struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); + + if (!(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP) && + !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) + return; + + if (!dev->mode_config.rotation_property) + dev->mode_config.rotation_property = + drm_mode_create_rotation_property(dev, + BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y)); + + if (dev->mode_config.rotation_property) + drm_object_attach_property(&plane->base, + dev->mode_config.rotation_property, + 0); +} + /* helper to install properties which are common to planes and crtcs */ -void mdp5_plane_install_properties(struct drm_plane *plane, +static void mdp5_plane_install_properties(struct drm_plane *plane, struct drm_mode_object *obj) { - // XXX + struct drm_device *dev = plane->dev; + struct msm_drm_private *dev_priv = dev->dev_private; + struct drm_property *prop; + +#define INSTALL_PROPERTY(name, NAME, init_val, fnc, ...) 
do { \ + prop = dev_priv->plane_property[PLANE_PROP_##NAME]; \ + if (!prop) { \ + prop = drm_property_##fnc(dev, 0, #name, \ + ##__VA_ARGS__); \ + if (!prop) { \ + dev_warn(dev->dev, \ + "Create property %s failed\n", \ + #name); \ + return; \ + } \ + dev_priv->plane_property[PLANE_PROP_##NAME] = prop; \ + } \ + drm_object_attach_property(&plane->base, prop, init_val); \ + } while (0) + +#define INSTALL_RANGE_PROPERTY(name, NAME, min, max, init_val) \ + INSTALL_PROPERTY(name, NAME, init_val, \ + create_range, min, max) + +#define INSTALL_ENUM_PROPERTY(name, NAME, init_val) \ + INSTALL_PROPERTY(name, NAME, init_val, \ + create_enum, name##_prop_enum_list, \ + ARRAY_SIZE(name##_prop_enum_list)) + + INSTALL_RANGE_PROPERTY(zpos, ZPOS, 1, 255, 1); + + mdp5_plane_install_rotation_property(dev, plane); + +#undef INSTALL_RANGE_PROPERTY +#undef INSTALL_ENUM_PROPERTY +#undef INSTALL_PROPERTY +} + +static int mdp5_plane_atomic_set_property(struct drm_plane *plane, + struct drm_plane_state *state, struct drm_property *property, + uint64_t val) +{ + struct drm_device *dev = plane->dev; + struct mdp5_plane_state *pstate; + struct msm_drm_private *dev_priv = dev->dev_private; + int ret = 0; + + pstate = to_mdp5_plane_state(state); + +#define SET_PROPERTY(name, NAME, type) do { \ + if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \ + pstate->name = (type)val; \ + DBG("Set property %s %d", #name, (type)val); \ + goto done; \ + } \ + } while (0) + + SET_PROPERTY(zpos, ZPOS, uint8_t); + + dev_err(dev->dev, "Invalid property\n"); + ret = -EINVAL; +done: + return ret; +#undef SET_PROPERTY } -int mdp5_plane_set_property(struct drm_plane *plane, - struct drm_property *property, uint64_t val) +static int mdp5_plane_atomic_get_property(struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, uint64_t *val) { - // XXX - return -EINVAL; + struct drm_device *dev = plane->dev; + struct mdp5_plane_state *pstate; + struct msm_drm_private *dev_priv = dev->dev_private; + int ret = 0; + + pstate = to_mdp5_plane_state(state); + +#define GET_PROPERTY(name, NAME, type) do { \ + if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \ + *val = pstate->name; \ + DBG("Get property %s %lld", #name, *val); \ + goto done; \ + } \ + } while (0) + + GET_PROPERTY(zpos, ZPOS, uint8_t); + + dev_err(dev->dev, "Invalid property\n"); + ret = -EINVAL; +done: + return ret; +#undef GET_PROPERTY } static void mdp5_plane_reset(struct drm_plane *plane) @@ -88,11 +194,15 @@ static void mdp5_plane_reset(struct drm_plane *plane) kfree(to_mdp5_plane_state(plane->state)); mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL); - if (plane->type == DRM_PLANE_TYPE_PRIMARY) { - mdp5_state->zpos = 0; - } else { - mdp5_state->zpos = 1 + drm_plane_index(plane); - } + /* assign default blend parameters */ + mdp5_state->alpha = 255; + mdp5_state->premultiplied = 0; + + if (plane->type == DRM_PLANE_TYPE_PRIMARY) + mdp5_state->zpos = STAGE_BASE; + else + mdp5_state->zpos = STAGE0 + drm_plane_index(plane); + mdp5_state->base.plane = plane; plane->state = &mdp5_state->base; @@ -131,7 +241,9 @@ static const struct drm_plane_funcs mdp5_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = mdp5_plane_destroy, - .set_property = mdp5_plane_set_property, + .set_property = drm_atomic_helper_plane_set_property, + .atomic_set_property = mdp5_plane_atomic_set_property, + .atomic_get_property = mdp5_plane_atomic_get_property, .reset =
mdp5_plane_reset, .atomic_duplicate_state = mdp5_plane_duplicate_state, .atomic_destroy_state = mdp5_plane_destroy_state, @@ -164,10 +276,44 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, { struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); struct drm_plane_state *old_state = plane->state; + const struct mdp_format *format; + bool vflip, hflip; DBG("%s: check (%d -> %d)", mdp5_plane->name, plane_enabled(old_state), plane_enabled(state)); + if (plane_enabled(state)) { + format = to_mdp_format(msm_framebuffer_format(state->fb)); + if (MDP_FORMAT_IS_YUV(format) && + !pipe_supports_yuv(mdp5_plane->caps)) { + dev_err(plane->dev->dev, + "Pipe doesn't support YUV\n"); + + return -EINVAL; + } + + if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) && + (((state->src_w >> 16) != state->crtc_w) || + ((state->src_h >> 16) != state->crtc_h))) { + dev_err(plane->dev->dev, + "Pipe doesn't support scaling (%dx%d -> %dx%d)\n", + state->src_w >> 16, state->src_h >> 16, + state->crtc_w, state->crtc_h); + + return -EINVAL; + } + + hflip = !!(state->rotation & BIT(DRM_REFLECT_X)); + vflip = !!(state->rotation & BIT(DRM_REFLECT_Y)); + if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) || + (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) { + dev_err(plane->dev->dev, + "Pipe doesn't support flip\n"); + + return -EINVAL; + } + } + if (plane_enabled(state) && plane_enabled(old_state)) { /* we cannot change SMP block configuration during scanout: */ bool full_modeset = false; @@ -346,16 +492,21 @@ static int calc_phase_step(uint32_t src, uint32_t dst, uint32_t *out_phase) return 0; } -static int calc_scalex_steps(uint32_t pixel_format, uint32_t src, uint32_t dest, +static int calc_scalex_steps(struct drm_plane *plane, + uint32_t pixel_format, uint32_t src, uint32_t dest, uint32_t phasex_steps[2]) { + struct mdp5_kms *mdp5_kms = get_kms(plane); + struct device *dev = mdp5_kms->dev->dev; uint32_t phasex_step; unsigned int hsub; int ret; ret = calc_phase_step(src, dest, &phasex_step); - if (ret) + if (ret) { + dev_err(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret); return ret; + } hsub = drm_format_horz_chroma_subsampling(pixel_format); @@ -365,16 +516,21 @@ static int calc_scalex_steps(uint32_t pixel_format, uint32_t src, uint32_t dest, return 0; } -static int calc_scaley_steps(uint32_t pixel_format, uint32_t src, uint32_t dest, +static int calc_scaley_steps(struct drm_plane *plane, + uint32_t pixel_format, uint32_t src, uint32_t dest, uint32_t phasey_steps[2]) { + struct mdp5_kms *mdp5_kms = get_kms(plane); + struct device *dev = mdp5_kms->dev->dev; uint32_t phasey_step; unsigned int vsub; int ret; ret = calc_phase_step(src, dest, &phasey_step); - if (ret) + if (ret) { + dev_err(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret); return ret; + } vsub = drm_format_vert_chroma_subsampling(pixel_format); @@ -384,28 +540,38 @@ static int calc_scaley_steps(uint32_t pixel_format, uint32_t src, uint32_t dest, return 0; } -static uint32_t get_scalex_config(uint32_t src, uint32_t dest) -{ - uint32_t filter; - - filter = (src <= dest) ? 
SCALE_FILTER_BIL : SCALE_FILTER_PCMN; - - return MDP5_PIPE_SCALE_CONFIG_SCALEX_EN | - MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(filter) | - MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(filter) | - MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(filter); -} - -static uint32_t get_scaley_config(uint32_t src, uint32_t dest) +static uint32_t get_scale_config(enum mdp_chroma_samp_type chroma_sample, + uint32_t src, uint32_t dest, bool hor) { - uint32_t filter; - - filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN; + uint32_t y_filter = (src <= dest) ? SCALE_FILTER_CA : SCALE_FILTER_PCMN; + uint32_t y_a_filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN; + uint32_t uv_filter = ((src / 2) <= dest) ? /* 2x upsample */ + SCALE_FILTER_BIL : SCALE_FILTER_PCMN; + uint32_t value = 0; + + if (chroma_sample == CHROMA_420 || chroma_sample == CHROMA_H2V1) { + if (hor) + value = MDP5_PIPE_SCALE_CONFIG_SCALEX_EN | + MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(y_filter) | + MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(y_a_filter) | + MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(uv_filter); + else + value = MDP5_PIPE_SCALE_CONFIG_SCALEY_EN | + MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(y_filter) | + MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(y_a_filter) | + MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter); + } else if (src != dest) { + if (hor) + value = MDP5_PIPE_SCALE_CONFIG_SCALEX_EN | + MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(y_a_filter) | + MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(y_a_filter); + else + value = MDP5_PIPE_SCALE_CONFIG_SCALEY_EN | + MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(y_a_filter) | + MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(y_a_filter); + } - return MDP5_PIPE_SCALE_CONFIG_SCALEY_EN | - MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(filter) | - MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(filter) | - MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(filter); + return value; } static int mdp5_plane_mode_set(struct drm_plane *plane, @@ -416,8 +582,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, uint32_t src_w, uint32_t src_h) { struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); + struct drm_plane_state *pstate = plane->state; struct mdp5_kms *mdp5_kms = get_kms(plane); - struct device *dev = mdp5_kms->dev->dev; enum mdp5_pipe pipe = mdp5_plane->pipe; const struct mdp_format *format; uint32_t nplanes, config = 0; @@ -425,6 +591,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, uint32_t phasex_step[2] = {0,}, phasey_step[2] = {0,}; uint32_t hdecm = 0, vdecm = 0; uint32_t pix_format; + bool vflip, hflip; unsigned long flags; int ret; @@ -449,7 +616,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, /* Request some memory from the SMP: */ ret = mdp5_smp_request(mdp5_kms->smp, - mdp5_plane->pipe, fb->pixel_format, src_w); + mdp5_plane->pipe, format, src_w, false); if (ret) return ret; @@ -461,29 +628,23 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, */ mdp5_smp_configure(mdp5_kms->smp, pipe); - /* SCALE is used to both scale and up-sample chroma components */ + ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, phasex_step); + if (ret) + return ret; - if ((src_w != crtc_w) || MDP_FORMAT_IS_YUV(format)) { - /* TODO calc hdecm */ - ret = calc_scalex_steps(pix_format, src_w, crtc_w, phasex_step); - if (ret) { - dev_err(dev, "X scaling (%d -> %d) failed: %d\n", - src_w, crtc_w, ret); - return ret; - } - config |= get_scalex_config(src_w, crtc_w); - } + ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, phasey_step); + if 
(ret) + return ret; - if ((src_h != crtc_h) || MDP_FORMAT_IS_YUV(format)) { - /* TODO calc vdecm */ - ret = calc_scaley_steps(pix_format, src_h, crtc_h, phasey_step); - if (ret) { - dev_err(dev, "Y scaling (%d -> %d) failed: %d\n", - src_h, crtc_h, ret); - return ret; - } - config |= get_scaley_config(src_h, crtc_h); - } + /* TODO calc hdecm, vdecm */ + + /* SCALE is used to both scale and up-sample chroma components */ + config |= get_scale_config(format->chroma_sample, src_w, crtc_w, true); + config |= get_scale_config(format->chroma_sample, src_h, crtc_h, false); + DBG("scale config = %x", config); + + hflip = !!(pstate->rotation & BIT(DRM_REFLECT_X)); + vflip = !!(pstate->rotation & BIT(DRM_REFLECT_Y)); spin_lock_irqsave(&mdp5_plane->pipe_lock, flags); @@ -516,7 +677,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) | MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) | - MDP5_PIPE_SRC_FORMAT_NUM_PLANES(format->fetch_type) | + MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) | MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample)); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe), @@ -526,29 +687,35 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3])); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe), + (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) | + (vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) | MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS)); /* not using secure mode: */ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), - phasex_step[0]); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), - phasey_step[0]); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe), - phasex_step[1]); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe), - phasey_step[1]); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe), - MDP5_PIPE_DECIMATION_VERT(vdecm) | - MDP5_PIPE_DECIMATION_HORZ(hdecm)); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config); - - if (MDP_FORMAT_IS_YUV(format)) - csc_enable(mdp5_kms, pipe, - mdp_get_default_csc_cfg(CSC_YUV2RGB)); - else - csc_disable(mdp5_kms, pipe); + if (mdp5_plane->caps & MDP_PIPE_CAP_SCALE) { + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), + phasex_step[0]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), + phasey_step[0]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe), + phasex_step[1]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe), + phasey_step[1]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe), + MDP5_PIPE_DECIMATION_VERT(vdecm) | + MDP5_PIPE_DECIMATION_HORZ(hdecm)); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config); + } + + if (mdp5_plane->caps & MDP_PIPE_CAP_CSC) { + if (MDP_FORMAT_IS_YUV(format)) + csc_enable(mdp5_kms, pipe, + mdp_get_default_csc_cfg(CSC_YUV2RGB)); + else + csc_disable(mdp5_kms, pipe); + } set_scanout_locked(plane, fb); @@ -599,7 +766,8 @@ void mdp5_plane_complete_commit(struct drm_plane *plane, /* initialize plane */ struct drm_plane *mdp5_plane_init(struct drm_device *dev, - enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset) + enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset, + uint32_t caps) { struct drm_plane *plane = NULL; struct mdp5_plane *mdp5_plane; @@ -616,9 +784,11 @@ struct drm_plane *mdp5_plane_init(struct 
drm_device *dev, mdp5_plane->pipe = pipe; mdp5_plane->name = pipe2name(pipe); + mdp5_plane->caps = caps; - mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats, - ARRAY_SIZE(mdp5_plane->formats)); + mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats, + ARRAY_SIZE(mdp5_plane->formats), + !pipe_supports_yuv(mdp5_plane->caps)); mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe); mdp5_plane->reg_offset = reg_offset; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c index 64a27d86f2f5..563cca972dcb 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c @@ -90,6 +90,8 @@ struct mdp5_smp { struct drm_device *dev; + const struct mdp5_smp_block *cfg; + int blk_cnt; int blk_size; @@ -137,14 +139,12 @@ static int smp_request_block(struct mdp5_smp *smp, u32 cid, int nblks) { struct mdp5_kms *mdp5_kms = get_kms(smp); - const struct mdp5_cfg_hw *hw_cfg; struct mdp5_client_smp_state *ps = &smp->client_state[cid]; int i, ret, avail, cur_nblks, cnt = smp->blk_cnt; int reserved; unsigned long flags; - hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - reserved = hw_cfg->smp.reserved[cid]; + reserved = smp->cfg->reserved[cid]; spin_lock_irqsave(&smp->state_lock, flags); @@ -209,12 +209,14 @@ static void set_fifo_thresholds(struct mdp5_smp *smp, * decimated width. Ie. SMP buffering sits downstream of decimation (which * presumably happens during the dma from scanout buffer). */ -int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width) +int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, + const struct mdp_format *format, u32 width, bool hdecim) { struct mdp5_kms *mdp5_kms = get_kms(smp); struct drm_device *dev = mdp5_kms->dev; int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg); int i, hsub, nplanes, nlines, nblks, ret; + u32 fmt = format->base.pixel_format; nplanes = drm_format_num_planes(fmt); hsub = drm_format_horz_chroma_subsampling(fmt); @@ -222,6 +224,21 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid /* different if BWC (compressed framebuffer?) enabled: */ nlines = 2; + /* Newer MDPs have split/packing logic, which fetches sub-sampled + * U and V components (splits them from Y if necessary), packs + * them together, and writes to SMP using a single client.
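+ * As a rough example of the substitution below: an NV12 buffer is
+ * requested as if it were NV24 with nplanes = 2, so Y keeps its own
+ * SMP client while the packed Cb/Cr pair shares one; and when
+ * horizontal decimation is enabled, hsub is dropped back to 1 since
+ * the HW decimates the sub-sampled chroma components less.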
+ */ + if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) { + fmt = DRM_FORMAT_NV24; + nplanes = 2; + + /* if decimation is enabled, HW decimates less on the + * sub sampled chroma components + */ + if (hdecim && (hsub > 1)) + hsub = 1; + } + for (i = 0, nblks = 0; i < nplanes; i++) { int n, fetch_stride, cpp; @@ -388,6 +405,7 @@ struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_blo } smp->dev = dev; + smp->cfg = cfg; smp->blk_cnt = cfg->mmb_count; smp->blk_size = cfg->mmb_size; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h index 5b6c2363f592..20b87e800ea3 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h @@ -39,7 +39,8 @@ struct mdp5_smp; struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg); void mdp5_smp_destroy(struct mdp5_smp *smp); -int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width); +int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, + const struct mdp_format *format, u32 width, bool hdecim); void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe); void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe); void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe); diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h index 641d036c5bcb..4f792c4e40f4 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h @@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, 
from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) @@ -46,7 +46,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. enum mdp_chroma_samp_type { - CHROMA_RGB = 0, + CHROMA_FULL = 0, CHROMA_H2V1 = 1, CHROMA_H1V2 = 2, CHROMA_420 = 3, @@ -65,6 +65,10 @@ enum mdp_mixer_stage_id { STAGE1 = 3, STAGE2 = 4, STAGE3 = 5, + STAGE4 = 6, + STAGE5 = 7, + STAGE6 = 8, + STAGE_MAX = 8, }; enum mdp_alpha_type { diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c index 7b0524dc1872..1c2caffc97e4 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_format.c +++ b/drivers/gpu/drm/msm/mdp/mdp_format.c @@ -71,7 +71,7 @@ static struct csc_cfg csc_convert[CSC_MAX] = { }, }; -#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs) { \ +#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs, yuv) { \ .base = { .pixel_format = DRM_FORMAT_ ## name }, \ .bpc_a = BPC ## a ## A, \ .bpc_r = BPC ## r, \ @@ -83,7 +83,8 @@ static struct csc_cfg csc_convert[CSC_MAX] = { .cpp = c, \ .unpack_count = cnt, \ .fetch_type = fp, \ - .chroma_sample = cs \ + .chroma_sample = cs, \ + .is_yuv = yuv, \ } #define BPC0A 0 @@ -95,30 +96,49 @@ static struct csc_cfg csc_convert[CSC_MAX] = { static const struct mdp_format formats[] = { /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt ... 
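 * (the trailing columns, elided above, are fetch type, chroma sample
 * and the new is_yuv flag -- e.g. NV12 below is
 * MDP_PLANE_PSEUDO_PLANAR / CHROMA_420 / true)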
*/ FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_RGB), + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), FMT(ABGR8888, 8, 8, 8, 8, 2, 0, 1, 3, true, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_RGB), + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), FMT(RGBA8888, 8, 8, 8, 8, 3, 1, 0, 2, true, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_RGB), + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), FMT(BGRA8888, 8, 8, 8, 8, 3, 2, 0, 1, true, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_RGB), + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_RGB), + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3, - MDP_PLANE_INTERLEAVED, CHROMA_RGB), + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3, - MDP_PLANE_INTERLEAVED, CHROMA_RGB), + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3, - MDP_PLANE_INTERLEAVED, CHROMA_RGB), + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3, - MDP_PLANE_INTERLEAVED, CHROMA_RGB), + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), /* --- RGB formats above / YUV formats below this line --- */ + /* 2 plane YUV */ FMT(NV12, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2, - MDP_PLANE_PSEUDO_PLANAR, CHROMA_420), + MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true), FMT(NV21, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2, - MDP_PLANE_PSEUDO_PLANAR, CHROMA_420), + MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true), + FMT(NV16, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2, + MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true), + FMT(NV61, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2, + MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true), + /* 1 plane YUV */ + FMT(VYUY, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 2, 4, + MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), + FMT(UYVY, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 2, 4, + MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), + FMT(YUYV, 0, 8, 8, 8, 0, 1, 0, 2, false, true, 2, 4, + MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), + FMT(YVYU, 0, 8, 8, 8, 0, 2, 0, 1, false, true, 2, 4, + MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), + /* 3 plane YUV */ + FMT(YUV420, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 1, 1, + MDP_PLANE_PLANAR, CHROMA_420, true), + FMT(YVU420, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 1, 1, + MDP_PLANE_PLANAR, CHROMA_420, true), }; /* diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c index 1988c243f437..64287304054d 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c @@ -39,7 +39,8 @@ static void update_irq(struct mdp_kms *mdp_kms) list_for_each_entry(irq, &mdp_kms->irq_list, node) irqmask |= irq->irqmask; - mdp_kms->funcs->set_irqmask(mdp_kms, irqmask); + mdp_kms->funcs->set_irqmask(mdp_kms, irqmask, mdp_kms->cur_irq_mask); + mdp_kms->cur_irq_mask = irqmask; } /* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h index 2d3428cb74d0..46a94e7d50e2 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h @@ -30,7 +30,8 @@ struct mdp_kms; struct mdp_kms_funcs { struct msm_kms_funcs base; - void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask); + void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask); }; struct mdp_kms { @@ -42,6 +43,7 @@ struct mdp_kms { bool in_irq; 
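/* cur_irq_mask below caches the mask last written to the hardware;
 * set_irqmask() implementations receive it as old_irqmask so that the
 * stale status bits of freshly-enabled irqs can be cleared before they
 * are unmasked (see mdp5_set_irqmask()). */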
struct list_head irq_list; /* list of mdp4_irq */ uint32_t vblank_mask; /* irq bits set for userspace vblank */ + uint32_t cur_irq_mask; /* current irq mask */ }; #define to_mdp_kms(x) container_of(x, struct mdp_kms, base) @@ -90,13 +92,27 @@ struct mdp_format { uint8_t cpp, unpack_count; enum mdp_fetch_type fetch_type; enum mdp_chroma_samp_type chroma_sample; + bool is_yuv; }; #define to_mdp_format(x) container_of(x, struct mdp_format, base) -#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->chroma_sample > CHROMA_RGB) +#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv) uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only); const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format); +/* MDP pipe capabilities */ +#define MDP_PIPE_CAP_HFLIP BIT(0) +#define MDP_PIPE_CAP_VFLIP BIT(1) +#define MDP_PIPE_CAP_SCALE BIT(2) +#define MDP_PIPE_CAP_CSC BIT(3) +#define MDP_PIPE_CAP_DECIMATION BIT(4) + +static inline bool pipe_supports_yuv(uint32_t pipe_caps) +{ + return (pipe_caps & MDP_PIPE_CAP_SCALE) && + (pipe_caps & MDP_PIPE_CAP_CSC); +} + enum csc_type { CSC_RGB2RGB = 0, CSC_YUV2RGB, diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index d3467b115e04..0339c5d82d37 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -116,6 +116,65 @@ u32 msm_readl(const void __iomem *addr) return val; } +struct vblank_event { + struct list_head node; + int crtc_id; + bool enable; +}; + +static void vblank_ctrl_worker(struct work_struct *work) +{ + struct msm_vblank_ctrl *vbl_ctrl = container_of(work, + struct msm_vblank_ctrl, work); + struct msm_drm_private *priv = container_of(vbl_ctrl, + struct msm_drm_private, vblank_ctrl); + struct msm_kms *kms = priv->kms; + struct vblank_event *vbl_ev, *tmp; + unsigned long flags; + + spin_lock_irqsave(&vbl_ctrl->lock, flags); + list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) { + list_del(&vbl_ev->node); + spin_unlock_irqrestore(&vbl_ctrl->lock, flags); + + if (vbl_ev->enable) + kms->funcs->enable_vblank(kms, + priv->crtcs[vbl_ev->crtc_id]); + else + kms->funcs->disable_vblank(kms, + priv->crtcs[vbl_ev->crtc_id]); + + kfree(vbl_ev); + + spin_lock_irqsave(&vbl_ctrl->lock, flags); + } + + spin_unlock_irqrestore(&vbl_ctrl->lock, flags); +} + +static int vblank_ctrl_queue_work(struct msm_drm_private *priv, + int crtc_id, bool enable) +{ + struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl; + struct vblank_event *vbl_ev; + unsigned long flags; + + vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC); + if (!vbl_ev) + return -ENOMEM; + + vbl_ev->crtc_id = crtc_id; + vbl_ev->enable = enable; + + spin_lock_irqsave(&vbl_ctrl->lock, flags); + list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list); + spin_unlock_irqrestore(&vbl_ctrl->lock, flags); + + queue_work(priv->wq, &vbl_ctrl->work); + + return 0; +} + /* * DRM operations: */ @@ -125,6 +184,18 @@ static int msm_unload(struct drm_device *dev) struct msm_drm_private *priv = dev->dev_private; struct msm_kms *kms = priv->kms; struct msm_gpu *gpu = priv->gpu; + struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl; + struct vblank_event *vbl_ev, *tmp; + + /* We must cancel and cleanup any pending vblank enable/disable + * work before drm_irq_uninstall() to avoid work re-enabling an + * irq after uninstall has disabled it. 
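+ * (enable/disable requests are queued as vblank_event nodes and
+ * serviced asynchronously by vblank_ctrl_worker(), so any events
+ * still sitting on event_list at this point are simply freed)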
+ */ + cancel_work_sync(&vbl_ctrl->work); + list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) { + list_del(&vbl_ev->node); + kfree(vbl_ev); + } drm_kms_helper_poll_fini(dev); drm_mode_config_cleanup(dev); @@ -282,6 +353,9 @@ static int msm_load(struct drm_device *dev, unsigned long flags) INIT_LIST_HEAD(&priv->inactive_list); INIT_LIST_HEAD(&priv->fence_cbs); + INIT_LIST_HEAD(&priv->vblank_ctrl.event_list); + INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker); + spin_lock_init(&priv->vblank_ctrl.lock); drm_mode_config_init(dev); @@ -331,10 +405,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags) } } - dev->mode_config.min_width = 0; - dev->mode_config.min_height = 0; - dev->mode_config.max_width = 2048; - dev->mode_config.max_height = 2048; dev->mode_config.funcs = &mode_config_funcs; ret = drm_vblank_init(dev, priv->num_crtcs); @@ -468,7 +538,7 @@ static int msm_enable_vblank(struct drm_device *dev, int crtc_id) if (!kms) return -ENXIO; DBG("dev=%p, crtc=%d", dev, crtc_id); - return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]); + return vblank_ctrl_queue_work(priv, crtc_id, true); } static void msm_disable_vblank(struct drm_device *dev, int crtc_id) @@ -478,7 +548,7 @@ static void msm_disable_vblank(struct drm_device *dev, int crtc_id) if (!kms) return; DBG("dev=%p, crtc=%d", dev, crtc_id); - kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]); + vblank_ctrl_queue_work(priv, crtc_id, false); } /* diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 4ff0ec9c994b..3be7a56b14f1 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -30,6 +30,7 @@ #include <linux/list.h> #include <linux/iommu.h> #include <linux/types.h> +#include <linux/of_graph.h> #include <asm/sizes.h> #ifndef CONFIG_OF @@ -64,6 +65,19 @@ struct msm_file_private { int dummy; }; +enum msm_mdp_plane_property { + PLANE_PROP_ZPOS, + PLANE_PROP_ALPHA, + PLANE_PROP_PREMULTIPLIED, + PLANE_PROP_MAX_NUM +}; + +struct msm_vblank_ctrl { + struct work_struct work; + struct list_head event_list; + spinlock_t lock; +}; + struct msm_drm_private { struct msm_kms *kms; @@ -128,6 +142,9 @@ struct msm_drm_private { unsigned int num_connectors; struct drm_connector *connectors[8]; + /* Properties */ + struct drm_property *plane_property[PLANE_PROP_MAX_NUM]; + /* VRAM carveout, used when no IOMMU: */ struct { unsigned long size; @@ -137,6 +154,8 @@ struct msm_drm_private { */ struct drm_mm mm; } vram; + + struct msm_vblank_ctrl vblank_ctrl; }; struct msm_format { diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index 95f6532df02d..f97a1964ef39 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -43,11 +43,11 @@ static struct fb_ops msm_fb_ops = { /* Note: to properly handle manual update displays, we wrap the * basic fbdev ops which write to the framebuffer */ - .fb_read = fb_sys_read, - .fb_write = fb_sys_write, - .fb_fillrect = sys_fillrect, - .fb_copyarea = sys_copyarea, - .fb_imageblit = sys_imageblit, + .fb_read = drm_fb_helper_sys_read, + .fb_write = drm_fb_helper_sys_write, + .fb_fillrect = drm_fb_helper_sys_fillrect, + .fb_copyarea = drm_fb_helper_sys_copyarea, + .fb_imageblit = drm_fb_helper_sys_imageblit, .fb_mmap = msm_fbdev_mmap, .fb_check_var = drm_fb_helper_check_var, @@ -144,10 +144,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, goto fail_unlock; } - fbi = framebuffer_alloc(0, dev->dev); - if (!fbi) { + fbi = drm_fb_helper_alloc_fbi(helper); + if 
(IS_ERR(fbi)) { dev_err(dev->dev, "failed to allocate fb info\n"); - ret = -ENOMEM; + ret = PTR_ERR(fbi); goto fail_unlock; } @@ -155,7 +155,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, fbdev->fb = fb; helper->fb = fb; - helper->fbdev = fbi; fbi->par = helper; fbi->flags = FBINFO_DEFAULT; @@ -163,12 +162,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, strcpy(fbi->fix.id, "msm"); - ret = fb_alloc_cmap(&fbi->cmap, 256, 0); - if (ret) { - ret = -ENOMEM; - goto fail_unlock; - } - drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); @@ -191,7 +184,6 @@ fail_unlock: fail: if (ret) { - framebuffer_release(fbi); if (fb) { drm_framebuffer_unregister_private(fb); drm_framebuffer_remove(fb); @@ -266,17 +258,11 @@ void msm_fbdev_free(struct drm_device *dev) struct msm_drm_private *priv = dev->dev_private; struct drm_fb_helper *helper = priv->fbdev; struct msm_fbdev *fbdev; - struct fb_info *fbi; DBG(); - fbi = helper->fbdev; - - /* only cleanup framebuffer if it is present */ - if (fbi) { - unregister_framebuffer(fbi); - framebuffer_release(fbi); - } + drm_fb_helper_unregister_fbi(helper); + drm_fb_helper_release_fbi(helper); drm_fb_helper_fini(helper); diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c index 9f2498571d09..5f6ea1873f51 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c +++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c @@ -261,7 +261,7 @@ nv10_overlay_init(struct drm_device *device) { struct nouveau_drm *drm = nouveau_drm(device); struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL); - int num_formats = ARRAY_SIZE(formats); + unsigned int num_formats = ARRAY_SIZE(formats); int ret; if (!plane) diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 6751553abe4a..2791701685dc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -84,7 +84,7 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) if (ret != -ENODEV) nouveau_fbcon_gpu_lockup(info); - cfb_fillrect(info, rect); + drm_fb_helper_cfb_fillrect(info, rect); } static void @@ -116,7 +116,7 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image) if (ret != -ENODEV) nouveau_fbcon_gpu_lockup(info); - cfb_copyarea(info, image); + drm_fb_helper_cfb_copyarea(info, image); } static void @@ -148,7 +148,7 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) if (ret != -ENODEV) nouveau_fbcon_gpu_lockup(info); - cfb_imageblit(info, image); + drm_fb_helper_cfb_imageblit(info, image); } static int @@ -197,9 +197,9 @@ static struct fb_ops nouveau_fbcon_sw_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, + .fb_fillrect = drm_fb_helper_cfb_fillrect, + .fb_copyarea = drm_fb_helper_cfb_copyarea, + .fb_imageblit = drm_fb_helper_cfb_imageblit, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, @@ -319,7 +319,6 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, struct nouveau_channel *chan; struct nouveau_bo *nvbo; struct drm_mode_fb_cmd2 mode_cmd; - struct pci_dev *pdev = dev->pdev; int size, ret; mode_cmd.width = sizes->surface_width; @@ -365,20 +364,13 @@ 
nouveau_fbcon_create(struct drm_fb_helper *helper, mutex_lock(&dev->struct_mutex); - info = framebuffer_alloc(0, &pdev->dev); - if (!info) { - ret = -ENOMEM; + info = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(info)) { + ret = PTR_ERR(info); goto out_unlock; } info->skip_vt_switch = 1; - ret = fb_alloc_cmap(&info->cmap, 256, 0); - if (ret) { - ret = -ENOMEM; - framebuffer_release(info); - goto out_unlock; - } - info->par = fbcon; nouveau_framebuffer_init(dev, &fbcon->nouveau_fb, &mode_cmd, nvbo); @@ -388,7 +380,6 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, /* setup helper */ fbcon->helper.fb = fb; - fbcon->helper.fbdev = info; strcpy(info->fix.id, "nouveaufb"); if (!chan) @@ -450,15 +441,9 @@ static int nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon) { struct nouveau_framebuffer *nouveau_fb = &fbcon->nouveau_fb; - struct fb_info *info; - if (fbcon->helper.fbdev) { - info = fbcon->helper.fbdev; - unregister_framebuffer(info); - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - framebuffer_release(info); - } + drm_fb_helper_unregister_fbi(&fbcon->helper); + drm_fb_helper_release_fbi(&fbcon->helper); if (nouveau_fb->nvbo) { nouveau_bo_unmap(nouveau_fb->nvbo); @@ -496,7 +481,7 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state) console_lock(); if (state == FBINFO_STATE_RUNNING) nouveau_fbcon_accel_restore(dev); - fb_set_suspend(drm->fbcon->helper.fbdev, state); + drm_fb_helper_set_suspend(&drm->fbcon->helper, state); if (state != FBINFO_STATE_RUNNING) nouveau_fbcon_accel_save_disable(dev); console_unlock(); diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 7464aef34674..737e8f976a98 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -433,10 +433,8 @@ nouveau_ttm_init(struct nouveau_drm *drm) void nouveau_ttm_fini(struct nouveau_drm *drm) { - mutex_lock(&drm->dev->struct_mutex); ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM); ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT); - mutex_unlock(&drm->dev->struct_mutex); ttm_bo_device_release(&drm->ttm.bdev); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c index 52c22b026005..e10f9644140f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c @@ -166,30 +166,14 @@ gk104_fifo_context_attach(struct nvkm_object *parent, } static int -gk104_fifo_chan_kick(struct gk104_fifo_chan *chan) -{ - struct nvkm_object *obj = (void *)chan; - struct gk104_fifo_priv *priv = (void *)obj->engine; - - nv_wr32(priv, 0x002634, chan->base.chid); - if (!nv_wait(priv, 0x002634, 0x100000, 0x000000)) { - nv_error(priv, "channel %d [%s] kick timeout\n", - chan->base.chid, nvkm_client_name(chan)); - return -EBUSY; - } - - return 0; -} - -static int gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend, struct nvkm_object *object) { struct nvkm_bar *bar = nvkm_bar(parent); + struct gk104_fifo_priv *priv = (void *)parent->engine; struct gk104_fifo_base *base = (void *)parent->parent; struct gk104_fifo_chan *chan = (void *)parent; u32 addr; - int ret; switch (nv_engidx(object->engine)) { case NVDEV_ENGINE_SW : return 0; @@ -204,9 +188,13 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend, return -EINVAL; } - ret = gk104_fifo_chan_kick(chan); - if (ret && suspend) - return ret; + nv_wr32(priv, 0x002634, chan->base.chid); + if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) { + 
nv_error(priv, "channel %d [%s] kick timeout\n", + chan->base.chid, nvkm_client_name(chan)); + if (suspend) + return -EBUSY; + } if (addr) { nv_wo32(base, addr + 0x00, 0x00000000); @@ -331,7 +319,6 @@ gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend) gk104_fifo_runlist_update(priv, chan->engine); } - gk104_fifo_chan_kick(chan); nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000); return nvkm_fifo_channel_fini(&chan->base, suspend); } diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 23d9c928cdc9..9a4ba4f03567 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -388,11 +388,13 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc) copy_timings_drm_to_omap(&omap_crtc->timings, mode); } -static void omap_crtc_atomic_begin(struct drm_crtc *crtc) +static void omap_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) { } -static void omap_crtc_atomic_flush(struct drm_crtc *crtc) +static void omap_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) { struct omap_crtc *omap_crtc = to_omap_crtc(crtc); diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index 720d16bce7e8..b8e4cdec28c3 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -86,11 +86,11 @@ static struct fb_ops omap_fb_ops = { /* Note: to properly handle manual update displays, we wrap the * basic fbdev ops which write to the framebuffer */ - .fb_read = fb_sys_read, - .fb_write = fb_sys_write, - .fb_fillrect = sys_fillrect, - .fb_copyarea = sys_copyarea, - .fb_imageblit = sys_imageblit, + .fb_read = drm_fb_helper_sys_read, + .fb_write = drm_fb_helper_sys_write, + .fb_fillrect = drm_fb_helper_sys_fillrect, + .fb_copyarea = drm_fb_helper_sys_copyarea, + .fb_imageblit = drm_fb_helper_sys_imageblit, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, @@ -179,10 +179,10 @@ static int omap_fbdev_create(struct drm_fb_helper *helper, mutex_lock(&dev->struct_mutex); - fbi = framebuffer_alloc(0, dev->dev); - if (!fbi) { + fbi = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(fbi)) { dev_err(dev->dev, "failed to allocate fb info\n"); - ret = -ENOMEM; + ret = PTR_ERR(fbi); goto fail_unlock; } @@ -190,7 +190,6 @@ static int omap_fbdev_create(struct drm_fb_helper *helper, fbdev->fb = fb; helper->fb = fb; - helper->fbdev = fbi; fbi->par = helper; fbi->flags = FBINFO_DEFAULT; @@ -198,12 +197,6 @@ static int omap_fbdev_create(struct drm_fb_helper *helper, strcpy(fbi->fix.id, MODULE_NAME); - ret = fb_alloc_cmap(&fbi->cmap, 256, 0); - if (ret) { - ret = -ENOMEM; - goto fail_unlock; - } - drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); @@ -236,8 +229,9 @@ fail_unlock: fail: if (ret) { - if (fbi) - framebuffer_release(fbi); + + drm_fb_helper_release_fbi(helper); + if (fb) { drm_framebuffer_unregister_private(fb); drm_framebuffer_remove(fb); @@ -312,17 +306,11 @@ void omap_fbdev_free(struct drm_device *dev) struct omap_drm_private *priv = dev->dev_private; struct drm_fb_helper *helper = priv->fbdev; struct omap_fbdev *fbdev; - struct fb_info *fbi; DBG(); - fbi = helper->fbdev; - - /* only cleanup framebuffer if it is present */ - if (fbi) { - unregister_framebuffer(fbi); - framebuffer_release(fbi); - } + drm_fb_helper_unregister_fbi(helper); + drm_fb_helper_release_fbi(helper); drm_fb_helper_fini(helper); diff --git 
a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 6d64c7bb908b..7d4704b1292b 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -18,13 +18,21 @@ config DRM_PANEL_SIMPLE that it can be automatically turned off when the panel goes into a low power state. -config DRM_PANEL_LD9040 - tristate "LD9040 RGB/SPI panel" +config DRM_PANEL_SAMSUNG_LD9040 + tristate "Samsung LD9040 RGB/SPI panel" depends on OF && SPI select VIDEOMODE_HELPERS -config DRM_PANEL_S6E8AA0 - tristate "S6E8AA0 DSI video mode panel" +config DRM_PANEL_LG_LG4573 + tristate "LG4573 RGB/SPI panel" + depends on OF && SPI + select VIDEOMODE_HELPERS + help + Say Y here if you want to enable support for LG4573 RGB panel. + To compile this driver as a module, choose M here. + +config DRM_PANEL_SAMSUNG_S6E8AA0 + tristate "Samsung S6E8AA0 DSI video mode panel" depends on OF select DRM_MIPI_DSI select VIDEOMODE_HELPERS diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index 4b2a0430804b..d0f016dd7ddb 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o -obj-$(CONFIG_DRM_PANEL_LD9040) += panel-ld9040.o -obj-$(CONFIG_DRM_PANEL_S6E8AA0) += panel-s6e8aa0.o +obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c new file mode 100644 index 000000000000..a7b4939cee6d --- /dev/null +++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c @@ -0,0 +1,298 @@ +/* + * Copyright (C) 2015 Heiko Schocher <hs@denx.de> + * + * from: + * drivers/gpu/drm/panel/panel-ld9040.c + * ld9040 AMOLED LCD drm_panel driver. + * + * Copyright (c) 2014 Samsung Electronics Co., Ltd + * Derived from drivers/video/backlight/ld9040.c + * + * Andrzej Hajda <a.hajda@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
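A note on the new LG4573 driver's SPI protocol before its body: every transfer is one big-endian 16-bit word whose high byte marks the frame type. Judging from lg4573_spi_write_dcs() and the 0x70xx/0x72xx pairs in the settings tables below, 0x70 appears to introduce a DCS command byte and 0x72 a parameter byte. A hedged sketch of the data-side helper the driver leaves implicit (hypothetical name, not part of the patch):

static int lg4573_spi_write_data(struct lg4573 *ctx, u8 data)
{
	/* 0x72 in the high byte appears to mark "parameter byte follows" */
	return lg4573_spi_write_u16(ctx, (0x72 << 8) | data);
}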
+*/ + +#include <drm/drmP.h> +#include <drm/drm_panel.h> + +#include <linux/gpio/consumer.h> +#include <linux/regulator/consumer.h> +#include <linux/spi/spi.h> + +#include <video/mipi_display.h> +#include <video/of_videomode.h> +#include <video/videomode.h> + +struct lg4573 { + struct drm_panel panel; + struct spi_device *spi; + struct videomode vm; +}; + +static inline struct lg4573 *panel_to_lg4573(struct drm_panel *panel) +{ + return container_of(panel, struct lg4573, panel); +} + +static int lg4573_spi_write_u16(struct lg4573 *ctx, u16 data) +{ + struct spi_transfer xfer = { + .len = 2, + }; + u16 temp = cpu_to_be16(data); + struct spi_message msg; + + dev_dbg(ctx->panel.dev, "writing data: %x\n", data); + xfer.tx_buf = &temp; + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + return spi_sync(ctx->spi, &msg); +} + +static int lg4573_spi_write_u16_array(struct lg4573 *ctx, const u16 *buffer, + unsigned int count) +{ + unsigned int i; + int ret; + + for (i = 0; i < count; i++) { + ret = lg4573_spi_write_u16(ctx, buffer[i]); + if (ret) + return ret; + } + + return 0; +} + +static int lg4573_spi_write_dcs(struct lg4573 *ctx, u8 dcs) +{ + return lg4573_spi_write_u16(ctx, (0x70 << 8 | dcs)); +} + +static int lg4573_display_on(struct lg4573 *ctx) +{ + int ret; + + ret = lg4573_spi_write_dcs(ctx, MIPI_DCS_EXIT_SLEEP_MODE); + if (ret) + return ret; + + msleep(5); + + return lg4573_spi_write_dcs(ctx, MIPI_DCS_SET_DISPLAY_ON); +} + +static int lg4573_display_off(struct lg4573 *ctx) +{ + int ret; + + ret = lg4573_spi_write_dcs(ctx, MIPI_DCS_SET_DISPLAY_OFF); + if (ret) + return ret; + + msleep(120); + + return lg4573_spi_write_dcs(ctx, MIPI_DCS_ENTER_SLEEP_MODE); +} + +static int lg4573_display_mode_settings(struct lg4573 *ctx) +{ + static const u16 display_mode_settings[] = { + 0x703A, 0x7270, 0x70B1, 0x7208, + 0x723B, 0x720F, 0x70B2, 0x7200, + 0x72C8, 0x70B3, 0x7200, 0x70B4, + 0x7200, 0x70B5, 0x7242, 0x7210, + 0x7210, 0x7200, 0x7220, 0x70B6, + 0x720B, 0x720F, 0x723C, 0x7213, + 0x7213, 0x72E8, 0x70B7, 0x7246, + 0x7206, 0x720C, 0x7200, 0x7200, + }; + + dev_dbg(ctx->panel.dev, "transfer display mode settings\n"); + return lg4573_spi_write_u16_array(ctx, display_mode_settings, + ARRAY_SIZE(display_mode_settings)); +} + +static int lg4573_power_settings(struct lg4573 *ctx) +{ + static const u16 power_settings[] = { + 0x70C0, 0x7201, 0x7211, 0x70C3, + 0x7207, 0x7203, 0x7204, 0x7204, + 0x7204, 0x70C4, 0x7212, 0x7224, + 0x7218, 0x7218, 0x7202, 0x7249, + 0x70C5, 0x726F, 0x70C6, 0x7241, + 0x7263, + }; + + dev_dbg(ctx->panel.dev, "transfer power settings\n"); + return lg4573_spi_write_u16_array(ctx, power_settings, + ARRAY_SIZE(power_settings)); +} + +static int lg4573_gamma_settings(struct lg4573 *ctx) +{ + static const u16 gamma_settings[] = { + 0x70D0, 0x7203, 0x7207, 0x7273, + 0x7235, 0x7200, 0x7201, 0x7220, + 0x7200, 0x7203, 0x70D1, 0x7203, + 0x7207, 0x7273, 0x7235, 0x7200, + 0x7201, 0x7220, 0x7200, 0x7203, + 0x70D2, 0x7203, 0x7207, 0x7273, + 0x7235, 0x7200, 0x7201, 0x7220, + 0x7200, 0x7203, 0x70D3, 0x7203, + 0x7207, 0x7273, 0x7235, 0x7200, + 0x7201, 0x7220, 0x7200, 0x7203, + 0x70D4, 0x7203, 0x7207, 0x7273, + 0x7235, 0x7200, 0x7201, 0x7220, + 0x7200, 0x7203, 0x70D5, 0x7203, + 0x7207, 0x7273, 0x7235, 0x7200, + 0x7201, 0x7220, 0x7200, 0x7203, + }; + + dev_dbg(ctx->panel.dev, "transfer gamma settings\n"); + return lg4573_spi_write_u16_array(ctx, gamma_settings, + ARRAY_SIZE(gamma_settings)); +} + +static int lg4573_init(struct lg4573 *ctx) +{ + int ret; + + dev_dbg(ctx->panel.dev, 
"initializing LCD\n"); + + ret = lg4573_display_mode_settings(ctx); + if (ret) + return ret; + + ret = lg4573_power_settings(ctx); + if (ret) + return ret; + + return lg4573_gamma_settings(ctx); +} + +static int lg4573_power_on(struct lg4573 *ctx) +{ + return lg4573_display_on(ctx); +} + +static int lg4573_disable(struct drm_panel *panel) +{ + struct lg4573 *ctx = panel_to_lg4573(panel); + + return lg4573_display_off(ctx); +} + +static int lg4573_enable(struct drm_panel *panel) +{ + struct lg4573 *ctx = panel_to_lg4573(panel); + + lg4573_init(ctx); + + return lg4573_power_on(ctx); +} + +static const struct drm_display_mode default_mode = { + .clock = 27000, + .hdisplay = 480, + .hsync_start = 480 + 10, + .hsync_end = 480 + 10 + 59, + .htotal = 480 + 10 + 59 + 10, + .vdisplay = 800, + .vsync_start = 800 + 15, + .vsync_end = 800 + 15 + 15, + .vtotal = 800 + 15 + 15 + 15, + .vrefresh = 60, +}; + +static int lg4573_get_modes(struct drm_panel *panel) +{ + struct drm_connector *connector = panel->connector; + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(panel->drm, &default_mode); + if (!mode) { + dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n", + default_mode.hdisplay, default_mode.vdisplay, + default_mode.vrefresh); + return -ENOMEM; + } + + drm_mode_set_name(mode); + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + drm_mode_probed_add(connector, mode); + + panel->connector->display_info.width_mm = 61; + panel->connector->display_info.height_mm = 103; + + return 1; +} + +static const struct drm_panel_funcs lg4573_drm_funcs = { + .disable = lg4573_disable, + .enable = lg4573_enable, + .get_modes = lg4573_get_modes, +}; + +static int lg4573_probe(struct spi_device *spi) +{ + struct lg4573 *ctx; + int ret; + + ctx = devm_kzalloc(&spi->dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->spi = spi; + + spi_set_drvdata(spi, ctx); + spi->bits_per_word = 8; + + ret = spi_setup(spi); + if (ret < 0) { + dev_err(&spi->dev, "SPI setup failed: %d\n", ret); + return ret; + } + + drm_panel_init(&ctx->panel); + ctx->panel.dev = &spi->dev; + ctx->panel.funcs = &lg4573_drm_funcs; + + return drm_panel_add(&ctx->panel); +} + +static int lg4573_remove(struct spi_device *spi) +{ + struct lg4573 *ctx = spi_get_drvdata(spi); + + lg4573_display_off(ctx); + drm_panel_remove(&ctx->panel); + + return 0; +} + +static const struct of_device_id lg4573_of_match[] = { + { .compatible = "lg,lg4573" }, + { } +}; +MODULE_DEVICE_TABLE(of, lg4573_of_match); + +static struct spi_driver lg4573_driver = { + .probe = lg4573_probe, + .remove = lg4573_remove, + .driver = { + .name = "lg4573", + .owner = THIS_MODULE, + .of_match_table = lg4573_of_match, + }, +}; +module_spi_driver(lg4573_driver); + +MODULE_AUTHOR("Heiko Schocher <hs@denx.de>"); +MODULE_DESCRIPTION("lg4573 LCD Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c index 9c27bded4c09..b202377135e7 100644 --- a/drivers/gpu/drm/panel/panel-ld9040.c +++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c @@ -377,7 +377,7 @@ static struct spi_driver ld9040_driver = { .probe = ld9040_probe, .remove = ld9040_remove, .driver = { - .name = "ld9040", + .name = "panel-samsung-ld9040", .owner = THIS_MODULE, .of_match_table = ld9040_of_match, }, diff --git a/drivers/gpu/drm/panel/panel-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c index 30051108eec4..a188a3959f1a 100644 --- a/drivers/gpu/drm/panel/panel-s6e8aa0.c +++ 
b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c @@ -1051,7 +1051,7 @@ static struct mipi_dsi_driver s6e8aa0_driver = { .probe = s6e8aa0_probe, .remove = s6e8aa0_remove, .driver = { - .name = "panel_s6e8aa0", + .name = "panel-samsung-s6e8aa0", .of_match_table = s6e8aa0_of_match, }, }; diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index f94201b6e882..f97b73ec4713 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -713,7 +713,12 @@ static const struct display_timing hannstar_hsd070pww1_timing = { .hactive = { 1280, 1280, 1280 }, .hfront_porch = { 1, 1, 10 }, .hback_porch = { 1, 1, 10 }, - .hsync_len = { 52, 158, 661 }, + /* + * According to the data sheet, the minimum horizontal blanking interval + * is 54 clocks (1 + 52 + 1), but tests with a Nitrogen6X have shown the + * minimum working horizontal blanking interval to be 60 clocks. + */ + .hsync_len = { 58, 158, 661 }, .vactive = { 800, 800, 800 }, .vfront_porch = { 1, 1, 10 }, .vback_porch = { 1, 1, 10 }, @@ -729,6 +734,7 @@ static const struct panel_desc hannstar_hsd070pww1 = { .width = 151, .height = 94, }, + .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, }; static const struct display_timing hannstar_hsd100pxn1_timing = { @@ -943,6 +949,60 @@ static const struct panel_desc lg_lp129qe = { }, }; +static const struct drm_display_mode nec_nl4827hc19_05b_mode = { + .clock = 10870, + .hdisplay = 480, + .hsync_start = 480 + 2, + .hsync_end = 480 + 2 + 41, + .htotal = 480 + 2 + 41 + 2, + .vdisplay = 272, + .vsync_start = 272 + 2, + .vsync_end = 272 + 2 + 4, + .vtotal = 272 + 2 + 4 + 2, + .vrefresh = 74, +}; + +static const struct panel_desc nec_nl4827hc19_05b = { + .modes = &nec_nl4827hc19_05b_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 95, + .height = 54, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24 +}; + +static const struct display_timing okaya_rs800480t_7x0gp_timing = { + .pixelclock = { 30000000, 30000000, 40000000 }, + .hactive = { 800, 800, 800 }, + .hfront_porch = { 40, 40, 40 }, + .hback_porch = { 40, 40, 40 }, + .hsync_len = { 1, 48, 48 }, + .vactive = { 480, 480, 480 }, + .vfront_porch = { 13, 13, 13 }, + .vback_porch = { 29, 29, 29 }, + .vsync_len = { 3, 3, 3 }, + .flags = DISPLAY_FLAGS_DE_HIGH, +}; + +static const struct panel_desc okaya_rs800480t_7x0gp = { + .timings = &okaya_rs800480t_7x0gp_timing, + .num_timings = 1, + .bpc = 6, + .size = { + .width = 154, + .height = 87, + }, + .delay = { + .prepare = 41, + .enable = 50, + .unprepare = 41, + .disable = 50, + }, + .bus_format = MEDIA_BUS_FMT_RGB666_1X18, +}; + static const struct drm_display_mode ortustech_com43h4m85ulc_mode = { .clock = 25000, .hdisplay = 480, @@ -1113,6 +1173,12 @@ static const struct of_device_id platform_of_match[] = { .compatible = "lg,lp129qe", .data = &lg_lp129qe, }, { + .compatible = "nec,nl4827hc19-05b", + .data = &nec_nl4827hc19_05b, + }, { + .compatible = "okaya,rs800480t-7x0gp", + .data = &okaya_rs800480t_7x0gp, + }, { .compatible = "ortustech,com43h4m85ulc", .data = &ortustech_com43h4m85ulc, }, { @@ -1169,6 +1235,34 @@ struct panel_desc_dsi { unsigned int lanes; }; +static const struct drm_display_mode auo_b080uan01_mode = { + .clock = 154500, + .hdisplay = 1200, + .hsync_start = 1200 + 62, + .hsync_end = 1200 + 62 + 4, + .htotal = 1200 + 62 + 4 + 62, + .vdisplay = 1920, + .vsync_start = 1920 + 9, + .vsync_end = 1920 + 9 + 2, + .vtotal = 1920 + 9 + 2 + 8, + .vrefresh = 60, +}; + +static const struct panel_desc_dsi auo_b080uan01 = { + .desc = { + 
.modes = &auo_b080uan01_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 108, + .height = 272, + }, + }, + .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS, + .format = MIPI_DSI_FMT_RGB888, + .lanes = 4, +}; + static const struct drm_display_mode lg_ld070wx3_sl01_mode = { .clock = 71000, .hdisplay = 800, @@ -1256,6 +1350,9 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = { static const struct of_device_id dsi_of_match[] = { { + .compatible = "auo,b080uan01", + .data = &auo_b080uan01 + }, { .compatible = "lg,ld070wx3-sl01", .data = &lg_ld070wx3_sl01 }, { diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index 6b6e57e8c2d6..41c422fee31a 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c @@ -197,7 +197,7 @@ static void qxl_fb_fillrect(struct fb_info *info, { struct qxl_fbdev *qfbdev = info->par; - sys_fillrect(info, rect); + drm_fb_helper_sys_fillrect(info, rect); qxl_dirty_update(qfbdev, rect->dx, rect->dy, rect->width, rect->height); } @@ -207,7 +207,7 @@ static void qxl_fb_copyarea(struct fb_info *info, { struct qxl_fbdev *qfbdev = info->par; - sys_copyarea(info, area); + drm_fb_helper_sys_copyarea(info, area); qxl_dirty_update(qfbdev, area->dx, area->dy, area->width, area->height); } @@ -217,7 +217,7 @@ static void qxl_fb_imageblit(struct fb_info *info, { struct qxl_fbdev *qfbdev = info->par; - sys_imageblit(info, image); + drm_fb_helper_sys_imageblit(info, image); qxl_dirty_update(qfbdev, image->dx, image->dy, image->width, image->height); } @@ -345,7 +345,6 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev, struct drm_mode_fb_cmd2 mode_cmd; struct drm_gem_object *gobj = NULL; struct qxl_bo *qbo = NULL; - struct device *device = &qdev->pdev->dev; int ret; int size; int bpp = sizes->surface_bpp; @@ -374,9 +373,9 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev, shadow); size = mode_cmd.pitches[0] * mode_cmd.height; - info = framebuffer_alloc(0, device); - if (info == NULL) { - ret = -ENOMEM; + info = drm_fb_helper_alloc_fbi(&qfbdev->helper); + if (IS_ERR(info)) { + ret = PTR_ERR(info); goto out_unref; } @@ -388,7 +387,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev, /* setup helper with fb data */ qfbdev->helper.fb = fb; - qfbdev->helper.fbdev = info; + qfbdev->shadow = shadow; strcpy(info->fix.id, "qxldrmfb"); @@ -410,11 +409,6 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev, sizes->fb_height); /* setup aperture base/size for vesafb takeover */ - info->apertures = alloc_apertures(1); - if (!info->apertures) { - ret = -ENOMEM; - goto out_unref; - } info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base; info->apertures->ranges[0].size = qdev->vram_size; @@ -423,13 +417,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev, if (info->screen_base == NULL) { ret = -ENOSPC; - goto out_unref; - } - - ret = fb_alloc_cmap(&info->cmap, 256, 0); - if (ret) { - ret = -ENOMEM; - goto out_unref; + goto out_destroy_fbi; } info->fbdefio = &qxl_defio; @@ -441,6 +429,8 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev, DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height); return 0; +out_destroy_fbi: + drm_fb_helper_release_fbi(&qfbdev->helper); out_unref: if (qbo) { ret = qxl_bo_reserve(qbo, false); @@ -479,15 +469,11 @@ static int qxl_fb_find_or_create_single( static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev) { - struct fb_info *info; struct qxl_framebuffer *qfb = &qfbdev->qfb; - if (qfbdev->helper.fbdev) { - 
info = qfbdev->helper.fbdev; + drm_fb_helper_unregister_fbi(&qfbdev->helper); + drm_fb_helper_release_fbi(&qfbdev->helper); - unregister_framebuffer(info); - framebuffer_release(info); - } if (qfb->obj) { qxlfb_destroy_pinned_object(qfb->obj); qfb->obj = NULL; @@ -557,7 +543,7 @@ void qxl_fbdev_fini(struct qxl_device *qdev) void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state) { - fb_set_suspend(qdev->mode_info.qfbdev->helper.fbdev, state); + drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state); } bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj) diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index 6d6f33de48f4..b28370e014c6 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c @@ -272,7 +272,6 @@ void qxl_bo_force_delete(struct qxl_device *qdev) return; dev_err(qdev->dev, "Userspace still has active objects !\n"); list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) { - mutex_lock(&qdev->ddev->struct_mutex); dev_err(qdev->dev, "%p %p %lu %lu force free\n", &bo->gem_base, bo, (unsigned long)bo->gem_base.size, *((unsigned long *)&bo->gem_base.refcount)); @@ -280,8 +279,7 @@ void qxl_bo_force_delete(struct qxl_device *qdev) list_del_init(&bo->list); mutex_unlock(&qdev->gem.mutex); /* this should unref the ttm bo */ - drm_gem_object_unreference(&bo->gem_base); - mutex_unlock(&qdev->ddev->struct_mutex); + drm_gem_object_unreference_unlocked(&bo->gem_base); } } diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index 44480c1b9738..752072771388 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c @@ -76,16 +76,35 @@ static void dce6_afmt_get_connected_pins(struct radeon_device *rdev) struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev) { - int i; + struct drm_encoder *encoder; + struct radeon_encoder *radeon_encoder; + struct radeon_encoder_atom_dig *dig; + struct r600_audio_pin *pin = NULL; + int i, pin_count; dce6_afmt_get_connected_pins(rdev); for (i = 0; i < rdev->audio.num_pins; i++) { - if (rdev->audio.pin[i].connected) - return &rdev->audio.pin[i]; + if (rdev->audio.pin[i].connected) { + pin = &rdev->audio.pin[i]; + pin_count = 0; + + list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) { + if (radeon_encoder_is_digital(encoder)) { + radeon_encoder = to_radeon_encoder(encoder); + dig = radeon_encoder->enc_priv; + if (dig->pin == pin) + pin_count++; + } + } + + if (pin_count == 0) + return pin; + } } - DRM_ERROR("No connected audio pins found!\n"); - return NULL; + if (!pin) + DRM_ERROR("No connected audio pins found!\n"); + return pin; } void dce6_afmt_select_pin(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index aeb676708e60..7214858ffcea 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -82,9 +82,9 @@ static struct fb_ops radeonfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = radeon_fb_helper_set_par, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, + .fb_fillrect = drm_fb_helper_cfb_fillrect, + .fb_copyarea = drm_fb_helper_cfb_copyarea, + .fb_imageblit = drm_fb_helper_cfb_imageblit, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, @@ -227,7 +227,6 @@ static int radeonfb_create(struct drm_fb_helper *helper, 
struct drm_mode_fb_cmd2 mode_cmd; struct drm_gem_object *gobj = NULL; struct radeon_bo *rbo = NULL; - struct device *device = &rdev->pdev->dev; int ret; unsigned long tmp; @@ -250,9 +249,9 @@ static int radeonfb_create(struct drm_fb_helper *helper, rbo = gem_to_radeon_bo(gobj); /* okay we have an object now allocate the framebuffer */ - info = framebuffer_alloc(0, device); - if (info == NULL) { - ret = -ENOMEM; + info = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(info)) { + ret = PTR_ERR(info); goto out_unref; } @@ -262,14 +261,13 @@ static int radeonfb_create(struct drm_fb_helper *helper, ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); if (ret) { DRM_ERROR("failed to initialize framebuffer %d\n", ret); - goto out_unref; + goto out_destroy_fbi; } fb = &rfbdev->rfb.base; /* setup helper */ rfbdev->helper.fb = fb; - rfbdev->helper.fbdev = info; memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo)); @@ -289,11 +287,6 @@ static int radeonfb_create(struct drm_fb_helper *helper, drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); /* setup aperture base/size for vesafb takeover */ - info->apertures = alloc_apertures(1); - if (!info->apertures) { - ret = -ENOMEM; - goto out_unref; - } info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; info->apertures->ranges[0].size = rdev->mc.aper_size; @@ -301,13 +294,7 @@ static int radeonfb_create(struct drm_fb_helper *helper, if (info->screen_base == NULL) { ret = -ENOSPC; - goto out_unref; - } - - ret = fb_alloc_cmap(&info->cmap, 256, 0); - if (ret) { - ret = -ENOMEM; - goto out_unref; + goto out_destroy_fbi; } DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); @@ -319,6 +306,8 @@ static int radeonfb_create(struct drm_fb_helper *helper, vga_switcheroo_client_fb_set(rdev->ddev->pdev, info); return 0; +out_destroy_fbi: + drm_fb_helper_release_fbi(helper); out_unref: if (rbo) { @@ -339,17 +328,10 @@ void radeon_fb_output_poll_changed(struct radeon_device *rdev) static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev) { - struct fb_info *info; struct radeon_framebuffer *rfb = &rfbdev->rfb; - if (rfbdev->helper.fbdev) { - info = rfbdev->helper.fbdev; - - unregister_framebuffer(info); - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - framebuffer_release(info); - } + drm_fb_helper_unregister_fbi(&rfbdev->helper); + drm_fb_helper_release_fbi(&rfbdev->helper); if (rfb->obj) { radeonfb_destroy_pinned_object(rfb->obj); diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 1162bfa464f3..171d3e43c30c 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -79,6 +79,11 @@ static void radeon_hotplug_work_func(struct work_struct *work) struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; + /* we can race here at startup, some boards seem to trigger + * hotplug irqs when they shouldn't. 
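Stepping back: the radeon_fb.c hunk just above follows the same conversion applied earlier to msm, nouveau and qxl (and below to rockchip). drm_fb_helper_alloc_fbi() subsumes framebuffer_alloc(), fb_alloc_cmap(), alloc_apertures() and the manual helper->fbdev assignment — exactly the boilerplate each hunk deletes — and teardown shrinks to an unregister/release pair. A minimal sketch of the resulting create/destroy shape, with hypothetical foo_* names and assuming helper->fb was already set up by the caller:

static int foo_fbdev_create(struct drm_fb_helper *helper,
			    struct drm_fb_helper_surface_size *sizes)
{
	struct fb_info *info;

	/* Allocates the fb_info, cmap and apertures; sets helper->fbdev. */
	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info))
		return PTR_ERR(info);	/* nothing to unwind on this path */

	info->par = helper;
	drm_fb_helper_fill_fix(info, helper->fb->pitches[0], helper->fb->depth);
	drm_fb_helper_fill_var(info, helper, sizes->fb_width, sizes->fb_height);
	return 0;
}

static void foo_fbdev_destroy(struct drm_fb_helper *helper)
{
	drm_fb_helper_unregister_fbi(helper);	/* replaces unregister_framebuffer() */
	drm_fb_helper_release_fbi(helper);	/* replaces cmap free + framebuffer_release() */
	drm_fb_helper_fini(helper);
}

The payoff is visible in every converted error path: since the helper owns the fb_info, a failed create simply releases through the helper instead of each driver juggling framebuffer_release() and fb_dealloc_cmap() by hand.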
*/ + if (!rdev->mode_info.mode_config_initialized) + return; + mutex_lock(&mode_config->mutex); if (mode_config->num_connector) { list_for_each_entry(connector, &mode_config->connector_list, head) diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 676362769b8d..d3024883b844 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -419,7 +419,6 @@ void radeon_bo_force_delete(struct radeon_device *rdev) } dev_err(rdev->dev, "Userspace still has active objects !\n"); list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { - mutex_lock(&rdev->ddev->struct_mutex); dev_err(rdev->dev, "%p %p %lu %lu force free\n", &bo->gem_base, bo, (unsigned long)bo->gem_base.size, *((unsigned long *)&bo->gem_base.refcount)); @@ -427,8 +426,7 @@ void radeon_bo_force_delete(struct radeon_device *rdev) list_del_init(&bo->list); mutex_unlock(&bo->rdev->gem.mutex); /* this should unref the ttm bo */ - drm_gem_object_unreference(&bo->gem_base); - mutex_unlock(&rdev->ddev->struct_mutex); + drm_gem_object_unreference_unlocked(&bo->gem_base); } } diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index c1ba83a8dd8c..05751f3f8444 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -253,7 +253,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) return; - mutex_lock(&rdev->ddev->struct_mutex); down_write(&rdev->pm.mclk_lock); mutex_lock(&rdev->ring_lock); @@ -268,7 +267,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) /* needs a GPU reset dont reset here */ mutex_unlock(&rdev->ring_lock); up_write(&rdev->pm.mclk_lock); - mutex_unlock(&rdev->ddev->struct_mutex); return; } } @@ -304,7 +302,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) mutex_unlock(&rdev->ring_lock); up_write(&rdev->pm.mclk_lock); - mutex_unlock(&rdev->ddev->struct_mutex); } static void radeon_pm_print_states(struct radeon_device *rdev) @@ -1062,7 +1059,6 @@ force: radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps); } - mutex_lock(&rdev->ddev->struct_mutex); down_write(&rdev->pm.mclk_lock); mutex_lock(&rdev->ring_lock); @@ -1113,7 +1109,6 @@ force: done: mutex_unlock(&rdev->ring_lock); up_write(&rdev->pm.mclk_lock); - mutex_unlock(&rdev->ddev->struct_mutex); } void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c index 5b0dc0f6fd94..f261512bb4a0 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c @@ -37,9 +37,9 @@ static int rockchip_fbdev_mmap(struct fb_info *info, static struct fb_ops rockchip_drm_fbdev_ops = { .owner = THIS_MODULE, .fb_mmap = rockchip_fbdev_mmap, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, + .fb_fillrect = drm_fb_helper_cfb_fillrect, + .fb_copyarea = drm_fb_helper_cfb_copyarea, + .fb_imageblit = drm_fb_helper_cfb_imageblit, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_blank = drm_fb_helper_blank, @@ -77,10 +77,10 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper, private->fbdev_bo = &rk_obj->base; - fbi = framebuffer_alloc(0, dev->dev); - if (!fbi) { - dev_err(dev->dev, "Failed to allocate framebuffer info.\n"); - ret = -ENOMEM; + fbi = 
drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(fbi)) { + dev_err(dev->dev, "Failed to create framebuffer info.\n"); + ret = PTR_ERR(fbi); goto err_rockchip_gem_free_object; } @@ -89,21 +89,13 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper, if (IS_ERR(helper->fb)) { dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n"); ret = PTR_ERR(helper->fb); - goto err_framebuffer_release; + goto err_release_fbi; } - helper->fbdev = fbi; - fbi->par = helper; fbi->flags = FBINFO_FLAG_DEFAULT; fbi->fbops = &rockchip_drm_fbdev_ops; - ret = fb_alloc_cmap(&fbi->cmap, 256, 0); - if (ret) { - dev_err(dev->dev, "Failed to allocate color map.\n"); - goto err_drm_framebuffer_unref; - } - fb = helper->fb; drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); @@ -124,10 +116,8 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper, return 0; -err_drm_framebuffer_unref: - drm_framebuffer_unreference(helper->fb); -err_framebuffer_release: - framebuffer_release(fbi); +err_release_fbi: + drm_fb_helper_release_fbi(helper); err_rockchip_gem_free_object: rockchip_gem_free_object(&rk_obj->base); return ret; @@ -190,21 +180,8 @@ void rockchip_drm_fbdev_fini(struct drm_device *dev) helper = &private->fbdev_helper; - if (helper->fbdev) { - struct fb_info *info; - int ret; - - info = helper->fbdev; - ret = unregister_framebuffer(info); - if (ret < 0) - DRM_DEBUG_KMS("failed unregister_framebuffer() - %d\n", - ret); - - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - - framebuffer_release(info); - } + drm_fb_helper_unregister_fbi(helper); + drm_fb_helper_release_fbi(helper); if (helper->fb) drm_framebuffer_unreference(helper->fb); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index eba5f8a52fbd..a6d9104f7f15 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -200,13 +200,10 @@ int rockchip_gem_dumb_map_offset(struct drm_file *file_priv, struct drm_gem_object *obj; int ret; - mutex_lock(&dev->struct_mutex); - obj = drm_gem_object_lookup(dev, file_priv, handle); if (!obj) { DRM_ERROR("failed to lookup gem object.\n"); - ret = -EINVAL; - goto unlock; + return -EINVAL; } ret = drm_gem_create_mmap_offset(obj); @@ -217,10 +214,9 @@ int rockchip_gem_dumb_map_offset(struct drm_file *file_priv, DRM_DEBUG_KMS("offset = 0x%llx\n", *offset); out: - drm_gem_object_unreference(obj); -unlock: - mutex_unlock(&dev->struct_mutex); - return ret; + drm_gem_object_unreference_unlocked(obj); + + return 0; } /* diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile index f0f1e4ee2d92..e27490b492a5 100644 --- a/drivers/gpu/drm/sti/Makefile +++ b/drivers/gpu/drm/sti/Makefile @@ -1,12 +1,11 @@ sticompositor-y := \ - sti_layer.o \ sti_mixer.o \ sti_gdp.o \ sti_vid.o \ sti_cursor.o \ sti_compositor.o \ - sti_drm_crtc.o \ - sti_drm_plane.o + sti_crtc.o \ + sti_plane.o stihdmi-y := sti_hdmi.o \ sti_hdmi_tx3g0c55phy.o \ @@ -24,4 +23,4 @@ obj-$(CONFIG_DRM_STI) = \ sticompositor.o \ sti_hqvdp.o \ stidvo.o \ - sti_drm_drv.o + sti_drv.o diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c index 43215d3020fb..c652627b1bca 100644 --- a/drivers/gpu/drm/sti/sti_compositor.c +++ b/drivers/gpu/drm/sti/sti_compositor.c @@ -14,10 +14,12 @@ #include <drm/drmP.h> #include "sti_compositor.h" -#include "sti_drm_crtc.h" -#include "sti_drm_drv.h" -#include "sti_drm_plane.h" +#include 
"sti_crtc.h" +#include "sti_cursor.h" +#include "sti_drv.h" #include "sti_gdp.h" +#include "sti_plane.h" +#include "sti_vid.h" #include "sti_vtg.h" /* @@ -31,7 +33,7 @@ struct sti_compositor_data stih407_compositor_data = { {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200}, {STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300}, {STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400}, - {STI_VID_SUBDEV, (int)STI_VID_0, 0x700}, + {STI_VID_SUBDEV, (int)STI_HQVDP_0, 0x700}, {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00}, {STI_MIXER_AUX_SUBDEV, STI_MIXER_AUX, 0xD00}, }, @@ -53,14 +55,29 @@ struct sti_compositor_data stih416_compositor_data = { }, }; -static int sti_compositor_init_subdev(struct sti_compositor *compo, - struct sti_compositor_subdev_descriptor *desc, - unsigned int array_size) +static int sti_compositor_bind(struct device *dev, + struct device *master, + void *data) { - unsigned int i, mixer_id = 0, layer_id = 0; + struct sti_compositor *compo = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + unsigned int i, mixer_id = 0, vid_id = 0, crtc_id = 0; + struct sti_private *dev_priv = drm_dev->dev_private; + struct drm_plane *cursor = NULL; + struct drm_plane *primary = NULL; + struct sti_compositor_subdev_descriptor *desc = compo->data.subdev_desc; + unsigned int array_size = compo->data.nb_subdev; + + dev_priv->compo = compo; + /* Register mixer subdev and video subdev first */ for (i = 0; i < array_size; i++) { switch (desc[i].type) { + case STI_VID_SUBDEV: + compo->vid[vid_id++] = + sti_vid_create(compo->dev, desc[i].id, + compo->regs + desc[i].offset); + break; case STI_MIXER_MAIN_SUBDEV: case STI_MIXER_AUX_SUBDEV: compo->mixer[mixer_id++] = @@ -68,83 +85,68 @@ static int sti_compositor_init_subdev(struct sti_compositor *compo, compo->regs + desc[i].offset); break; case STI_GPD_SUBDEV: - case STI_VID_SUBDEV: case STI_CURSOR_SUBDEV: - compo->layer[layer_id++] = - sti_layer_create(compo->dev, desc[i].id, - compo->regs + desc[i].offset); + /* Nothing to do, wait for the second round */ break; default: DRM_ERROR("Unknow subdev compoment type\n"); return 1; } - } - compo->nb_mixers = mixer_id; - compo->nb_layers = layer_id; - - return 0; -} - -static int sti_compositor_bind(struct device *dev, struct device *master, - void *data) -{ - struct sti_compositor *compo = dev_get_drvdata(dev); - struct drm_device *drm_dev = data; - unsigned int i, crtc = 0, plane = 0; - struct sti_drm_private *dev_priv = drm_dev->dev_private; - struct drm_plane *cursor = NULL; - struct drm_plane *primary = NULL; - dev_priv->compo = compo; - - for (i = 0; i < compo->nb_layers; i++) { - if (compo->layer[i]) { - enum sti_layer_desc desc = compo->layer[i]->desc; - enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK; - enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY; + /* Register the other subdevs, create crtc and planes */ + for (i = 0; i < array_size; i++) { + enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY; - if (crtc < compo->nb_mixers) - plane_type = DRM_PLANE_TYPE_PRIMARY; + if (crtc_id < mixer_id) + plane_type = DRM_PLANE_TYPE_PRIMARY; - switch (type) { - case STI_CUR: - cursor = sti_drm_plane_init(drm_dev, - compo->layer[i], - 1, DRM_PLANE_TYPE_CURSOR); - break; - case STI_GDP: - case STI_VID: - primary = sti_drm_plane_init(drm_dev, - compo->layer[i], - (1 << compo->nb_mixers) - 1, - plane_type); - plane++; + switch (desc[i].type) { + case STI_MIXER_MAIN_SUBDEV: + case STI_MIXER_AUX_SUBDEV: + case STI_VID_SUBDEV: + /* Nothing to do, already done at the first round */ + break; + case STI_CURSOR_SUBDEV: + cursor = 
sti_cursor_create(drm_dev, compo->dev, + desc[i].id, + compo->regs + desc[i].offset, + 1); + if (!cursor) { + DRM_ERROR("Can't create CURSOR plane\n"); break; - case STI_BCK: - case STI_VDP: + } + break; + case STI_GPD_SUBDEV: + primary = sti_gdp_create(drm_dev, compo->dev, + desc[i].id, + compo->regs + desc[i].offset, + (1 << mixer_id) - 1, + plane_type); + if (!primary) { + DRM_ERROR("Can't create GDP plane\n"); break; } + break; + default: + DRM_ERROR("Unknown subdev compoment type\n"); + return 1; + } - /* The first planes are reserved for primary planes*/ - if (crtc < compo->nb_mixers && primary) { - sti_drm_crtc_init(drm_dev, compo->mixer[crtc], - primary, cursor); - crtc++; - cursor = NULL; - primary = NULL; - } + /* The first planes are reserved for primary planes*/ + if (crtc_id < mixer_id && primary) { + sti_crtc_init(drm_dev, compo->mixer[crtc_id], + primary, cursor); + crtc_id++; + cursor = NULL; + primary = NULL; } } - drm_vblank_init(drm_dev, crtc); + drm_vblank_init(drm_dev, crtc_id); /* Allow usage of vblank without having to call drm_irq_install */ drm_dev->irq_enabled = 1; - DRM_DEBUG_DRIVER("Initialized %d DRM CRTC(s) and %d DRM plane(s)\n", - crtc, plane); - DRM_DEBUG_DRIVER("DRM plane(s) for VID/VDP not created yet\n"); - return 0; } @@ -179,7 +181,6 @@ static int sti_compositor_probe(struct platform_device *pdev) struct device_node *vtg_np; struct sti_compositor *compo; struct resource *res; - int err; compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL); if (!compo) { @@ -187,7 +188,7 @@ static int sti_compositor_probe(struct platform_device *pdev) return -ENOMEM; } compo->dev = dev; - compo->vtg_vblank_nb.notifier_call = sti_drm_crtc_vblank_cb; + compo->vtg_vblank_nb.notifier_call = sti_crtc_vblank_cb; /* populate data structure depending on compatibility */ BUG_ON(!of_match_node(compositor_of_match, np)->data); @@ -251,12 +252,6 @@ static int sti_compositor_probe(struct platform_device *pdev) if (vtg_np) compo->vtg_aux = of_vtg_find(vtg_np); - /* Initialize compositor subdevices */ - err = sti_compositor_init_subdev(compo, compo->data.subdev_desc, - compo->data.nb_subdev); - if (err) - return err; - platform_set_drvdata(pdev, compo); return component_add(&pdev->dev, &sti_compositor_ops); diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h index 019eb44c62cc..1a4a73dab11e 100644 --- a/drivers/gpu/drm/sti/sti_compositor.h +++ b/drivers/gpu/drm/sti/sti_compositor.h @@ -12,13 +12,13 @@ #include <linux/clk.h> #include <linux/kernel.h> -#include "sti_layer.h" #include "sti_mixer.h" +#include "sti_plane.h" #define WAIT_NEXT_VSYNC_MS 50 /*ms*/ -#define STI_MAX_LAYER 8 #define STI_MAX_MIXER 2 +#define STI_MAX_VID 1 enum sti_compositor_subdev_type { STI_MIXER_MAIN_SUBDEV, @@ -59,11 +59,9 @@ struct sti_compositor_data { * @rst_main: reset control of the main path * @rst_aux: reset control of the aux path * @mixer: array of mixers + * @vid: array of vids * @vtg_main: vtg for main data path * @vtg_aux: vtg for auxillary data path - * @layer: array of layers - * @nb_mixers: number of mixers for this compositor - * @nb_layers: number of layers (GDP,VID,...) 
for this compositor * @vtg_vblank_nb: callback for VTG VSYNC notification */ struct sti_compositor { @@ -77,11 +75,9 @@ struct sti_compositor { struct reset_control *rst_main; struct reset_control *rst_aux; struct sti_mixer *mixer[STI_MAX_MIXER]; + struct sti_vid *vid[STI_MAX_VID]; struct sti_vtg *vtg_main; struct sti_vtg *vtg_aux; - struct sti_layer *layer[STI_MAX_LAYER]; - int nb_mixers; - int nb_layers; struct notifier_block vtg_vblank_nb; }; diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c index 26e63bf14efe..018ffc970e96 100644 --- a/drivers/gpu/drm/sti/sti_drm_crtc.c +++ b/drivers/gpu/drm/sti/sti_crtc.c @@ -15,22 +15,20 @@ #include <drm/drm_plane_helper.h> #include "sti_compositor.h" -#include "sti_drm_drv.h" -#include "sti_drm_crtc.h" +#include "sti_crtc.h" +#include "sti_drv.h" +#include "sti_vid.h" #include "sti_vtg.h" -static void sti_drm_crtc_dpms(struct drm_crtc *crtc, int mode) -{ - DRM_DEBUG_KMS("\n"); -} - -static void sti_drm_crtc_prepare(struct drm_crtc *crtc) +static void sti_crtc_enable(struct drm_crtc *crtc) { struct sti_mixer *mixer = to_sti_mixer(crtc); struct device *dev = mixer->dev; struct sti_compositor *compo = dev_get_drvdata(dev); - mixer->enabled = true; + DRM_DEBUG_DRIVER("\n"); + + mixer->status = STI_MIXER_READY; /* Prepare and enable the compo IP clock */ if (mixer->id == STI_MIXER_MAIN) { @@ -41,45 +39,28 @@ static void sti_drm_crtc_prepare(struct drm_crtc *crtc) DRM_INFO("Failed to prepare/enable compo_aux clk\n"); } - sti_mixer_clear_all_layers(mixer); + drm_crtc_vblank_on(crtc); } -static void sti_drm_crtc_commit(struct drm_crtc *crtc) +static void sti_crtc_disabling(struct drm_crtc *crtc) { struct sti_mixer *mixer = to_sti_mixer(crtc); - struct device *dev = mixer->dev; - struct sti_compositor *compo = dev_get_drvdata(dev); - struct sti_layer *layer; - - if ((!mixer || !compo)) { - DRM_ERROR("Can not find mixer or compositor)\n"); - return; - } - /* get GDP which is reserved to the CRTC FB */ - layer = to_sti_layer(crtc->primary); - if (layer) - sti_layer_commit(layer); - else - DRM_ERROR("Can not find CRTC dedicated plane (GDP0)\n"); - - /* Enable layer on mixer */ - if (sti_mixer_set_layer_status(mixer, layer, true)) - DRM_ERROR("Can not enable layer at mixer\n"); + DRM_DEBUG_DRIVER("\n"); - drm_crtc_vblank_on(crtc); + mixer->status = STI_MIXER_DISABLING; } -static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static bool sti_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { /* accept the provided drm_display_mode, do not fix it up */ return true; } static int -sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode) +sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode) { struct sti_mixer *mixer = to_sti_mixer(crtc); struct device *dev = mixer->dev; @@ -122,22 +103,19 @@ sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode) res = sti_mixer_active_video_area(mixer, &crtc->mode); if (res) { - DRM_ERROR("Can not set active video area\n"); + DRM_ERROR("Can't set active video area\n"); return -EINVAL; } return res; } -static void sti_drm_crtc_disable(struct drm_crtc *crtc) +static void sti_crtc_disable(struct drm_crtc *crtc) { struct sti_mixer *mixer = to_sti_mixer(crtc); struct device *dev = mixer->dev; struct sti_compositor *compo = dev_get_drvdata(dev); - if (!mixer->enabled) - return; - 
DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer)); /* Disable Background */ @@ -154,18 +132,18 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc) clk_disable_unprepare(compo->clk_compo_aux); } - mixer->enabled = false; + mixer->status = STI_MIXER_DISABLED; } static void -sti_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) +sti_crtc_mode_set_nofb(struct drm_crtc *crtc) { - sti_drm_crtc_prepare(crtc); - sti_drm_crtc_mode_set(crtc, &crtc->state->adjusted_mode); + sti_crtc_enable(crtc); + sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode); } -static void sti_drm_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) +static void sti_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) { struct sti_mixer *mixer = to_sti_mixer(crtc); @@ -179,47 +157,109 @@ static void sti_drm_atomic_begin(struct drm_crtc *crtc, } } -static void sti_drm_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) +static void sti_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) { + struct drm_device *drm_dev = crtc->dev; + struct sti_mixer *mixer = to_sti_mixer(crtc); + struct sti_compositor *compo = dev_get_drvdata(mixer->dev); + struct drm_plane *p; + + DRM_DEBUG_DRIVER("\n"); + + /* perform plane actions */ + list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) { + struct sti_plane *plane = to_sti_plane(p); + + switch (plane->status) { + case STI_PLANE_UPDATED: + /* update planes tag as updated */ + DRM_DEBUG_DRIVER("update plane %s\n", + sti_plane_to_str(plane)); + + if (sti_mixer_set_plane_depth(mixer, plane)) { + DRM_ERROR("Cannot set plane %s depth\n", + sti_plane_to_str(plane)); + break; + } + + if (sti_mixer_set_plane_status(mixer, plane, true)) { + DRM_ERROR("Cannot enable plane %s at mixer\n", + sti_plane_to_str(plane)); + break; + } + + /* if plane is HQVDP_0 then commit the vid[0] */ + if (plane->desc == STI_HQVDP_0) + sti_vid_commit(compo->vid[0], p->state); + + plane->status = STI_PLANE_READY; + + break; + case STI_PLANE_DISABLING: + /* disabling sequence for planes tag as disabling */ + DRM_DEBUG_DRIVER("disable plane %s from mixer\n", + sti_plane_to_str(plane)); + + if (sti_mixer_set_plane_status(mixer, plane, false)) { + DRM_ERROR("Cannot disable plane %s at mixer\n", + sti_plane_to_str(plane)); + continue; + } + + if (plane->desc == STI_CURSOR) + /* tag plane status for disabled */ + plane->status = STI_PLANE_DISABLED; + else + /* tag plane status for flushing */ + plane->status = STI_PLANE_FLUSHING; + + /* if plane is HQVDP_0 then disable the vid[0] */ + if (plane->desc == STI_HQVDP_0) + sti_vid_disable(compo->vid[0]); + + break; + default: + /* Other status case are not handled */ + break; + } + } } static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { - .dpms = sti_drm_crtc_dpms, - .prepare = sti_drm_crtc_prepare, - .commit = sti_drm_crtc_commit, - .mode_fixup = sti_drm_crtc_mode_fixup, + .enable = sti_crtc_enable, + .disable = sti_crtc_disabling, + .mode_fixup = sti_crtc_mode_fixup, .mode_set = drm_helper_crtc_mode_set, - .mode_set_nofb = sti_drm_crtc_mode_set_nofb, + .mode_set_nofb = sti_crtc_mode_set_nofb, .mode_set_base = drm_helper_crtc_mode_set_base, - .disable = sti_drm_crtc_disable, - .atomic_begin = sti_drm_atomic_begin, - .atomic_flush = sti_drm_atomic_flush, + .atomic_begin = sti_crtc_atomic_begin, + .atomic_flush = sti_crtc_atomic_flush, }; -static void sti_drm_crtc_destroy(struct drm_crtc *crtc) +static void sti_crtc_destroy(struct 
drm_crtc *crtc) { DRM_DEBUG_KMS("\n"); drm_crtc_cleanup(crtc); } -static int sti_drm_crtc_set_property(struct drm_crtc *crtc, - struct drm_property *property, - uint64_t val) +static int sti_crtc_set_property(struct drm_crtc *crtc, + struct drm_property *property, + uint64_t val) { DRM_DEBUG_KMS("\n"); return 0; } -int sti_drm_crtc_vblank_cb(struct notifier_block *nb, - unsigned long event, void *data) +int sti_crtc_vblank_cb(struct notifier_block *nb, + unsigned long event, void *data) { struct drm_device *drm_dev; struct sti_compositor *compo = container_of(nb, struct sti_compositor, vtg_vblank_nb); int *crtc = data; unsigned long flags; - struct sti_drm_private *priv; + struct sti_private *priv; drm_dev = compo->mixer[*crtc]->drm_crtc.dev; priv = drm_dev->dev_private; @@ -235,21 +275,38 @@ int sti_drm_crtc_vblank_cb(struct notifier_block *nb, spin_lock_irqsave(&drm_dev->event_lock, flags); if (compo->mixer[*crtc]->pending_event) { drm_send_vblank_event(drm_dev, -1, - compo->mixer[*crtc]->pending_event); + compo->mixer[*crtc]->pending_event); drm_vblank_put(drm_dev, *crtc); compo->mixer[*crtc]->pending_event = NULL; } spin_unlock_irqrestore(&drm_dev->event_lock, flags); + if (compo->mixer[*crtc]->status == STI_MIXER_DISABLING) { + struct drm_plane *p; + + /* Disable mixer only if all overlay planes (GDP and VDP) + * are disabled */ + list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) { + struct sti_plane *plane = to_sti_plane(p); + + if ((plane->desc & STI_PLANE_TYPE_MASK) <= STI_VDP) + if (plane->status != STI_PLANE_DISABLED) + return 0; + } + sti_crtc_disable(&compo->mixer[*crtc]->drm_crtc); + } + return 0; } -int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc) +int sti_crtc_enable_vblank(struct drm_device *dev, int crtc) { - struct sti_drm_private *dev_priv = dev->dev_private; + struct sti_private *dev_priv = dev->dev_private; struct sti_compositor *compo = dev_priv->compo; struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb; + DRM_DEBUG_DRIVER("\n"); + if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ? 
compo->vtg_main : compo->vtg_aux, vtg_vblank_nb, crtc)) { @@ -259,11 +316,11 @@ int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc) return 0; } -EXPORT_SYMBOL(sti_drm_crtc_enable_vblank); +EXPORT_SYMBOL(sti_crtc_enable_vblank); -void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc) +void sti_crtc_disable_vblank(struct drm_device *drm_dev, int crtc) { - struct sti_drm_private *priv = dev->dev_private; + struct sti_private *priv = drm_dev->dev_private; struct sti_compositor *compo = priv->compo; struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb; @@ -275,23 +332,23 @@ void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc) /* free the resources of the pending requests */ if (compo->mixer[crtc]->pending_event) { - drm_vblank_put(dev, crtc); + drm_vblank_put(drm_dev, crtc); compo->mixer[crtc]->pending_event = NULL; } } -EXPORT_SYMBOL(sti_drm_crtc_disable_vblank); +EXPORT_SYMBOL(sti_crtc_disable_vblank); static struct drm_crtc_funcs sti_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, - .destroy = sti_drm_crtc_destroy, - .set_property = sti_drm_crtc_set_property, + .destroy = sti_crtc_destroy, + .set_property = sti_crtc_set_property, .reset = drm_atomic_helper_crtc_reset, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, }; -bool sti_drm_crtc_is_main(struct drm_crtc *crtc) +bool sti_crtc_is_main(struct drm_crtc *crtc) { struct sti_mixer *mixer = to_sti_mixer(crtc); @@ -300,18 +357,18 @@ bool sti_drm_crtc_is_main(struct drm_crtc *crtc) return false; } -EXPORT_SYMBOL(sti_drm_crtc_is_main); +EXPORT_SYMBOL(sti_crtc_is_main); -int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer, - struct drm_plane *primary, struct drm_plane *cursor) +int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer, + struct drm_plane *primary, struct drm_plane *cursor) { struct drm_crtc *crtc = &mixer->drm_crtc; int res; res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, - &sti_crtc_funcs); + &sti_crtc_funcs); if (res) { - DRM_ERROR("Can not initialze CRTC\n"); + DRM_ERROR("Can't initialize CRTC\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/sti/sti_crtc.h b/drivers/gpu/drm/sti/sti_crtc.h new file mode 100644 index 000000000000..51963e6ddbe7 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_crtc.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _STI_CRTC_H_ +#define _STI_CRTC_H_ + +#include <drm/drmP.h> + +struct sti_mixer; + +int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer, + struct drm_plane *primary, struct drm_plane *cursor); +int sti_crtc_enable_vblank(struct drm_device *dev, int crtc); +void sti_crtc_disable_vblank(struct drm_device *dev, int crtc); +int sti_crtc_vblank_cb(struct notifier_block *nb, + unsigned long event, void *data); +bool sti_crtc_is_main(struct drm_crtc *drm_crtc); + +#endif diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c index 010eaee60bf7..dd1032195051 100644 --- a/drivers/gpu/drm/sti/sti_cursor.c +++ b/drivers/gpu/drm/sti/sti_cursor.c @@ -7,8 +7,14 @@ */ #include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_plane_helper.h> + +#include "sti_compositor.h" #include "sti_cursor.h" -#include "sti_layer.h" +#include "sti_plane.h" #include "sti_vtg.h" /* Registers */ @@ -42,15 +48,19 @@ struct dma_pixmap { /** * STI Cursor structure * - * @layer: layer structure - * @width: cursor width - * @height: cursor height - * @clut: color look up table - * @clut_paddr: color look up table physical address - * @pixmap: pixmap dma buffer (clut8-format cursor) + * @sti_plane: sti_plane structure + * @dev: driver device + * @regs: cursor registers + * @width: cursor width + * @height: cursor height + * @clut: color look up table + * @clut_paddr: color look up table physical address + * @pixmap: pixmap dma buffer (clut8-format cursor) */ struct sti_cursor { - struct sti_layer layer; + struct sti_plane plane; + struct device *dev; + void __iomem *regs; unsigned int width; unsigned int height; unsigned short *clut; @@ -62,22 +72,10 @@ static const uint32_t cursor_supported_formats[] = { DRM_FORMAT_ARGB8888, }; -#define to_sti_cursor(x) container_of(x, struct sti_cursor, layer) - -static const uint32_t *sti_cursor_get_formats(struct sti_layer *layer) -{ - return cursor_supported_formats; -} - -static unsigned int sti_cursor_get_nb_formats(struct sti_layer *layer) -{ - return ARRAY_SIZE(cursor_supported_formats); -} +#define to_sti_cursor(x) container_of(x, struct sti_cursor, plane) -static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer) +static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src) { - struct sti_cursor *cursor = to_sti_cursor(layer); - u32 *src = layer->vaddr; u8 *dst = cursor->pixmap.base; unsigned int i, j; u32 a, r, g, b; @@ -96,127 +94,155 @@ static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer) } } -static int sti_cursor_prepare_layer(struct sti_layer *layer, bool first_prepare) +static void sti_cursor_init(struct sti_cursor *cursor) { - struct sti_cursor *cursor = to_sti_cursor(layer); - struct drm_display_mode *mode = layer->mode; + unsigned short *base = cursor->clut; + unsigned int a, r, g, b; + + /* Assign CLUT values, ARGB444 format */ + for (a = 0; a < 4; a++) + for (r = 0; r < 4; r++) + for (g = 0; g < 4; g++) + for (b = 0; b < 4; b++) + *base++ = (a * 5) << 12 | + (r * 5) << 8 | + (g * 5) << 4 | + (b * 5); +} + +static void sti_cursor_atomic_update(struct drm_plane *drm_plane, + struct drm_plane_state *oldstate) +{ + struct drm_plane_state *state = drm_plane->state; + struct sti_plane *plane = to_sti_plane(drm_plane); + struct sti_cursor *cursor = to_sti_cursor(plane); + struct drm_crtc *crtc = state->crtc; + struct sti_mixer 
*mixer = to_sti_mixer(crtc); + struct drm_framebuffer *fb = state->fb; + struct drm_display_mode *mode = &crtc->mode; + int dst_x = state->crtc_x; + int dst_y = state->crtc_y; + int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x); + int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y); + /* src_x are in 16.16 format */ + int src_w = state->src_w >> 16; + int src_h = state->src_h >> 16; + bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false; + struct drm_gem_cma_object *cma_obj; u32 y, x; u32 val; - DRM_DEBUG_DRIVER("\n"); + DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n", + crtc->base.id, sti_mixer_to_str(mixer), + drm_plane->base.id, sti_plane_to_str(plane)); + DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", dst_w, dst_h, dst_x, dst_y); - dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer)); + dev_dbg(cursor->dev, "%s %s\n", __func__, + sti_plane_to_str(plane)); - if (layer->src_w < STI_CURS_MIN_SIZE || - layer->src_h < STI_CURS_MIN_SIZE || - layer->src_w > STI_CURS_MAX_SIZE || - layer->src_h > STI_CURS_MAX_SIZE) { + if (src_w < STI_CURS_MIN_SIZE || + src_h < STI_CURS_MIN_SIZE || + src_w > STI_CURS_MAX_SIZE || + src_h > STI_CURS_MAX_SIZE) { DRM_ERROR("Invalid cursor size (%dx%d)\n", - layer->src_w, layer->src_h); - return -EINVAL; + src_w, src_h); + return; } /* If the cursor size has changed, re-allocated the pixmap */ if (!cursor->pixmap.base || - (cursor->width != layer->src_w) || - (cursor->height != layer->src_h)) { - cursor->width = layer->src_w; - cursor->height = layer->src_h; + (cursor->width != src_w) || + (cursor->height != src_h)) { + cursor->width = src_w; + cursor->height = src_h; if (cursor->pixmap.base) - dma_free_writecombine(layer->dev, + dma_free_writecombine(cursor->dev, cursor->pixmap.size, cursor->pixmap.base, cursor->pixmap.paddr); cursor->pixmap.size = cursor->width * cursor->height; - cursor->pixmap.base = dma_alloc_writecombine(layer->dev, + cursor->pixmap.base = dma_alloc_writecombine(cursor->dev, cursor->pixmap.size, &cursor->pixmap.paddr, GFP_KERNEL | GFP_DMA); if (!cursor->pixmap.base) { DRM_ERROR("Failed to allocate memory for pixmap\n"); - return -ENOMEM; + return; } } + cma_obj = drm_fb_cma_get_gem_obj(fb, 0); + if (!cma_obj) { + DRM_ERROR("Can't get CMA GEM object for fb\n"); + return; + } + /* Convert ARGB8888 to CLUT8 */ - sti_cursor_argb8888_to_clut8(layer); + sti_cursor_argb8888_to_clut8(cursor, (u32 *)cma_obj->vaddr); /* AWS and AWE depend on the mode */ y = sti_vtg_get_line_number(*mode, 0); x = sti_vtg_get_pixel_number(*mode, 0); val = y << 16 | x; - writel(val, layer->regs + CUR_AWS); + writel(val, cursor->regs + CUR_AWS); y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1); x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1); val = y << 16 | x; - writel(val, layer->regs + CUR_AWE); + writel(val, cursor->regs + CUR_AWE); if (first_prepare) { /* Set and fetch CLUT */ - writel(cursor->clut_paddr, layer->regs + CUR_CML); - writel(CUR_CTL_CLUT_UPDATE, layer->regs + CUR_CTL); + writel(cursor->clut_paddr, cursor->regs + CUR_CML); + writel(CUR_CTL_CLUT_UPDATE, cursor->regs + CUR_CTL); } - return 0; -} - -static int sti_cursor_commit_layer(struct sti_layer *layer) -{ - struct sti_cursor *cursor = to_sti_cursor(layer); - struct drm_display_mode *mode = layer->mode; - u32 ydo, xdo; - - dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer)); - /* Set memory location, size, and position */ - writel(cursor->pixmap.paddr, layer->regs + CUR_PML); - writel(cursor->width, layer->regs + 
CUR_PMP); - writel(cursor->height << 16 | cursor->width, layer->regs + CUR_SIZE); + writel(cursor->pixmap.paddr, cursor->regs + CUR_PML); + writel(cursor->width, cursor->regs + CUR_PMP); + writel(cursor->height << 16 | cursor->width, cursor->regs + CUR_SIZE); - ydo = sti_vtg_get_line_number(*mode, layer->dst_y); - xdo = sti_vtg_get_pixel_number(*mode, layer->dst_y); - writel((ydo << 16) | xdo, layer->regs + CUR_VPO); + y = sti_vtg_get_line_number(*mode, dst_y); + x = sti_vtg_get_pixel_number(*mode, dst_y); + writel((y << 16) | x, cursor->regs + CUR_VPO); - return 0; + plane->status = STI_PLANE_UPDATED; } -static int sti_cursor_disable_layer(struct sti_layer *layer) +static void sti_cursor_atomic_disable(struct drm_plane *drm_plane, + struct drm_plane_state *oldstate) { - return 0; -} + struct sti_plane *plane = to_sti_plane(drm_plane); + struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc); -static void sti_cursor_init(struct sti_layer *layer) -{ - struct sti_cursor *cursor = to_sti_cursor(layer); - unsigned short *base = cursor->clut; - unsigned int a, r, g, b; + if (!drm_plane->crtc) { + DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", + drm_plane->base.id); + return; + } - /* Assign CLUT values, ARGB444 format */ - for (a = 0; a < 4; a++) - for (r = 0; r < 4; r++) - for (g = 0; g < 4; g++) - for (b = 0; b < 4; b++) - *base++ = (a * 5) << 12 | - (r * 5) << 8 | - (g * 5) << 4 | - (b * 5); + DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n", + drm_plane->crtc->base.id, sti_mixer_to_str(mixer), + drm_plane->base.id, sti_plane_to_str(plane)); + + plane->status = STI_PLANE_DISABLING; } -static const struct sti_layer_funcs cursor_ops = { - .get_formats = sti_cursor_get_formats, - .get_nb_formats = sti_cursor_get_nb_formats, - .init = sti_cursor_init, - .prepare = sti_cursor_prepare_layer, - .commit = sti_cursor_commit_layer, - .disable = sti_cursor_disable_layer, +static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = { + .atomic_update = sti_cursor_atomic_update, + .atomic_disable = sti_cursor_atomic_disable, }; -struct sti_layer *sti_cursor_create(struct device *dev) +struct drm_plane *sti_cursor_create(struct drm_device *drm_dev, + struct device *dev, int desc, + void __iomem *baseaddr, + unsigned int possible_crtcs) { struct sti_cursor *cursor; + size_t size; + int res; cursor = devm_kzalloc(dev, sizeof(*cursor), GFP_KERNEL); if (!cursor) { @@ -225,18 +251,43 @@ struct sti_layer *sti_cursor_create(struct device *dev) } /* Allocate clut buffer */ - cursor->clut = dma_alloc_writecombine(dev, - 0x100 * sizeof(unsigned short), - &cursor->clut_paddr, - GFP_KERNEL | GFP_DMA); + size = 0x100 * sizeof(unsigned short); + cursor->clut = dma_alloc_writecombine(dev, size, &cursor->clut_paddr, + GFP_KERNEL | GFP_DMA); if (!cursor->clut) { DRM_ERROR("Failed to allocate memory for cursor clut\n"); - devm_kfree(dev, cursor); - return NULL; + goto err_clut; + } + + cursor->dev = dev; + cursor->regs = baseaddr; + cursor->plane.desc = desc; + cursor->plane.status = STI_PLANE_DISABLED; + + sti_cursor_init(cursor); + + res = drm_universal_plane_init(drm_dev, &cursor->plane.drm_plane, + possible_crtcs, + &sti_plane_helpers_funcs, + cursor_supported_formats, + ARRAY_SIZE(cursor_supported_formats), + DRM_PLANE_TYPE_CURSOR); + if (res) { + DRM_ERROR("Failed to initialize universal plane\n"); + goto err_plane; } - cursor->layer.ops = &cursor_ops; + drm_plane_helper_add(&cursor->plane.drm_plane, + &sti_cursor_helpers_funcs); + + sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR); + + 
return &cursor->plane.drm_plane; - return (struct sti_layer *)cursor; +err_plane: + dma_free_writecombine(dev, size, cursor->clut, cursor->clut_paddr); +err_clut: + devm_kfree(dev, cursor); + return NULL; } diff --git a/drivers/gpu/drm/sti/sti_cursor.h b/drivers/gpu/drm/sti/sti_cursor.h index 3c9827404f27..2ee5c10e8b33 100644 --- a/drivers/gpu/drm/sti/sti_cursor.h +++ b/drivers/gpu/drm/sti/sti_cursor.h @@ -7,6 +7,9 @@ #ifndef _STI_CURSOR_H_ #define _STI_CURSOR_H_ -struct sti_layer *sti_cursor_create(struct device *dev); +struct drm_plane *sti_cursor_create(struct drm_device *drm_dev, + struct device *dev, int desc, + void __iomem *baseaddr, + unsigned int possible_crtcs); #endif diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.h b/drivers/gpu/drm/sti/sti_drm_crtc.h deleted file mode 100644 index caca8b14f017..000000000000 --- a/drivers/gpu/drm/sti/sti_drm_crtc.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (C) STMicroelectronics SA 2014 - * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics. - * License terms: GNU General Public License (GPL), version 2 - */ - -#ifndef _STI_DRM_CRTC_H_ -#define _STI_DRM_CRTC_H_ - -#include <drm/drmP.h> - -struct sti_mixer; - -int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer, - struct drm_plane *primary, struct drm_plane *cursor); -int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc); -void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc); -int sti_drm_crtc_vblank_cb(struct notifier_block *nb, - unsigned long event, void *data); -bool sti_drm_crtc_is_main(struct drm_crtc *drm_crtc); - -#endif diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c deleted file mode 100644 index 64d4ed43dda3..000000000000 --- a/drivers/gpu/drm/sti/sti_drm_plane.c +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Copyright (C) STMicroelectronics SA 2014 - * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> - * Fabien Dessenne <fabien.dessenne@st.com> - * for STMicroelectronics. - * License terms: GNU General Public License (GPL), version 2 - */ - -#include <drm/drmP.h> -#include <drm/drm_atomic_helper.h> -#include <drm/drm_plane_helper.h> - -#include "sti_compositor.h" -#include "sti_drm_drv.h" -#include "sti_drm_plane.h" -#include "sti_vtg.h" - -enum sti_layer_desc sti_layer_default_zorder[] = { - STI_GDP_0, - STI_VID_0, - STI_GDP_1, - STI_VID_1, - STI_GDP_2, - STI_GDP_3, -}; - -/* (Background) < GDP0 < VID0 < GDP1 < VID1 < GDP2 < GDP3 < (ForeGround) */ - -static int -sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, - struct drm_framebuffer *fb, int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h) -{ - struct sti_layer *layer = to_sti_layer(plane); - struct sti_mixer *mixer = to_sti_mixer(crtc); - int res; - - DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n", - crtc->base.id, sti_mixer_to_str(mixer), - plane->base.id, sti_layer_to_str(layer)); - DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", crtc_w, crtc_h, crtc_x, crtc_y); - - res = sti_mixer_set_layer_depth(mixer, layer); - if (res) { - DRM_ERROR("Can not set layer depth\n"); - return res; - } - - /* src_x are in 16.16 format. 
*/ - res = sti_layer_prepare(layer, crtc, fb, - &crtc->mode, mixer->id, - crtc_x, crtc_y, crtc_w, crtc_h, - src_x >> 16, src_y >> 16, - src_w >> 16, src_h >> 16); - if (res) { - DRM_ERROR("Layer prepare failed\n"); - return res; - } - - res = sti_layer_commit(layer); - if (res) { - DRM_ERROR("Layer commit failed\n"); - return res; - } - - res = sti_mixer_set_layer_status(mixer, layer, true); - if (res) { - DRM_ERROR("Can not enable layer at mixer\n"); - return res; - } - - return 0; -} - -static int sti_drm_disable_plane(struct drm_plane *plane) -{ - struct sti_layer *layer; - struct sti_mixer *mixer; - int lay_res, mix_res; - - if (!plane->crtc) { - DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", plane->base.id); - return 0; - } - layer = to_sti_layer(plane); - mixer = to_sti_mixer(plane->crtc); - - DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n", - plane->crtc->base.id, sti_mixer_to_str(mixer), - plane->base.id, sti_layer_to_str(layer)); - - /* Disable layer at mixer level */ - mix_res = sti_mixer_set_layer_status(mixer, layer, false); - if (mix_res) - DRM_ERROR("Can not disable layer at mixer\n"); - - /* Wait a while to be sure that a Vsync event is received */ - msleep(WAIT_NEXT_VSYNC_MS); - - /* Then disable layer itself */ - lay_res = sti_layer_disable(layer); - if (lay_res) - DRM_ERROR("Layer disable failed\n"); - - if (lay_res || mix_res) - return -EINVAL; - - return 0; -} - -static void sti_drm_plane_destroy(struct drm_plane *plane) -{ - DRM_DEBUG_DRIVER("\n"); - - drm_plane_helper_disable(plane); - drm_plane_cleanup(plane); -} - -static int sti_drm_plane_set_property(struct drm_plane *plane, - struct drm_property *property, - uint64_t val) -{ - struct drm_device *dev = plane->dev; - struct sti_drm_private *private = dev->dev_private; - struct sti_layer *layer = to_sti_layer(plane); - - DRM_DEBUG_DRIVER("\n"); - - if (property == private->plane_zorder_property) { - layer->zorder = val; - return 0; - } - - return -EINVAL; -} - -static struct drm_plane_funcs sti_drm_plane_funcs = { - .update_plane = drm_atomic_helper_update_plane, - .disable_plane = drm_atomic_helper_disable_plane, - .destroy = sti_drm_plane_destroy, - .set_property = sti_drm_plane_set_property, - .reset = drm_atomic_helper_plane_reset, - .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, -}; - -static int sti_drm_plane_prepare_fb(struct drm_plane *plane, - struct drm_framebuffer *fb, - const struct drm_plane_state *new_state) -{ - return 0; -} - -static void sti_drm_plane_cleanup_fb(struct drm_plane *plane, - struct drm_framebuffer *fb, - const struct drm_plane_state *old_fb) -{ -} - -static int sti_drm_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) -{ - return 0; -} - -static void sti_drm_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *oldstate) -{ - struct drm_plane_state *state = plane->state; - - sti_drm_update_plane(plane, state->crtc, state->fb, - state->crtc_x, state->crtc_y, - state->crtc_w, state->crtc_h, - state->src_x, state->src_y, - state->src_w, state->src_h); -} - -static void sti_drm_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *oldstate) -{ - sti_drm_disable_plane(plane); -} - -static const struct drm_plane_helper_funcs sti_drm_plane_helpers_funcs = { - .prepare_fb = sti_drm_plane_prepare_fb, - .cleanup_fb = sti_drm_plane_cleanup_fb, - .atomic_check = sti_drm_plane_atomic_check, - .atomic_update = sti_drm_plane_atomic_update, - 
.atomic_disable = sti_drm_plane_atomic_disable, -}; - -static void sti_drm_plane_attach_zorder_property(struct drm_plane *plane, - uint64_t default_val) -{ - struct drm_device *dev = plane->dev; - struct sti_drm_private *private = dev->dev_private; - struct drm_property *prop; - struct sti_layer *layer = to_sti_layer(plane); - - prop = private->plane_zorder_property; - if (!prop) { - prop = drm_property_create_range(dev, 0, "zpos", 0, - GAM_MIXER_NB_DEPTH_LEVEL - 1); - if (!prop) - return; - - private->plane_zorder_property = prop; - } - - drm_object_attach_property(&plane->base, prop, default_val); - layer->zorder = default_val; -} - -struct drm_plane *sti_drm_plane_init(struct drm_device *dev, - struct sti_layer *layer, - unsigned int possible_crtcs, - enum drm_plane_type type) -{ - int err, i; - uint64_t default_zorder = 0; - - err = drm_universal_plane_init(dev, &layer->plane, possible_crtcs, - &sti_drm_plane_funcs, - sti_layer_get_formats(layer), - sti_layer_get_nb_formats(layer), type); - if (err) { - DRM_ERROR("Failed to initialize plane\n"); - return NULL; - } - - drm_plane_helper_add(&layer->plane, &sti_drm_plane_helpers_funcs); - - for (i = 0; i < ARRAY_SIZE(sti_layer_default_zorder); i++) - if (sti_layer_default_zorder[i] == layer->desc) - break; - - default_zorder = i + 1; - - if (type == DRM_PLANE_TYPE_OVERLAY) - sti_drm_plane_attach_zorder_property(&layer->plane, - default_zorder); - - DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%llu\n", - layer->plane.base.id, - sti_layer_to_str(layer), default_zorder); - - return &layer->plane; -} -EXPORT_SYMBOL(sti_drm_plane_init); diff --git a/drivers/gpu/drm/sti/sti_drm_plane.h b/drivers/gpu/drm/sti/sti_drm_plane.h deleted file mode 100644 index 4f191839f2a7..000000000000 --- a/drivers/gpu/drm/sti/sti_drm_plane.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright (C) STMicroelectronics SA 2014 - * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics. 
- * License terms: GNU General Public License (GPL), version 2 - */ - -#ifndef _STI_DRM_PLANE_H_ -#define _STI_DRM_PLANE_H_ - -#include <drm/drmP.h> - -struct sti_layer; - -struct drm_plane *sti_drm_plane_init(struct drm_device *dev, - struct sti_layer *layer, - unsigned int possible_crtcs, - enum drm_plane_type type); -#endif diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 59d558b400b3..6f4af6a8ba1b 100644 --- a/drivers/gpu/drm/sti/sti_drm_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -18,8 +18,8 @@ #include <drm/drm_gem_cma_helper.h> #include <drm/drm_fb_cma_helper.h> -#include "sti_drm_drv.h" -#include "sti_drm_crtc.h" +#include "sti_crtc.h" +#include "sti_drv.h" #define DRIVER_NAME "sti" #define DRIVER_DESC "STMicroelectronics SoC DRM" @@ -30,15 +30,15 @@ #define STI_MAX_FB_HEIGHT 4096 #define STI_MAX_FB_WIDTH 4096 -static void sti_drm_atomic_schedule(struct sti_drm_private *private, - struct drm_atomic_state *state) +static void sti_atomic_schedule(struct sti_private *private, + struct drm_atomic_state *state) { private->commit.state = state; schedule_work(&private->commit.work); } -static void sti_drm_atomic_complete(struct sti_drm_private *private, - struct drm_atomic_state *state) +static void sti_atomic_complete(struct sti_private *private, + struct drm_atomic_state *state) { struct drm_device *drm = private->drm_dev; @@ -68,18 +68,18 @@ static void sti_drm_atomic_complete(struct sti_drm_private *private, drm_atomic_state_free(state); } -static void sti_drm_atomic_work(struct work_struct *work) +static void sti_atomic_work(struct work_struct *work) { - struct sti_drm_private *private = container_of(work, - struct sti_drm_private, commit.work); + struct sti_private *private = container_of(work, + struct sti_private, commit.work); - sti_drm_atomic_complete(private, private->commit.state); + sti_atomic_complete(private, private->commit.state); } -static int sti_drm_atomic_commit(struct drm_device *drm, - struct drm_atomic_state *state, bool async) +static int sti_atomic_commit(struct drm_device *drm, + struct drm_atomic_state *state, bool async) { - struct sti_drm_private *private = drm->dev_private; + struct sti_private *private = drm->dev_private; int err; err = drm_atomic_helper_prepare_planes(drm, state); @@ -99,21 +99,21 @@ static int sti_drm_atomic_commit(struct drm_device *drm, drm_atomic_helper_swap_state(drm, state); if (async) - sti_drm_atomic_schedule(private, state); + sti_atomic_schedule(private, state); else - sti_drm_atomic_complete(private, state); + sti_atomic_complete(private, state); mutex_unlock(&private->commit.lock); return 0; } -static struct drm_mode_config_funcs sti_drm_mode_config_funcs = { +static struct drm_mode_config_funcs sti_mode_config_funcs = { .fb_create = drm_fb_cma_create, .atomic_check = drm_atomic_helper_check, - .atomic_commit = sti_drm_atomic_commit, + .atomic_commit = sti_atomic_commit, }; -static void sti_drm_mode_config_init(struct drm_device *dev) +static void sti_mode_config_init(struct drm_device *dev) { dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; @@ -126,15 +126,15 @@ static void sti_drm_mode_config_init(struct drm_device *dev) dev->mode_config.max_width = STI_MAX_FB_HEIGHT; dev->mode_config.max_height = STI_MAX_FB_WIDTH; - dev->mode_config.funcs = &sti_drm_mode_config_funcs; + dev->mode_config.funcs = &sti_mode_config_funcs; } -static int sti_drm_load(struct drm_device *dev, unsigned long flags) +static int sti_load(struct drm_device *dev, unsigned long flags) { - struct 
sti_drm_private *private; + struct sti_private *private; int ret; - private = kzalloc(sizeof(struct sti_drm_private), GFP_KERNEL); + private = kzalloc(sizeof(*private), GFP_KERNEL); if (!private) { DRM_ERROR("Failed to allocate private\n"); return -ENOMEM; @@ -143,12 +143,12 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags) private->drm_dev = dev; mutex_init(&private->commit.lock); - INIT_WORK(&private->commit.work, sti_drm_atomic_work); + INIT_WORK(&private->commit.work, sti_atomic_work); drm_mode_config_init(dev); drm_kms_helper_poll_init(dev); - sti_drm_mode_config_init(dev); + sti_mode_config_init(dev); ret = component_bind_all(dev->dev, dev); if (ret) { @@ -162,13 +162,13 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags) #ifdef CONFIG_DRM_STI_FBDEV drm_fbdev_cma_init(dev, 32, - dev->mode_config.num_crtc, - dev->mode_config.num_connector); + dev->mode_config.num_crtc, + dev->mode_config.num_connector); #endif return 0; } -static const struct file_operations sti_drm_driver_fops = { +static const struct file_operations sti_driver_fops = { .owner = THIS_MODULE, .open = drm_open, .mmap = drm_gem_cma_mmap, @@ -181,33 +181,33 @@ static const struct file_operations sti_drm_driver_fops = { .release = drm_release, }; -static struct dma_buf *sti_drm_gem_prime_export(struct drm_device *dev, - struct drm_gem_object *obj, - int flags) +static struct dma_buf *sti_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *obj, + int flags) { /* we want to be able to write in mmapped buffer */ flags |= O_RDWR; return drm_gem_prime_export(dev, obj, flags); } -static struct drm_driver sti_drm_driver = { +static struct drm_driver sti_driver = { .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, - .load = sti_drm_load, + .load = sti_load, .gem_free_object = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = drm_gem_cma_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, .dumb_destroy = drm_gem_dumb_destroy, - .fops = &sti_drm_driver_fops, + .fops = &sti_driver_fops, .get_vblank_counter = drm_vblank_count, - .enable_vblank = sti_drm_crtc_enable_vblank, - .disable_vblank = sti_drm_crtc_disable_vblank, + .enable_vblank = sti_crtc_enable_vblank, + .disable_vblank = sti_crtc_disable_vblank, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_export = sti_drm_gem_prime_export, + .gem_prime_export = sti_gem_prime_export, .gem_prime_import = drm_gem_prime_import, .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, @@ -227,30 +227,32 @@ static int compare_of(struct device *dev, void *data) return dev->of_node == data; } -static int sti_drm_bind(struct device *dev) +static int sti_bind(struct device *dev) { - return drm_platform_init(&sti_drm_driver, to_platform_device(dev)); + return drm_platform_init(&sti_driver, to_platform_device(dev)); } -static void sti_drm_unbind(struct device *dev) +static void sti_unbind(struct device *dev) { drm_put_dev(dev_get_drvdata(dev)); } -static const struct component_master_ops sti_drm_ops = { - .bind = sti_drm_bind, - .unbind = sti_drm_unbind, +static const struct component_master_ops sti_ops = { + .bind = sti_bind, + .unbind = sti_unbind, }; -static int sti_drm_master_probe(struct platform_device *pdev) +static int sti_platform_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *node = 
dev->parent->of_node; + struct device_node *node = dev->of_node; struct device_node *child_np; struct component_match *match = NULL; dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); + of_platform_populate(node, NULL, NULL, dev); + child_np = of_get_next_available_child(node, NULL); while (child_np) { @@ -259,68 +261,33 @@ static int sti_drm_master_probe(struct platform_device *pdev) child_np = of_get_next_available_child(node, child_np); } - return component_master_add_with_match(dev, &sti_drm_ops, match); -} - -static int sti_drm_master_remove(struct platform_device *pdev) -{ - component_master_del(&pdev->dev, &sti_drm_ops); - return 0; + return component_master_add_with_match(dev, &sti_ops, match); } -static struct platform_driver sti_drm_master_driver = { - .probe = sti_drm_master_probe, - .remove = sti_drm_master_remove, - .driver = { - .name = DRIVER_NAME "__master", - }, -}; - -static int sti_drm_platform_probe(struct platform_device *pdev) +static int sti_platform_remove(struct platform_device *pdev) { - struct device *dev = &pdev->dev; - struct device_node *node = dev->of_node; - struct platform_device *master; - - of_platform_populate(node, NULL, NULL, dev); - - platform_driver_register(&sti_drm_master_driver); - master = platform_device_register_resndata(dev, - DRIVER_NAME "__master", -1, - NULL, 0, NULL, 0); - if (IS_ERR(master)) - return PTR_ERR(master); - - platform_set_drvdata(pdev, master); - return 0; -} - -static int sti_drm_platform_remove(struct platform_device *pdev) -{ - struct platform_device *master = platform_get_drvdata(pdev); - + component_master_del(&pdev->dev, &sti_ops); of_platform_depopulate(&pdev->dev); - platform_device_unregister(master); - platform_driver_unregister(&sti_drm_master_driver); + return 0; } -static const struct of_device_id sti_drm_dt_ids[] = { +static const struct of_device_id sti_dt_ids[] = { { .compatible = "st,sti-display-subsystem", }, { /* end node */ }, }; -MODULE_DEVICE_TABLE(of, sti_drm_dt_ids); +MODULE_DEVICE_TABLE(of, sti_dt_ids); -static struct platform_driver sti_drm_platform_driver = { - .probe = sti_drm_platform_probe, - .remove = sti_drm_platform_remove, +static struct platform_driver sti_platform_driver = { + .probe = sti_platform_probe, + .remove = sti_platform_remove, .driver = { .name = DRIVER_NAME, - .of_match_table = sti_drm_dt_ids, + .of_match_table = sti_dt_ids, }, }; -module_platform_driver(sti_drm_platform_driver); +module_platform_driver(sti_platform_driver); MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>"); MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver"); diff --git a/drivers/gpu/drm/sti/sti_drm_drv.h b/drivers/gpu/drm/sti/sti_drv.h index c413aa3ff402..9372f69e1859 100644 --- a/drivers/gpu/drm/sti/sti_drm_drv.h +++ b/drivers/gpu/drm/sti/sti_drv.h @@ -4,8 +4,8 @@ * License terms: GNU General Public License (GPL), version 2 */ -#ifndef _STI_DRM_DRV_H_ -#define _STI_DRM_DRV_H_ +#ifndef _STI_DRV_H_ +#define _STI_DRV_H_ #include <drm/drmP.h> @@ -20,7 +20,7 @@ struct sti_tvout; * @plane_zorder_property: z-order property for CRTC planes * @drm_dev: drm device */ -struct sti_drm_private { +struct sti_private { struct sti_compositor *compo; struct drm_property *plane_zorder_property; struct drm_device *drm_dev; diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index 087906fd8846..9365670427ad 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -9,9 +9,12 @@ #include <linux/clk.h> #include <linux/dma-mapping.h> +#include <drm/drm_fb_cma_helper.h> 
+#include <drm/drm_gem_cma_helper.h> + #include "sti_compositor.h" #include "sti_gdp.h" -#include "sti_layer.h" +#include "sti_plane.h" #include "sti_vtg.h" #define ALPHASWITCH BIT(6) @@ -26,7 +29,7 @@ #define GDP_XBGR8888 (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH) #define GDP_ARGB8565 0x04 #define GDP_ARGB8888 0x05 -#define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH) +#define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH) #define GDP_ARGB1555 0x06 #define GDP_ARGB4444 0x07 #define GDP_CLUT8 0x0B @@ -53,8 +56,8 @@ #define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0)) #define GAM_GDP_SIZE_MAX 0x7FF -#define GDP_NODE_NB_BANK 2 -#define GDP_NODE_PER_FIELD 2 +#define GDP_NODE_NB_BANK 2 +#define GDP_NODE_PER_FIELD 2 struct sti_gdp_node { u32 gam_gdp_ctl; @@ -85,16 +88,20 @@ struct sti_gdp_node_list { /** * STI GDP structure * - * @layer: layer structure + * @sti_plane: sti_plane structure + * @dev: driver device + * @regs: gdp registers * @clk_pix: pixel clock for the current gdp * @clk_main_parent: gdp parent clock if main path used * @clk_aux_parent: gdp parent clock if aux path used * @vtg_field_nb: callback for VTG FIELD (top or bottom) notification * @is_curr_top: true if the current node processed is the top field - * @node_list: array of node list + * @node_list: array of node list */ struct sti_gdp { - struct sti_layer layer; + struct sti_plane plane; + struct device *dev; + void __iomem *regs; struct clk *clk_pix; struct clk *clk_main_parent; struct clk *clk_aux_parent; @@ -103,7 +110,7 @@ struct sti_gdp { struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK]; }; -#define to_sti_gdp(x) container_of(x, struct sti_gdp, layer) +#define to_sti_gdp(x) container_of(x, struct sti_gdp, plane) static const uint32_t gdp_supported_formats[] = { DRM_FORMAT_XRGB8888, @@ -120,16 +127,6 @@ static const uint32_t gdp_supported_formats[] = { DRM_FORMAT_C8, }; -static const uint32_t *sti_gdp_get_formats(struct sti_layer *layer) -{ - return gdp_supported_formats; -} - -static unsigned int sti_gdp_get_nb_formats(struct sti_layer *layer) -{ - return ARRAY_SIZE(gdp_supported_formats); -} - static int sti_gdp_fourcc2format(int fourcc) { switch (fourcc) { @@ -175,20 +172,19 @@ static int sti_gdp_get_alpharange(int format) /** * sti_gdp_get_free_nodes - * @layer: gdp layer + * @gdp: gdp pointer * * Look for a GDP node list that is not currently read by the HW. * * RETURNS: * Pointer to the free GDP node list */ -static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer) +static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_gdp *gdp) { int hw_nvn; - struct sti_gdp *gdp = to_sti_gdp(layer); unsigned int i; - hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET); + hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET); if (!hw_nvn) goto end; @@ -199,7 +195,7 @@ static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer) /* in hazardious cases restart with the first node */ DRM_ERROR("inconsistent NVN for %s: 0x%08X\n", - sti_layer_to_str(layer), hw_nvn); + sti_plane_to_str(&gdp->plane), hw_nvn); end: return &gdp->node_list[0]; @@ -207,7 +203,7 @@ end: /** * sti_gdp_get_current_nodes - * @layer: GDP layer + * @gdp: gdp pointer * * Look for GDP nodes that are currently read by the HW. 
* @@ -215,13 +211,12 @@ end: * Pointer to the current GDP node list */ static -struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer) +struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_gdp *gdp) { int hw_nvn; - struct sti_gdp *gdp = to_sti_gdp(layer); unsigned int i; - hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET); + hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET); if (!hw_nvn) goto end; @@ -232,205 +227,25 @@ struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer) end: DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n", - hw_nvn, sti_layer_to_str(layer)); + hw_nvn, sti_plane_to_str(&gdp->plane)); return NULL; } /** - * sti_gdp_prepare_layer - * @lay: gdp layer - * @first_prepare: true if it is the first time this function is called - * - * Update the free GDP node list according to the layer properties. - * - * RETURNS: - * 0 on success. - */ -static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare) -{ - struct sti_gdp_node_list *list; - struct sti_gdp_node *top_field, *btm_field; - struct drm_display_mode *mode = layer->mode; - struct device *dev = layer->dev; - struct sti_gdp *gdp = to_sti_gdp(layer); - struct sti_compositor *compo = dev_get_drvdata(dev); - int format; - unsigned int depth, bpp; - int rate = mode->clock * 1000; - int res; - u32 ydo, xdo, yds, xds; - - list = sti_gdp_get_free_nodes(layer); - top_field = list->top_field; - btm_field = list->btm_field; - - dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__, - sti_layer_to_str(layer), top_field, btm_field); - - /* Build the top field from layer params */ - top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE; - top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC; - format = sti_gdp_fourcc2format(layer->format); - if (format == -1) { - DRM_ERROR("Format not supported by GDP %.4s\n", - (char *)&layer->format); - return 1; - } - top_field->gam_gdp_ctl |= format; - top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format); - top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE; - - /* pixel memory location */ - drm_fb_get_bpp_depth(layer->format, &depth, &bpp); - top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0]; - top_field->gam_gdp_pml += layer->src_x * (bpp >> 3); - top_field->gam_gdp_pml += layer->src_y * layer->pitches[0]; - - /* input parameters */ - top_field->gam_gdp_pmp = layer->pitches[0]; - top_field->gam_gdp_size = - clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 | - clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX); - - /* output parameters */ - ydo = sti_vtg_get_line_number(*mode, layer->dst_y); - yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1); - xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x); - xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1); - top_field->gam_gdp_vpo = (ydo << 16) | xdo; - top_field->gam_gdp_vps = (yds << 16) | xds; - - /* Same content and chained together */ - memcpy(btm_field, top_field, sizeof(*btm_field)); - top_field->gam_gdp_nvn = list->btm_field_paddr; - btm_field->gam_gdp_nvn = list->top_field_paddr; - - /* Interlaced mode */ - if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) - btm_field->gam_gdp_pml = top_field->gam_gdp_pml + - layer->pitches[0]; - - if (first_prepare) { - /* Register gdp callback */ - if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ? 
- compo->vtg_main : compo->vtg_aux, - &gdp->vtg_field_nb, layer->mixer_id)) { - DRM_ERROR("Cannot register VTG notifier\n"); - return 1; - } - - /* Set and enable gdp clock */ - if (gdp->clk_pix) { - struct clk *clkp; - /* According to the mixer used, the gdp pixel clock - * should have a different parent clock. */ - if (layer->mixer_id == STI_MIXER_MAIN) - clkp = gdp->clk_main_parent; - else - clkp = gdp->clk_aux_parent; - - if (clkp) - clk_set_parent(gdp->clk_pix, clkp); - - res = clk_set_rate(gdp->clk_pix, rate); - if (res < 0) { - DRM_ERROR("Cannot set rate (%dHz) for gdp\n", - rate); - return 1; - } - - if (clk_prepare_enable(gdp->clk_pix)) { - DRM_ERROR("Failed to prepare/enable gdp\n"); - return 1; - } - } - } - - return 0; -} - -/** - * sti_gdp_commit_layer - * @lay: gdp layer - * - * Update the NVN field of the 'right' field of the current GDP node (being - * used by the HW) with the address of the updated ('free') top field GDP node. - * - In interlaced mode the 'right' field is the bottom field as we update - * frames starting from their top field - * - In progressive mode, we update both bottom and top fields which are - * equal nodes. - * At the next VSYNC, the updated node list will be used by the HW. - * - * RETURNS: - * 0 on success. - */ -static int sti_gdp_commit_layer(struct sti_layer *layer) -{ - struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer); - struct sti_gdp_node *updated_top_node = updated_list->top_field; - struct sti_gdp_node *updated_btm_node = updated_list->btm_field; - struct sti_gdp *gdp = to_sti_gdp(layer); - u32 dma_updated_top = updated_list->top_field_paddr; - u32 dma_updated_btm = updated_list->btm_field_paddr; - struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer); - - dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__, - sti_layer_to_str(layer), - updated_top_node, updated_btm_node); - dev_dbg(layer->dev, "Current NVN:0x%X\n", - readl(layer->regs + GAM_GDP_NVN_OFFSET)); - dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n", - (unsigned long)layer->paddr, - readl(layer->regs + GAM_GDP_PML_OFFSET)); - - if (curr_list == NULL) { - /* First update or invalid node should directly write in the - * hw register */ - DRM_DEBUG_DRIVER("%s first update (or invalid node)", - sti_layer_to_str(layer)); - - writel(gdp->is_curr_top == true ? - dma_updated_btm : dma_updated_top, - layer->regs + GAM_GDP_NVN_OFFSET); - return 0; - } - - if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) { - if (gdp->is_curr_top == true) { - /* Do not update in the middle of the frame, but - * postpone the update after the bottom field has - * been displayed */ - curr_list->btm_field->gam_gdp_nvn = dma_updated_top; - } else { - /* Direct update to avoid one frame delay */ - writel(dma_updated_top, - layer->regs + GAM_GDP_NVN_OFFSET); - } - } else { - /* Direct update for progressive to avoid one frame delay */ - writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET); - } - - return 0; -} - -/** - * sti_gdp_disable_layer - * @lay: gdp layer + * sti_gdp_disable + * @gdp: gdp pointer * * Disable a GDP. - * - * RETURNS: - * 0 on success. 
*/ -static int sti_gdp_disable_layer(struct sti_layer *layer) +static void sti_gdp_disable(struct sti_gdp *gdp) { + struct drm_plane *drm_plane = &gdp->plane.drm_plane; + struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc); + struct sti_compositor *compo = dev_get_drvdata(gdp->dev); unsigned int i; - struct sti_gdp *gdp = to_sti_gdp(layer); - struct sti_compositor *compo = dev_get_drvdata(layer->dev); - DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer)); + DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&gdp->plane)); /* Set the nodes as 'to be ignored on mixer' */ for (i = 0; i < GDP_NODE_NB_BANK; i++) { @@ -438,14 +253,14 @@ static int sti_gdp_disable_layer(struct sti_layer *layer) gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE; } - if (sti_vtg_unregister_client(layer->mixer_id == STI_MIXER_MAIN ? + if (sti_vtg_unregister_client(mixer->id == STI_MIXER_MAIN ? compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb)) DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); if (gdp->clk_pix) clk_disable_unprepare(gdp->clk_pix); - return 0; + gdp->plane.status = STI_PLANE_DISABLED; } /** @@ -464,6 +279,14 @@ int sti_gdp_field_cb(struct notifier_block *nb, { struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb); + if (gdp->plane.status == STI_PLANE_FLUSHING) { + /* disabling needs to be synchronized with the vsync event */ + DRM_DEBUG_DRIVER("Vsync event received => disable %s\n", + sti_plane_to_str(&gdp->plane)); + + sti_gdp_disable(gdp); + } + switch (event) { case VTG_TOP_FIELD_EVENT: gdp->is_curr_top = true; @@ -479,10 +302,9 @@ int sti_gdp_field_cb(struct notifier_block *nb, return 0; } -static void sti_gdp_init(struct sti_layer *layer) +static void sti_gdp_init(struct sti_gdp *gdp) { - struct sti_gdp *gdp = to_sti_gdp(layer); - struct device_node *np = layer->dev->of_node; + struct device_node *np = gdp->dev->of_node; dma_addr_t dma_addr; void *base; unsigned int i, size; @@ -490,8 +312,8 @@ static void sti_gdp_init(struct sti_layer *layer) /* Allocate all the nodes within a single memory page */ size = sizeof(struct sti_gdp_node) * GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK; - base = dma_alloc_writecombine(layer->dev, - size, &dma_addr, GFP_KERNEL | GFP_DMA); + base = dma_alloc_writecombine(gdp->dev, - size, &dma_addr, GFP_KERNEL | GFP_DMA); + size, &dma_addr, GFP_KERNEL | GFP_DMA); if (!base) { DRM_ERROR("Failed to allocate memory for GDP node\n"); @@ -526,7 +348,7 @@ static void sti_gdp_init(struct sti_layer *layer) /* GDP of STiH407 chip have its own pixel clock */ char *clk_name; - switch (layer->desc) { + switch (gdp->plane.desc) { case STI_GDP_0: clk_name = "pix_gdp1"; break; @@ -544,32 +366,249 @@ static void sti_gdp_init(struct sti_layer *layer) return; } - gdp->clk_pix = devm_clk_get(layer->dev, clk_name); + gdp->clk_pix = devm_clk_get(gdp->dev, clk_name); if (IS_ERR(gdp->clk_pix)) DRM_ERROR("Cannot get %s clock\n", clk_name); - gdp->clk_main_parent = devm_clk_get(layer->dev, "main_parent"); + gdp->clk_main_parent = devm_clk_get(gdp->dev, "main_parent"); if (IS_ERR(gdp->clk_main_parent)) DRM_ERROR("Cannot get main_parent clock\n"); - gdp->clk_aux_parent = devm_clk_get(layer->dev, "aux_parent"); + gdp->clk_aux_parent = devm_clk_get(gdp->dev, "aux_parent"); if (IS_ERR(gdp->clk_aux_parent)) DRM_ERROR("Cannot get aux_parent clock\n"); } } -static const struct sti_layer_funcs gdp_ops = { - .get_formats = sti_gdp_get_formats, - .get_nb_formats = sti_gdp_get_nb_formats, - .init = sti_gdp_init, - .prepare = sti_gdp_prepare_layer, - .commit = sti_gdp_commit_layer, - .disable = sti_gdp_disable_layer, +static
void sti_gdp_atomic_update(struct drm_plane *drm_plane, + struct drm_plane_state *oldstate) +{ + struct drm_plane_state *state = drm_plane->state; + struct sti_plane *plane = to_sti_plane(drm_plane); + struct sti_gdp *gdp = to_sti_gdp(plane); + struct drm_crtc *crtc = state->crtc; + struct sti_compositor *compo = dev_get_drvdata(gdp->dev); + struct drm_framebuffer *fb = state->fb; + bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false; + struct sti_mixer *mixer; + struct drm_display_mode *mode; + int dst_x, dst_y, dst_w, dst_h; + int src_x, src_y, src_w, src_h; + struct drm_gem_cma_object *cma_obj; + struct sti_gdp_node_list *list; + struct sti_gdp_node_list *curr_list; + struct sti_gdp_node *top_field, *btm_field; + u32 dma_updated_top; + u32 dma_updated_btm; + int format; + unsigned int depth, bpp; + u32 ydo, xdo, yds, xds; + int res; + + /* Manage the case where crtc is null (disabled) */ + if (!crtc) + return; + + mixer = to_sti_mixer(crtc); + mode = &crtc->mode; + dst_x = state->crtc_x; + dst_y = state->crtc_y; + dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x); + dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y); + /* src_x are in 16.16 format */ + src_x = state->src_x >> 16; + src_y = state->src_y >> 16; + src_w = state->src_w >> 16; + src_h = state->src_h >> 16; + + DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n", + crtc->base.id, sti_mixer_to_str(mixer), + drm_plane->base.id, sti_plane_to_str(plane)); + DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n", + sti_plane_to_str(plane), + dst_w, dst_h, dst_x, dst_y, + src_w, src_h, src_x, src_y); + + list = sti_gdp_get_free_nodes(gdp); + top_field = list->top_field; + btm_field = list->btm_field; + + dev_dbg(gdp->dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__, + sti_plane_to_str(plane), top_field, btm_field); + + /* build the top field */ + top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE; + top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC; + format = sti_gdp_fourcc2format(fb->pixel_format); + if (format == -1) { + DRM_ERROR("Format not supported by GDP %.4s\n", + (char *)&fb->pixel_format); + return; + } + top_field->gam_gdp_ctl |= format; + top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format); + top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE; + + cma_obj = drm_fb_cma_get_gem_obj(fb, 0); + if (!cma_obj) { + DRM_ERROR("Can't get CMA GEM object for fb\n"); + return; + } + + DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id, + (char *)&fb->pixel_format, + (unsigned long)cma_obj->paddr); + + /* pixel memory location */ + drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp); + top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0]; + top_field->gam_gdp_pml += src_x * (bpp >> 3); + top_field->gam_gdp_pml += src_y * fb->pitches[0]; + + /* input parameters */ + top_field->gam_gdp_pmp = fb->pitches[0]; + top_field->gam_gdp_size = clamp_val(src_h, 0, GAM_GDP_SIZE_MAX) << 16 | + clamp_val(src_w, 0, GAM_GDP_SIZE_MAX); + + /* output parameters */ + ydo = sti_vtg_get_line_number(*mode, dst_y); + yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1); + xdo = sti_vtg_get_pixel_number(*mode, dst_x); + xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1); + top_field->gam_gdp_vpo = (ydo << 16) | xdo; + top_field->gam_gdp_vps = (yds << 16) | xds; + + /* Same content and chained together */ + memcpy(btm_field, top_field, sizeof(*btm_field)); + top_field->gam_gdp_nvn = list->btm_field_paddr; + btm_field->gam_gdp_nvn = list->top_field_paddr; + + /* Interlaced mode 
*/ + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + btm_field->gam_gdp_pml = top_field->gam_gdp_pml + + fb->pitches[0]; + + if (first_prepare) { + /* Register gdp callback */ + if (sti_vtg_register_client(mixer->id == STI_MIXER_MAIN ? + compo->vtg_main : compo->vtg_aux, + &gdp->vtg_field_nb, mixer->id)) { + DRM_ERROR("Cannot register VTG notifier\n"); + return; + } + + /* Set and enable gdp clock */ + if (gdp->clk_pix) { + struct clk *clkp; + int rate = mode->clock * 1000; + + /* According to the mixer used, the gdp pixel clock + * should have a different parent clock. */ + if (mixer->id == STI_MIXER_MAIN) + clkp = gdp->clk_main_parent; + else + clkp = gdp->clk_aux_parent; + + if (clkp) + clk_set_parent(gdp->clk_pix, clkp); + + res = clk_set_rate(gdp->clk_pix, rate); + if (res < 0) { + DRM_ERROR("Cannot set rate (%dHz) for gdp\n", + rate); + return; + } + + if (clk_prepare_enable(gdp->clk_pix)) { + DRM_ERROR("Failed to prepare/enable gdp\n"); + return; + } + } + } + + /* Update the NVN field of the 'right' field of the current GDP node + * (being used by the HW) with the address of the updated ('free') top + * field GDP node. + * - In interlaced mode the 'right' field is the bottom field as we + * update frames starting from their top field + * - In progressive mode, we update both bottom and top fields which + * are equal nodes. + * At the next VSYNC, the updated node list will be used by the HW. + */ + curr_list = sti_gdp_get_current_nodes(gdp); + dma_updated_top = list->top_field_paddr; + dma_updated_btm = list->btm_field_paddr; + + dev_dbg(gdp->dev, "Current NVN:0x%X\n", + readl(gdp->regs + GAM_GDP_NVN_OFFSET)); + dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n", + (unsigned long)cma_obj->paddr, + readl(gdp->regs + GAM_GDP_PML_OFFSET)); + + if (!curr_list) { + /* First update or invalid node should directly write in the + * hw register */ + DRM_DEBUG_DRIVER("%s first update (or invalid node)", + sti_plane_to_str(plane)); + + writel(gdp->is_curr_top ? 
+ dma_updated_btm : dma_updated_top, + gdp->regs + GAM_GDP_NVN_OFFSET); + goto end; + } + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) { + if (gdp->is_curr_top) { + /* Do not update in the middle of the frame, but + * postpone the update after the bottom field has + * been displayed */ + curr_list->btm_field->gam_gdp_nvn = dma_updated_top; + } else { + /* Direct update to avoid one frame delay */ + writel(dma_updated_top, + gdp->regs + GAM_GDP_NVN_OFFSET); + } + } else { + /* Direct update for progressive to avoid one frame delay */ + writel(dma_updated_top, gdp->regs + GAM_GDP_NVN_OFFSET); + } + +end: + plane->status = STI_PLANE_UPDATED; +} + +static void sti_gdp_atomic_disable(struct drm_plane *drm_plane, + struct drm_plane_state *oldstate) +{ + struct sti_plane *plane = to_sti_plane(drm_plane); + struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc); + + if (!drm_plane->crtc) { + DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", + drm_plane->base.id); + return; + } + + DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n", + drm_plane->crtc->base.id, sti_mixer_to_str(mixer), + drm_plane->base.id, sti_plane_to_str(plane)); + + plane->status = STI_PLANE_DISABLING; +} + +static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = { + .atomic_update = sti_gdp_atomic_update, + .atomic_disable = sti_gdp_atomic_disable, }; -struct sti_layer *sti_gdp_create(struct device *dev, int id) +struct drm_plane *sti_gdp_create(struct drm_device *drm_dev, + struct device *dev, int desc, + void __iomem *baseaddr, + unsigned int possible_crtcs, + enum drm_plane_type type) { struct sti_gdp *gdp; + int res; gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL); if (!gdp) { @@ -577,8 +616,33 @@ struct sti_layer *sti_gdp_create(struct device *dev, int id) return NULL; } - gdp->layer.ops = &gdp_ops; + gdp->dev = dev; + gdp->regs = baseaddr; + gdp->plane.desc = desc; + gdp->plane.status = STI_PLANE_DISABLED; + gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb; - return (struct sti_layer *)gdp; + sti_gdp_init(gdp); + + res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane, + possible_crtcs, + &sti_plane_helpers_funcs, + gdp_supported_formats, + ARRAY_SIZE(gdp_supported_formats), + type); + if (res) { + DRM_ERROR("Failed to initialize universal plane\n"); + goto err; + } + + drm_plane_helper_add(&gdp->plane.drm_plane, &sti_gdp_helpers_funcs); + + sti_plane_init_property(&gdp->plane, type); + + return &gdp->plane.drm_plane; + +err: + devm_kfree(dev, gdp); + return NULL; } diff --git a/drivers/gpu/drm/sti/sti_gdp.h b/drivers/gpu/drm/sti/sti_gdp.h index 1dab68274ad3..73947a4a8004 100644 --- a/drivers/gpu/drm/sti/sti_gdp.h +++ b/drivers/gpu/drm/sti/sti_gdp.h @@ -11,6 +11,9 @@ #include <linux/types.h> -struct sti_layer *sti_gdp_create(struct device *dev, int id); - +struct drm_plane *sti_gdp_create(struct drm_device *drm_dev, + struct device *dev, int desc, + void __iomem *baseaddr, + unsigned int possible_crtcs, + enum drm_plane_type type); #endif diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index f28a4d54487c..09e29e43423e 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -588,7 +588,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector) return count; fail: - DRM_ERROR("Can not read HDMI EDID\n"); + DRM_ERROR("Can't read HDMI EDID\n"); return 0; } @@ -693,21 +693,8 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) struct sti_hdmi_connector *connector; struct drm_connector *drm_connector; 
struct drm_bridge *bridge; - struct device_node *ddc; int err; - ddc = of_parse_phandle(dev->of_node, "ddc", 0); - if (ddc) { - hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc); - if (!hdmi->ddc_adapt) { - err = -EPROBE_DEFER; - of_node_put(ddc); - return err; - } - - of_node_put(ddc); - } - /* Set the drm device handle */ hdmi->drm_dev = drm_dev; @@ -796,6 +783,7 @@ static int sti_hdmi_probe(struct platform_device *pdev) struct sti_hdmi *hdmi; struct device_node *np = dev->of_node; struct resource *res; + struct device_node *ddc; int ret; DRM_INFO("%s\n", __func__); @@ -804,6 +792,17 @@ static int sti_hdmi_probe(struct platform_device *pdev) if (!hdmi) return -ENOMEM; + ddc = of_parse_phandle(pdev->dev.of_node, "ddc", 0); + if (ddc) { + hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc); + if (!hdmi->ddc_adapt) { + of_node_put(ddc); + return -EPROBE_DEFER; + } + + of_node_put(ddc); + } + hdmi->dev = pdev->dev; /* Get resources */ diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index b0eb62de1b2e..7c8f9b8bfae1 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -12,11 +12,12 @@ #include <linux/reset.h> #include <drm/drmP.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_gem_cma_helper.h> -#include "sti_drm_plane.h" -#include "sti_hqvdp.h" +#include "sti_compositor.h" #include "sti_hqvdp_lut.h" -#include "sti_layer.h" +#include "sti_plane.h" #include "sti_vtg.h" /* Firmware name */ @@ -322,8 +323,7 @@ struct sti_hqvdp_cmd { * @dev: driver device * @drm_dev: the drm device * @regs: registers - * @layer: layer structure for hqvdp it self - * @vid_plane: VID plug used as link with compositor IP + * @plane: plane structure for hqvdp it self * @clk: IP clock * @clk_pix_main: pix main clock * @reset: reset control @@ -334,13 +334,13 @@ struct sti_hqvdp_cmd { * @hqvdp_cmd: buffer of commands * @hqvdp_cmd_paddr: physical address of hqvdp_cmd * @vtg: vtg for main data path + * @xp70_initialized: true if xp70 is already initialized */ struct sti_hqvdp { struct device *dev; struct drm_device *drm_dev; void __iomem *regs; - struct sti_layer layer; - struct drm_plane *vid_plane; + struct sti_plane plane; struct clk *clk; struct clk *clk_pix_main; struct reset_control *reset; @@ -351,24 +351,15 @@ struct sti_hqvdp { void *hqvdp_cmd; dma_addr_t hqvdp_cmd_paddr; struct sti_vtg *vtg; + bool xp70_initialized; }; -#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, layer) +#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, plane) static const uint32_t hqvdp_supported_formats[] = { DRM_FORMAT_NV12, }; -static const uint32_t *sti_hqvdp_get_formats(struct sti_layer *layer) -{ - return hqvdp_supported_formats; -} - -static unsigned int sti_hqvdp_get_nb_formats(struct sti_layer *layer) -{ - return ARRAY_SIZE(hqvdp_supported_formats); -} - /** * sti_hqvdp_get_free_cmd * @hqvdp: hqvdp structure @@ -484,7 +475,12 @@ static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale, /** * sti_hqvdp_check_hw_scaling - * @layer: hqvdp layer + * @hqvdp: hqvdp pointer + * @mode: display mode with timing constraints + * @src_w: source width + * @src_h: source height + * @dst_w: destination width + * @dst_h: destination height * * Check if the HW is able to perform the scaling request * The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where: @@ -498,184 +494,36 @@ static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale, * RETURNS: * True if the HW can scale. 
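* Worked example (illustrative figures, not from a datasheet): with
* Tx = 2200 pixels, IPClock = 400 MHz, Cp = 148.5 MHz and
* max(InputWidth, OutputWidth) = 1920, FLOOR(LFW) =
* FLOOR(2200 * 400 / (1920 * 148.5)) = 3. Downscaling 2160 -> 1080
* lines gives CEIL(1/Zy) = DIV_ROUND_UP(2160, 1080) = 2, and 2 <= 3,
* so the firmware accepts the scaling request.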
*/ -static bool sti_hqvdp_check_hw_scaling(struct sti_layer *layer) +static bool sti_hqvdp_check_hw_scaling(struct sti_hqvdp *hqvdp, + struct drm_display_mode *mode, + int src_w, int src_h, + int dst_w, int dst_h) { - struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer); unsigned long lfw; unsigned int inv_zy; - lfw = layer->mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000); - lfw /= max(layer->src_w, layer->dst_w) * layer->mode->clock / 1000; + lfw = mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000); + lfw /= max(src_w, dst_w) * mode->clock / 1000; - inv_zy = DIV_ROUND_UP(layer->src_h, layer->dst_h); + inv_zy = DIV_ROUND_UP(src_h, dst_h); return (inv_zy <= lfw) ? true : false; } /** - * sti_hqvdp_prepare_layer - * @layer: hqvdp layer - * @first_prepare: true if it is the first time this function is called + * sti_hqvdp_disable + * @hqvdp: hqvdp pointer * - * Prepares a command for the firmware - * - * RETURNS: - * 0 on success. + * Disables the HQVDP plane */ -static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare) -{ - struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer); - struct sti_hqvdp_cmd *cmd; - int scale_h, scale_v; - int cmd_offset; - - dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer)); - - /* prepare and commit VID plane */ - hqvdp->vid_plane->funcs->update_plane(hqvdp->vid_plane, - layer->crtc, layer->fb, - layer->dst_x, layer->dst_y, - layer->dst_w, layer->dst_h, - layer->src_x, layer->src_y, - layer->src_w, layer->src_h); - - cmd_offset = sti_hqvdp_get_free_cmd(hqvdp); - if (cmd_offset == -1) { - DRM_ERROR("No available hqvdp_cmd now\n"); - return -EBUSY; - } - cmd = hqvdp->hqvdp_cmd + cmd_offset; - - if (!sti_hqvdp_check_hw_scaling(layer)) { - DRM_ERROR("Scaling beyond HW capabilities\n"); - return -EINVAL; - } - - /* Static parameters, defaulting to progressive mode */ - cmd->top.config = TOP_CONFIG_PROGRESSIVE; - cmd->top.mem_format = TOP_MEM_FORMAT_DFLT; - cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT; - cmd->csdi.config = CSDI_CONFIG_PROG; - - /* VC1RE, FMD bypassed : keep everything set to 0 - * IQI/P2I bypassed */ - cmd->iqi.config = IQI_CONFIG_DFLT; - cmd->iqi.con_bri = IQI_CON_BRI_DFLT; - cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT; - cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT; - - /* Buffer planes address */ - cmd->top.current_luma = (u32) layer->paddr + layer->offsets[0]; - cmd->top.current_chroma = (u32) layer->paddr + layer->offsets[1]; - - /* Pitches */ - cmd->top.luma_processed_pitch = cmd->top.luma_src_pitch = - layer->pitches[0]; - cmd->top.chroma_processed_pitch = cmd->top.chroma_src_pitch = - layer->pitches[1]; - - /* Input / output size - * Align to upper even value */ - layer->dst_w = ALIGN(layer->dst_w, 2); - layer->dst_h = ALIGN(layer->dst_h, 2); - - if ((layer->src_w > MAX_WIDTH) || (layer->src_w < MIN_WIDTH) || - (layer->src_h > MAX_HEIGHT) || (layer->src_h < MIN_HEIGHT) || - (layer->dst_w > MAX_WIDTH) || (layer->dst_w < MIN_WIDTH) || - (layer->dst_h > MAX_HEIGHT) || (layer->dst_h < MIN_HEIGHT)) { - DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n", - layer->src_w, layer->src_h, - layer->dst_w, layer->dst_h); - return -EINVAL; - } - cmd->top.input_viewport_size = cmd->top.input_frame_size = - layer->src_h << 16 | layer->src_w; - cmd->hvsrc.output_picture_size = layer->dst_h << 16 | layer->dst_w; - cmd->top.input_viewport_ori = layer->src_y << 16 | layer->src_x; - - /* Handle interlaced */ - if (layer->fb->flags & DRM_MODE_FB_INTERLACED) { - /* Top field to display */ - cmd->top.config = TOP_CONFIG_INTER_TOP; - - /* Update pitches 
and vert size */ - cmd->top.input_frame_size = (layer->src_h / 2) << 16 | - layer->src_w; - cmd->top.luma_processed_pitch *= 2; - cmd->top.luma_src_pitch *= 2; - cmd->top.chroma_processed_pitch *= 2; - cmd->top.chroma_src_pitch *= 2; - - /* Enable directional deinterlacing processing */ - cmd->csdi.config = CSDI_CONFIG_INTER_DIR; - cmd->csdi.config2 = CSDI_CONFIG2_DFLT; - cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT; - } - - /* Update hvsrc lut coef */ - scale_h = SCALE_FACTOR * layer->dst_w / layer->src_w; - sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc); - - scale_v = SCALE_FACTOR * layer->dst_h / layer->src_h; - sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc); - - if (first_prepare) { - /* Prevent VTG shutdown */ - if (clk_prepare_enable(hqvdp->clk_pix_main)) { - DRM_ERROR("Failed to prepare/enable pix main clk\n"); - return -ENXIO; - } - - /* Register VTG Vsync callback to handle bottom fields */ - if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) && - sti_vtg_register_client(hqvdp->vtg, - &hqvdp->vtg_nb, layer->mixer_id)) { - DRM_ERROR("Cannot register VTG notifier\n"); - return -ENXIO; - } - } - - return 0; -} - -static int sti_hqvdp_commit_layer(struct sti_layer *layer) +static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp) { - struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer); - int cmd_offset; - - dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer)); - - cmd_offset = sti_hqvdp_get_free_cmd(hqvdp); - if (cmd_offset == -1) { - DRM_ERROR("No available hqvdp_cmd now\n"); - return -EBUSY; - } - - writel(hqvdp->hqvdp_cmd_paddr + cmd_offset, - hqvdp->regs + HQVDP_MBX_NEXT_CMD); - - hqvdp->curr_field_count++; - - /* Interlaced : get ready to display the bottom field at next Vsync */ - if (layer->fb->flags & DRM_MODE_FB_INTERLACED) - hqvdp->btm_field_pending = true; - - dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n", - __func__, hqvdp->hqvdp_cmd_paddr + cmd_offset); - - return 0; -} - -static int sti_hqvdp_disable_layer(struct sti_layer *layer) -{ - struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer); int i; - DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer)); + DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&hqvdp->plane)); /* Unregister VTG Vsync callback */ - if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) && - sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb)) + if (sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb)) DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); /* Set next cmd to NULL */ @@ -691,15 +539,10 @@ static int sti_hqvdp_disable_layer(struct sti_layer *layer) /* VTG can stop now */ clk_disable_unprepare(hqvdp->clk_pix_main); - if (i == POLL_MAX_ATTEMPT) { + if (i == POLL_MAX_ATTEMPT) DRM_ERROR("XP70 could not revert to idle\n"); - return -ENXIO; - } - - /* disable VID plane */ - hqvdp->vid_plane->funcs->disable_plane(hqvdp->vid_plane); - return 0; + hqvdp->plane.status = STI_PLANE_DISABLED; } /** @@ -724,6 +567,14 @@ int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data) return 0; } + if (hqvdp->plane.status == STI_PLANE_FLUSHING) { + /* disable need to be synchronize on vsync event */ + DRM_DEBUG_DRIVER("Vsync event received => disable %s\n", + sti_plane_to_str(&hqvdp->plane)); + + sti_hqvdp_disable(hqvdp); + } + if (hqvdp->btm_field_pending) { /* Create the btm field command from the current one */ btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp); @@ -758,32 +609,10 @@ int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data) return 0; } -static struct drm_plane *sti_hqvdp_find_vid(struct 
drm_device *dev, int id) +static void sti_hqvdp_init(struct sti_hqvdp *hqvdp) { - struct drm_plane *plane; - - list_for_each_entry(plane, &dev->mode_config.plane_list, head) { - struct sti_layer *layer = to_sti_layer(plane); - - if (layer->desc == id) - return plane; - } - - return NULL; -} - -static void sti_hqvd_init(struct sti_layer *layer) -{ - struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer); int size; - /* find the plane macthing with vid 0 */ - hqvdp->vid_plane = sti_hqvdp_find_vid(hqvdp->drm_dev, STI_VID_0); - if (!hqvdp->vid_plane) { - DRM_ERROR("Cannot find Main video layer\n"); - return; - } - hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb; /* Allocate memory for the VDP commands */ @@ -799,24 +628,213 @@ static void sti_hqvd_init(struct sti_layer *layer) memset(hqvdp->hqvdp_cmd, 0, size); } -static const struct sti_layer_funcs hqvdp_ops = { - .get_formats = sti_hqvdp_get_formats, - .get_nb_formats = sti_hqvdp_get_nb_formats, - .init = sti_hqvd_init, - .prepare = sti_hqvdp_prepare_layer, - .commit = sti_hqvdp_commit_layer, - .disable = sti_hqvdp_disable_layer, +static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane, + struct drm_plane_state *oldstate) +{ + struct drm_plane_state *state = drm_plane->state; + struct sti_plane *plane = to_sti_plane(drm_plane); + struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane); + struct drm_crtc *crtc = state->crtc; + struct sti_mixer *mixer = to_sti_mixer(crtc); + struct drm_framebuffer *fb = state->fb; + struct drm_display_mode *mode = &crtc->mode; + int dst_x = state->crtc_x; + int dst_y = state->crtc_y; + int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x); + int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y); + /* src_x are in 16.16 format */ + int src_x = state->src_x >> 16; + int src_y = state->src_y >> 16; + int src_w = state->src_w >> 16; + int src_h = state->src_h >> 16; + bool first_prepare = plane->status == STI_PLANE_DISABLED ? 
true : false; + struct drm_gem_cma_object *cma_obj; + struct sti_hqvdp_cmd *cmd; + int scale_h, scale_v; + int cmd_offset; + + DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n", + crtc->base.id, sti_mixer_to_str(mixer), + drm_plane->base.id, sti_plane_to_str(plane)); + DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n", + sti_plane_to_str(plane), + dst_w, dst_h, dst_x, dst_y, + src_w, src_h, src_x, src_y); + + cmd_offset = sti_hqvdp_get_free_cmd(hqvdp); + if (cmd_offset == -1) { + DRM_ERROR("No available hqvdp_cmd now\n"); + return; + } + cmd = hqvdp->hqvdp_cmd + cmd_offset; + + if (!sti_hqvdp_check_hw_scaling(hqvdp, mode, + src_w, src_h, + dst_w, dst_h)) { + DRM_ERROR("Scaling beyond HW capabilities\n"); + return; + } + + /* Static parameters, defaulting to progressive mode */ + cmd->top.config = TOP_CONFIG_PROGRESSIVE; + cmd->top.mem_format = TOP_MEM_FORMAT_DFLT; + cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT; + cmd->csdi.config = CSDI_CONFIG_PROG; + + /* VC1RE, FMD bypassed : keep everything set to 0 + * IQI/P2I bypassed */ + cmd->iqi.config = IQI_CONFIG_DFLT; + cmd->iqi.con_bri = IQI_CON_BRI_DFLT; + cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT; + cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT; + + cma_obj = drm_fb_cma_get_gem_obj(fb, 0); + if (!cma_obj) { + DRM_ERROR("Can't get CMA GEM object for fb\n"); + return; + } + + DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id, + (char *)&fb->pixel_format, + (unsigned long)cma_obj->paddr); + + /* Buffer planes address */ + cmd->top.current_luma = (u32)cma_obj->paddr + fb->offsets[0]; + cmd->top.current_chroma = (u32)cma_obj->paddr + fb->offsets[1]; + + /* Pitches */ + cmd->top.luma_processed_pitch = fb->pitches[0]; + cmd->top.luma_src_pitch = fb->pitches[0]; + cmd->top.chroma_processed_pitch = fb->pitches[1]; + cmd->top.chroma_src_pitch = fb->pitches[1]; + + /* Input / output size + * Align to upper even value */ + dst_w = ALIGN(dst_w, 2); + dst_h = ALIGN(dst_h, 2); + + if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) || + (src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) || + (dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) || + (dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) { + DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n", + src_w, src_h, + dst_w, dst_h); + return; + } + + cmd->top.input_viewport_size = src_h << 16 | src_w; + cmd->top.input_frame_size = src_h << 16 | src_w; + cmd->hvsrc.output_picture_size = dst_h << 16 | dst_w; + cmd->top.input_viewport_ori = src_y << 16 | src_x; + + /* Handle interlaced */ + if (fb->flags & DRM_MODE_FB_INTERLACED) { + /* Top field to display */ + cmd->top.config = TOP_CONFIG_INTER_TOP; + + /* Update pitches and vert size */ + cmd->top.input_frame_size = (src_h / 2) << 16 | src_w; + cmd->top.luma_processed_pitch *= 2; + cmd->top.luma_src_pitch *= 2; + cmd->top.chroma_processed_pitch *= 2; + cmd->top.chroma_src_pitch *= 2; + + /* Enable directional deinterlacing processing */ + cmd->csdi.config = CSDI_CONFIG_INTER_DIR; + cmd->csdi.config2 = CSDI_CONFIG2_DFLT; + cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT; + } + + /* Update hvsrc lut coef */ + scale_h = SCALE_FACTOR * dst_w / src_w; + sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc); + + scale_v = SCALE_FACTOR * dst_h / src_h; + sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc); + + if (first_prepare) { + /* Prevent VTG shutdown */ + if (clk_prepare_enable(hqvdp->clk_pix_main)) { + DRM_ERROR("Failed to prepare/enable pix main clk\n"); + return; + } + + /* Register VTG Vsync callback to handle bottom fields */ + if 
(sti_vtg_register_client(hqvdp->vtg, + &hqvdp->vtg_nb, + mixer->id)) { + DRM_ERROR("Cannot register VTG notifier\n"); + return; + } + } + + writel(hqvdp->hqvdp_cmd_paddr + cmd_offset, + hqvdp->regs + HQVDP_MBX_NEXT_CMD); + + hqvdp->curr_field_count++; + + /* Interlaced : get ready to display the bottom field at next Vsync */ + if (fb->flags & DRM_MODE_FB_INTERLACED) + hqvdp->btm_field_pending = true; + + dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n", + __func__, hqvdp->hqvdp_cmd_paddr + cmd_offset); + + plane->status = STI_PLANE_UPDATED; +} + +static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane, + struct drm_plane_state *oldstate) +{ + struct sti_plane *plane = to_sti_plane(drm_plane); + struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc); + + if (!drm_plane->crtc) { + DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", + drm_plane->base.id); + return; + } + + DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n", + drm_plane->crtc->base.id, sti_mixer_to_str(mixer), + drm_plane->base.id, sti_plane_to_str(plane)); + + plane->status = STI_PLANE_DISABLING; +} + +static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = { + .atomic_update = sti_hqvdp_atomic_update, + .atomic_disable = sti_hqvdp_atomic_disable, }; -struct sti_layer *sti_hqvdp_create(struct device *dev) +static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev, + struct device *dev, int desc) { struct sti_hqvdp *hqvdp = dev_get_drvdata(dev); + int res; + + hqvdp->plane.desc = desc; + hqvdp->plane.status = STI_PLANE_DISABLED; - hqvdp->layer.ops = &hqvdp_ops; + sti_hqvdp_init(hqvdp); - return &hqvdp->layer; + res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1, + &sti_plane_helpers_funcs, + hqvdp_supported_formats, + ARRAY_SIZE(hqvdp_supported_formats), + DRM_PLANE_TYPE_OVERLAY); + if (res) { + DRM_ERROR("Failed to initialize universal plane\n"); + return NULL; + } + + drm_plane_helper_add(&hqvdp->plane.drm_plane, &sti_hqvdp_helpers_funcs); + + sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY); + + return &hqvdp->plane.drm_plane; } -EXPORT_SYMBOL(sti_hqvdp_create); static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp) { @@ -859,6 +877,12 @@ static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt) } *header; DRM_DEBUG_DRIVER("\n"); + + if (hqvdp->xp70_initialized) { + DRM_INFO("HQVDP XP70 already initialized\n"); + return; + } + /* Check firmware parts */ if (!firmware) { DRM_ERROR("Firmware not available\n"); @@ -946,7 +970,10 @@ static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt) /* Launch Vsync */ writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC); - DRM_INFO("HQVDP XP70 started\n"); + DRM_INFO("HQVDP XP70 initialized\n"); + + hqvdp->xp70_initialized = true; + out: release_firmware(firmware); } @@ -955,7 +982,7 @@ int sti_hqvdp_bind(struct device *dev, struct device *master, void *data) { struct sti_hqvdp *hqvdp = dev_get_drvdata(dev); struct drm_device *drm_dev = data; - struct sti_layer *layer; + struct drm_plane *plane; int err; DRM_DEBUG_DRIVER("\n"); @@ -971,13 +998,10 @@ int sti_hqvdp_bind(struct device *dev, struct device *master, void *data) return err; } - layer = sti_layer_create(hqvdp->dev, STI_HQVDP_0, hqvdp->regs); - if (!layer) { + /* Create HQVDP plane once xp70 is initialized */ + plane = sti_hqvdp_create(drm_dev, hqvdp->dev, STI_HQVDP_0); + if (!plane) DRM_ERROR("Can't create HQVDP plane\n"); - return -ENOMEM; - } - - sti_drm_plane_init(drm_dev, layer, 1, DRM_PLANE_TYPE_OVERLAY); return 0; } 
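The rework above replaces the prepare/commit/disable layer ops with atomic plane helpers driven by a small status machine. Below is a minimal userspace sketch of that lifecycle, using the sti_plane_status values from the new sti_plane.h further down; the DISABLING -> FLUSHING transition is assumed to happen in the CRTC flush path, which is not part of these hunks.

#include <stdio.h>

enum sti_plane_status {
	STI_PLANE_READY,
	STI_PLANE_UPDATED,
	STI_PLANE_DISABLING,
	STI_PLANE_FLUSHING,
	STI_PLANE_DISABLED,
};

static const char *status_str(enum sti_plane_status s)
{
	static const char * const names[] = {
		"READY", "UPDATED", "DISABLING", "FLUSHING", "DISABLED",
	};
	return names[s];
}

int main(void)
{
	enum sti_plane_status status = STI_PLANE_DISABLED;

	/* .atomic_update: only a DISABLED plane takes the first_prepare
	 * path (pix clock enable + VTG notifier registration) */
	int first_prepare = (status == STI_PLANE_DISABLED);
	status = STI_PLANE_UPDATED;
	printf("update (first_prepare=%d) -> %s\n", first_prepare,
	       status_str(status));

	/* .atomic_disable only flags the request... */
	status = STI_PLANE_DISABLING;
	printf("disable requested -> %s\n", status_str(status));

	/* ...the CRTC flush path (assumed, not shown in these hunks)
	 * marks the plane for the vsync handler... */
	status = STI_PLANE_FLUSHING;

	/* ...and sti_hqvdp_vtg_cb() does the real shutdown on vsync */
	if (status == STI_PLANE_FLUSHING)
		status = STI_PLANE_DISABLED;
	printf("vsync -> %s\n", status_str(status));

	return 0;
}

Deferring the real disable to the VTG vsync callback matches the comment in sti_hqvdp_vtg_cb(): the shutdown must be synchronized with a vsync event.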
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.h b/drivers/gpu/drm/sti/sti_hqvdp.h deleted file mode 100644 index cd5ecd0a6dea..000000000000 --- a/drivers/gpu/drm/sti/sti_hqvdp.h +++ /dev/null @@ -1,12 +0,0 @@ -/* - * Copyright (C) STMicroelectronics SA 2014 - * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics. - * License terms: GNU General Public License (GPL), version 2 - */ - -#ifndef _STI_HQVDP_H_ -#define _STI_HQVDP_H_ - -struct sti_layer *sti_hqvdp_create(struct device *dev); - -#endif diff --git a/drivers/gpu/drm/sti/sti_layer.c b/drivers/gpu/drm/sti/sti_layer.c deleted file mode 100644 index 899104f9d4bc..000000000000 --- a/drivers/gpu/drm/sti/sti_layer.c +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Copyright (C) STMicroelectronics SA 2014 - * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> - * Fabien Dessenne <fabien.dessenne@st.com> - * for STMicroelectronics. - * License terms: GNU General Public License (GPL), version 2 - */ - -#include <drm/drmP.h> -#include <drm/drm_gem_cma_helper.h> -#include <drm/drm_fb_cma_helper.h> - -#include "sti_compositor.h" -#include "sti_cursor.h" -#include "sti_gdp.h" -#include "sti_hqvdp.h" -#include "sti_layer.h" -#include "sti_vid.h" - -const char *sti_layer_to_str(struct sti_layer *layer) -{ - switch (layer->desc) { - case STI_GDP_0: - return "GDP0"; - case STI_GDP_1: - return "GDP1"; - case STI_GDP_2: - return "GDP2"; - case STI_GDP_3: - return "GDP3"; - case STI_VID_0: - return "VID0"; - case STI_VID_1: - return "VID1"; - case STI_CURSOR: - return "CURSOR"; - case STI_HQVDP_0: - return "HQVDP0"; - default: - return "<UNKNOWN LAYER>"; - } -} -EXPORT_SYMBOL(sti_layer_to_str); - -struct sti_layer *sti_layer_create(struct device *dev, int desc, - void __iomem *baseaddr) -{ - - struct sti_layer *layer = NULL; - - switch (desc & STI_LAYER_TYPE_MASK) { - case STI_GDP: - layer = sti_gdp_create(dev, desc); - break; - case STI_VID: - layer = sti_vid_create(dev); - break; - case STI_CUR: - layer = sti_cursor_create(dev); - break; - case STI_VDP: - layer = sti_hqvdp_create(dev); - break; - } - - if (!layer) { - DRM_ERROR("Failed to create layer\n"); - return NULL; - } - - layer->desc = desc; - layer->dev = dev; - layer->regs = baseaddr; - - layer->ops->init(layer); - - DRM_DEBUG_DRIVER("%s created\n", sti_layer_to_str(layer)); - - return layer; -} -EXPORT_SYMBOL(sti_layer_create); - -int sti_layer_prepare(struct sti_layer *layer, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_display_mode *mode, int mixer_id, - int dest_x, int dest_y, int dest_w, int dest_h, - int src_x, int src_y, int src_w, int src_h) -{ - int ret; - unsigned int i; - struct drm_gem_cma_object *cma_obj; - - if (!layer || !fb || !mode) { - DRM_ERROR("Null fb, layer or mode\n"); - return 1; - } - - cma_obj = drm_fb_cma_get_gem_obj(fb, 0); - if (!cma_obj) { - DRM_ERROR("Can't get CMA GEM object for fb\n"); - return 1; - } - - layer->crtc = crtc; - layer->fb = fb; - layer->mode = mode; - layer->mixer_id = mixer_id; - layer->dst_x = dest_x; - layer->dst_y = dest_y; - layer->dst_w = clamp_val(dest_w, 0, mode->crtc_hdisplay - dest_x); - layer->dst_h = clamp_val(dest_h, 0, mode->crtc_vdisplay - dest_y); - layer->src_x = src_x; - layer->src_y = src_y; - layer->src_w = src_w; - layer->src_h = src_h; - layer->format = fb->pixel_format; - layer->vaddr = cma_obj->vaddr; - layer->paddr = cma_obj->paddr; - for (i = 0; i < 4; i++) { - layer->pitches[i] = fb->pitches[i]; - layer->offsets[i] = fb->offsets[i]; - } - - DRM_DEBUG_DRIVER("%s is associated with 
mixer_id %d\n", - sti_layer_to_str(layer), - layer->mixer_id); - DRM_DEBUG_DRIVER("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n", - sti_layer_to_str(layer), - layer->dst_w, layer->dst_h, layer->dst_x, layer->dst_y, - layer->src_w, layer->src_h, layer->src_x, - layer->src_y); - - DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id, - (char *)&layer->format, (unsigned long)layer->paddr); - - if (!layer->ops->prepare) - goto err_no_prepare; - - ret = layer->ops->prepare(layer, !layer->enabled); - if (!ret) - layer->enabled = true; - - return ret; - -err_no_prepare: - DRM_ERROR("Cannot prepare\n"); - return 1; -} - -int sti_layer_commit(struct sti_layer *layer) -{ - if (!layer) - return 1; - - if (!layer->ops->commit) - goto err_no_commit; - - return layer->ops->commit(layer); - -err_no_commit: - DRM_ERROR("Cannot commit\n"); - return 1; -} - -int sti_layer_disable(struct sti_layer *layer) -{ - int ret; - - DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer)); - if (!layer) - return 1; - - if (!layer->enabled) - return 0; - - if (!layer->ops->disable) - goto err_no_disable; - - ret = layer->ops->disable(layer); - if (!ret) - layer->enabled = false; - else - DRM_ERROR("Disable failed\n"); - - return ret; - -err_no_disable: - DRM_ERROR("Cannot disable\n"); - return 1; -} - -const uint32_t *sti_layer_get_formats(struct sti_layer *layer) -{ - if (!layer) - return NULL; - - if (!layer->ops->get_formats) - return NULL; - - return layer->ops->get_formats(layer); -} - -unsigned int sti_layer_get_nb_formats(struct sti_layer *layer) -{ - if (!layer) - return 0; - - if (!layer->ops->get_nb_formats) - return 0; - - return layer->ops->get_nb_formats(layer); -} diff --git a/drivers/gpu/drm/sti/sti_layer.h b/drivers/gpu/drm/sti/sti_layer.h deleted file mode 100644 index ceff497f557e..000000000000 --- a/drivers/gpu/drm/sti/sti_layer.h +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright (C) STMicroelectronics SA 2014 - * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> - * Fabien Dessenne <fabien.dessenne@st.com> - * for STMicroelectronics. 
- * License terms: GNU General Public License (GPL), version 2 - */ - -#ifndef _STI_LAYER_H_ -#define _STI_LAYER_H_ - -#include <drm/drmP.h> - -#define to_sti_layer(x) container_of(x, struct sti_layer, plane) - -#define STI_LAYER_TYPE_SHIFT 8 -#define STI_LAYER_TYPE_MASK (~((1<<STI_LAYER_TYPE_SHIFT)-1)) - -struct sti_layer; - -enum sti_layer_type { - STI_GDP = 1 << STI_LAYER_TYPE_SHIFT, - STI_VID = 2 << STI_LAYER_TYPE_SHIFT, - STI_CUR = 3 << STI_LAYER_TYPE_SHIFT, - STI_BCK = 4 << STI_LAYER_TYPE_SHIFT, - STI_VDP = 5 << STI_LAYER_TYPE_SHIFT -}; - -enum sti_layer_id_of_type { - STI_ID_0 = 0, - STI_ID_1 = 1, - STI_ID_2 = 2, - STI_ID_3 = 3 -}; - -enum sti_layer_desc { - STI_GDP_0 = STI_GDP | STI_ID_0, - STI_GDP_1 = STI_GDP | STI_ID_1, - STI_GDP_2 = STI_GDP | STI_ID_2, - STI_GDP_3 = STI_GDP | STI_ID_3, - STI_VID_0 = STI_VID | STI_ID_0, - STI_VID_1 = STI_VID | STI_ID_1, - STI_HQVDP_0 = STI_VDP | STI_ID_0, - STI_CURSOR = STI_CUR, - STI_BACK = STI_BCK -}; - -/** - * STI layer functions structure - * - * @get_formats: get layer supported formats - * @get_nb_formats: get number of format supported - * @init: initialize the layer - * @prepare: prepare layer before rendering - * @commit: set layer for rendering - * @disable: disable layer - */ -struct sti_layer_funcs { - const uint32_t* (*get_formats)(struct sti_layer *layer); - unsigned int (*get_nb_formats)(struct sti_layer *layer); - void (*init)(struct sti_layer *layer); - int (*prepare)(struct sti_layer *layer, bool first_prepare); - int (*commit)(struct sti_layer *layer); - int (*disable)(struct sti_layer *layer); -}; - -/** - * STI layer structure - * - * @plane: drm plane it is bound to (if any) - * @fb: drm fb it is bound to - * @crtc: crtc it is bound to - * @mode: display mode - * @desc: layer type & id - * @device: driver device - * @regs: layer registers - * @ops: layer functions - * @zorder: layer z-order - * @mixer_id: id of the mixer used to display the layer - * @enabled: to know if the layer is active or not - * @src_x src_y: coordinates of the input (fb) area - * @src_w src_h: size of the input (fb) area - * @dst_x dst_y: coordinates of the output (crtc) area - * @dst_w dst_h: size of the output (crtc) area - * @format: format - * @pitches: pitch of 'planes' (eg: Y, U, V) - * @offsets: offset of 'planes' - * @vaddr: virtual address of the input buffer - * @paddr: physical address of the input buffer - */ -struct sti_layer { - struct drm_plane plane; - struct drm_framebuffer *fb; - struct drm_crtc *crtc; - struct drm_display_mode *mode; - enum sti_layer_desc desc; - struct device *dev; - void __iomem *regs; - const struct sti_layer_funcs *ops; - int zorder; - int mixer_id; - bool enabled; - int src_x, src_y; - int src_w, src_h; - int dst_x, dst_y; - int dst_w, dst_h; - uint32_t format; - unsigned int pitches[4]; - unsigned int offsets[4]; - void *vaddr; - dma_addr_t paddr; -}; - -struct sti_layer *sti_layer_create(struct device *dev, int desc, - void __iomem *baseaddr); -int sti_layer_prepare(struct sti_layer *layer, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_display_mode *mode, - int mixer_id, - int dest_x, int dest_y, - int dest_w, int dest_h, - int src_x, int src_y, - int src_w, int src_h); -int sti_layer_commit(struct sti_layer *layer); -int sti_layer_disable(struct sti_layer *layer); -const uint32_t *sti_layer_get_formats(struct sti_layer *layer); -unsigned int sti_layer_get_nb_formats(struct sti_layer *layer); -const char *sti_layer_to_str(struct sti_layer *layer); - -#endif diff --git 
a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c index 13a4b84deab6..0182e9365004 100644 --- a/drivers/gpu/drm/sti/sti_mixer.c +++ b/drivers/gpu/drm/sti/sti_mixer.c @@ -58,6 +58,7 @@ const char *sti_mixer_to_str(struct sti_mixer *mixer) return "<UNKNOWN MIXER>"; } } +EXPORT_SYMBOL(sti_mixer_to_str); static inline u32 sti_mixer_reg_read(struct sti_mixer *mixer, u32 reg_id) { @@ -101,52 +102,57 @@ static void sti_mixer_set_background_area(struct sti_mixer *mixer, sti_mixer_reg_write(mixer, GAM_MIXER_BCS, yds << 16 | xds); } -int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer) +int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane) { - int layer_id = 0, depth = layer->zorder; + int plane_id, depth = plane->zorder; + unsigned int i; u32 mask, val; - if (depth >= GAM_MIXER_NB_DEPTH_LEVEL) + if ((depth < 1) || (depth > GAM_MIXER_NB_DEPTH_LEVEL)) return 1; - switch (layer->desc) { + switch (plane->desc) { case STI_GDP_0: - layer_id = GAM_DEPTH_GDP0_ID; + plane_id = GAM_DEPTH_GDP0_ID; break; case STI_GDP_1: - layer_id = GAM_DEPTH_GDP1_ID; + plane_id = GAM_DEPTH_GDP1_ID; break; case STI_GDP_2: - layer_id = GAM_DEPTH_GDP2_ID; + plane_id = GAM_DEPTH_GDP2_ID; break; case STI_GDP_3: - layer_id = GAM_DEPTH_GDP3_ID; + plane_id = GAM_DEPTH_GDP3_ID; break; - case STI_VID_0: case STI_HQVDP_0: - layer_id = GAM_DEPTH_VID0_ID; - break; - case STI_VID_1: - layer_id = GAM_DEPTH_VID1_ID; + plane_id = GAM_DEPTH_VID0_ID; break; case STI_CURSOR: /* no need to set depth for cursor */ return 0; default: - DRM_ERROR("Unknown layer %d\n", layer->desc); + DRM_ERROR("Unknown plane %d\n", plane->desc); return 1; } - mask = GAM_DEPTH_MASK_ID << (3 * depth); - layer_id = layer_id << (3 * depth); + + /* Search if a previous depth was already assigned to the plane */ + val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB); + for (i = 0; i < GAM_MIXER_NB_DEPTH_LEVEL; i++) { + mask = GAM_DEPTH_MASK_ID << (3 * i); + if ((val & mask) == plane_id << (3 * i)) + break; + } + + mask |= GAM_DEPTH_MASK_ID << (3 * (depth - 1)); + plane_id = plane_id << (3 * (depth - 1)); DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer), - sti_layer_to_str(layer), depth); + sti_plane_to_str(plane), depth); dev_dbg(mixer->dev, "GAM_MIXER_CRB val 0x%x mask 0x%x\n", - layer_id, mask); + plane_id, mask); - val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB); val &= ~mask; - val |= layer_id; + val |= plane_id; sti_mixer_reg_write(mixer, GAM_MIXER_CRB, val); dev_dbg(mixer->dev, "Read GAM_MIXER_CRB 0x%x\n", @@ -176,9 +182,9 @@ int sti_mixer_active_video_area(struct sti_mixer *mixer, return 0; } -static u32 sti_mixer_get_layer_mask(struct sti_layer *layer) +static u32 sti_mixer_get_plane_mask(struct sti_plane *plane) { - switch (layer->desc) { + switch (plane->desc) { case STI_BACK: return GAM_CTL_BACK_MASK; case STI_GDP_0: @@ -189,11 +195,8 @@ static u32 sti_mixer_get_layer_mask(struct sti_layer *layer) return GAM_CTL_GDP2_MASK; case STI_GDP_3: return GAM_CTL_GDP3_MASK; - case STI_VID_0: case STI_HQVDP_0: return GAM_CTL_VID0_MASK; - case STI_VID_1: - return GAM_CTL_VID1_MASK; case STI_CURSOR: return GAM_CTL_CURSOR_MASK; default: @@ -201,17 +204,17 @@ static u32 sti_mixer_get_layer_mask(struct sti_layer *layer) } } -int sti_mixer_set_layer_status(struct sti_mixer *mixer, - struct sti_layer *layer, bool status) +int sti_mixer_set_plane_status(struct sti_mixer *mixer, + struct sti_plane *plane, bool status) { u32 mask, val; DRM_DEBUG_DRIVER("%s %s %s\n", status ? 
"enable" : "disable", - sti_mixer_to_str(mixer), sti_layer_to_str(layer)); + sti_mixer_to_str(mixer), sti_plane_to_str(plane)); - mask = sti_mixer_get_layer_mask(layer); + mask = sti_mixer_get_plane_mask(plane); if (!mask) { - DRM_ERROR("Can not find layer mask\n"); + DRM_ERROR("Can't find layer mask\n"); return -EINVAL; } @@ -223,15 +226,6 @@ int sti_mixer_set_layer_status(struct sti_mixer *mixer, return 0; } -void sti_mixer_clear_all_layers(struct sti_mixer *mixer) -{ - u32 val; - - DRM_DEBUG_DRIVER("%s clear all layer\n", sti_mixer_to_str(mixer)); - val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL) & 0xFFFF0000; - sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val); -} - void sti_mixer_set_matrix(struct sti_mixer *mixer) { unsigned int i; diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h index b97282182908..efb1a9a5ba86 100644 --- a/drivers/gpu/drm/sti/sti_mixer.h +++ b/drivers/gpu/drm/sti/sti_mixer.h @@ -11,10 +11,16 @@ #include <drm/drmP.h> -#include "sti_layer.h" +#include "sti_plane.h" #define to_sti_mixer(x) container_of(x, struct sti_mixer, drm_crtc) +enum sti_mixer_status { + STI_MIXER_READY, + STI_MIXER_DISABLING, + STI_MIXER_DISABLED, +}; + /** * STI Mixer subdevice structure * @@ -23,33 +29,32 @@ * @id: id of the mixer * @drm_crtc: crtc object link to the mixer * @pending_event: set if a flip event is pending on crtc - * @enabled: to know if the mixer is active or not + * @status: to know the status of the mixer */ struct sti_mixer { struct device *dev; void __iomem *regs; int id; - struct drm_crtc drm_crtc; + struct drm_crtc drm_crtc; struct drm_pending_vblank_event *pending_event; - bool enabled; + enum sti_mixer_status status; }; const char *sti_mixer_to_str(struct sti_mixer *mixer); struct sti_mixer *sti_mixer_create(struct device *dev, int id, - void __iomem *baseaddr); + void __iomem *baseaddr); -int sti_mixer_set_layer_status(struct sti_mixer *mixer, - struct sti_layer *layer, bool status); -void sti_mixer_clear_all_layers(struct sti_mixer *mixer); -int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer); +int sti_mixer_set_plane_status(struct sti_mixer *mixer, + struct sti_plane *plane, bool status); +int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane); int sti_mixer_active_video_area(struct sti_mixer *mixer, - struct drm_display_mode *mode); + struct drm_display_mode *mode); void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable); /* depth in Cross-bar control = z order */ -#define GAM_MIXER_NB_DEPTH_LEVEL 7 +#define GAM_MIXER_NB_DEPTH_LEVEL 6 #define STI_MIXER_MAIN 0 #define STI_MIXER_AUX 1 diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c new file mode 100644 index 000000000000..d5c5e91f2956 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_plane.c @@ -0,0 +1,122 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> + * Fabien Dessenne <fabien.dessenne@st.com> + * for STMicroelectronics. 
+ * License terms: GNU General Public License (GPL), version 2 + */ + +#include <drm/drmP.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_gem_cma_helper.h> + +#include "sti_compositor.h" +#include "sti_drv.h" +#include "sti_plane.h" + +/* (Background) < GDP0 < GDP1 < HQVDP0 < GDP2 < GDP3 < (ForeGround) */ +enum sti_plane_desc sti_plane_default_zorder[] = { + STI_GDP_0, + STI_GDP_1, + STI_HQVDP_0, + STI_GDP_2, + STI_GDP_3, +}; + +const char *sti_plane_to_str(struct sti_plane *plane) +{ + switch (plane->desc) { + case STI_GDP_0: + return "GDP0"; + case STI_GDP_1: + return "GDP1"; + case STI_GDP_2: + return "GDP2"; + case STI_GDP_3: + return "GDP3"; + case STI_HQVDP_0: + return "HQVDP0"; + case STI_CURSOR: + return "CURSOR"; + default: + return "<UNKNOWN PLANE>"; + } +} +EXPORT_SYMBOL(sti_plane_to_str); + +static void sti_plane_destroy(struct drm_plane *drm_plane) +{ + DRM_DEBUG_DRIVER("\n"); + + drm_plane_helper_disable(drm_plane); + drm_plane_cleanup(drm_plane); +} + +static int sti_plane_set_property(struct drm_plane *drm_plane, + struct drm_property *property, + uint64_t val) +{ + struct drm_device *dev = drm_plane->dev; + struct sti_private *private = dev->dev_private; + struct sti_plane *plane = to_sti_plane(drm_plane); + + DRM_DEBUG_DRIVER("\n"); + + if (property == private->plane_zorder_property) { + plane->zorder = val; + return 0; + } + + return -EINVAL; +} + +static void sti_plane_attach_zorder_property(struct drm_plane *drm_plane) +{ + struct drm_device *dev = drm_plane->dev; + struct sti_private *private = dev->dev_private; + struct sti_plane *plane = to_sti_plane(drm_plane); + struct drm_property *prop; + + prop = private->plane_zorder_property; + if (!prop) { + prop = drm_property_create_range(dev, 0, "zpos", 1, + GAM_MIXER_NB_DEPTH_LEVEL); + if (!prop) + return; + + private->plane_zorder_property = prop; + } + + drm_object_attach_property(&drm_plane->base, prop, plane->zorder); +} + +void sti_plane_init_property(struct sti_plane *plane, + enum drm_plane_type type) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(sti_plane_default_zorder); i++) + if (sti_plane_default_zorder[i] == plane->desc) + break; + + plane->zorder = i + 1; + + if (type == DRM_PLANE_TYPE_OVERLAY) + sti_plane_attach_zorder_property(&plane->drm_plane); + + DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%d\n", + plane->drm_plane.base.id, + sti_plane_to_str(plane), plane->zorder); +} +EXPORT_SYMBOL(sti_plane_init_property); + +struct drm_plane_funcs sti_plane_helpers_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = sti_plane_destroy, + .set_property = sti_plane_set_property, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; +EXPORT_SYMBOL(sti_plane_helpers_funcs); diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h new file mode 100644 index 000000000000..86f1e6fc81b9 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_plane.h @@ -0,0 +1,71 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics. 
+ * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _STI_PLANE_H_ +#define _STI_PLANE_H_ + +#include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_plane_helper.h> + +extern struct drm_plane_funcs sti_plane_helpers_funcs; + +#define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane) + +#define STI_PLANE_TYPE_SHIFT 8 +#define STI_PLANE_TYPE_MASK (~((1 << STI_PLANE_TYPE_SHIFT) - 1)) + +enum sti_plane_type { + STI_GDP = 1 << STI_PLANE_TYPE_SHIFT, + STI_VDP = 2 << STI_PLANE_TYPE_SHIFT, + STI_CUR = 3 << STI_PLANE_TYPE_SHIFT, + STI_BCK = 4 << STI_PLANE_TYPE_SHIFT +}; + +enum sti_plane_id_of_type { + STI_ID_0 = 0, + STI_ID_1 = 1, + STI_ID_2 = 2, + STI_ID_3 = 3 +}; + +enum sti_plane_desc { + STI_GDP_0 = STI_GDP | STI_ID_0, + STI_GDP_1 = STI_GDP | STI_ID_1, + STI_GDP_2 = STI_GDP | STI_ID_2, + STI_GDP_3 = STI_GDP | STI_ID_3, + STI_HQVDP_0 = STI_VDP | STI_ID_0, + STI_CURSOR = STI_CUR, + STI_BACK = STI_BCK +}; + +enum sti_plane_status { + STI_PLANE_READY, + STI_PLANE_UPDATED, + STI_PLANE_DISABLING, + STI_PLANE_FLUSHING, + STI_PLANE_DISABLED, +}; + +/** + * STI plane structure + * + * @plane: drm plane it is bound to (if any) + * @desc: plane type & id + * @status: to know the status of the plane + * @zorder: plane z-order + */ +struct sti_plane { + struct drm_plane drm_plane; + enum sti_plane_desc desc; + enum sti_plane_status status; + int zorder; +}; + +const char *sti_plane_to_str(struct sti_plane *plane); +void sti_plane_init_property(struct sti_plane *plane, + enum drm_plane_type type); +#endif diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c index 5cc53116508e..c1aac8e66fb5 100644 --- a/drivers/gpu/drm/sti/sti_tvout.c +++ b/drivers/gpu/drm/sti/sti_tvout.c @@ -16,7 +16,7 @@ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> -#include "sti_drm_crtc.h" +#include "sti_crtc.h" /* glue registers */ #define TVO_CSC_MAIN_M0 0x000 @@ -473,7 +473,7 @@ static void sti_dvo_encoder_commit(struct drm_encoder *encoder) { struct sti_tvout *tvout = to_sti_tvout(encoder); - tvout_dvo_start(tvout, sti_drm_crtc_is_main(encoder->crtc)); + tvout_dvo_start(tvout, sti_crtc_is_main(encoder->crtc)); } static void sti_dvo_encoder_disable(struct drm_encoder *encoder) @@ -523,7 +523,7 @@ static void sti_hda_encoder_commit(struct drm_encoder *encoder) { struct sti_tvout *tvout = to_sti_tvout(encoder); - tvout_hda_start(tvout, sti_drm_crtc_is_main(encoder->crtc)); + tvout_hda_start(tvout, sti_crtc_is_main(encoder->crtc)); } static void sti_hda_encoder_disable(struct drm_encoder *encoder) @@ -575,7 +575,7 @@ static void sti_hdmi_encoder_commit(struct drm_encoder *encoder) { struct sti_tvout *tvout = to_sti_tvout(encoder); - tvout_hdmi_start(tvout, sti_drm_crtc_is_main(encoder->crtc)); + tvout_hdmi_start(tvout, sti_crtc_is_main(encoder->crtc)); } static void sti_hdmi_encoder_disable(struct drm_encoder *encoder) @@ -644,7 +644,6 @@ static int sti_tvout_bind(struct device *dev, struct device *master, void *data) struct sti_tvout *tvout = dev_get_drvdata(dev); struct drm_device *drm_dev = data; unsigned int i; - int ret; tvout->drm_dev = drm_dev; @@ -658,17 +657,15 @@ static int sti_tvout_bind(struct device *dev, struct device *master, void *data) sti_tvout_create_encoders(drm_dev, tvout); - ret = component_bind_all(dev, drm_dev); - if (ret) - sti_tvout_destroy_encoders(tvout); - - return ret; + return 0; } static void sti_tvout_unbind(struct device *dev, struct device *master, void *data) { - /* do nothing */ + struct sti_tvout *tvout = 
dev_get_drvdata(dev); + + sti_tvout_destroy_encoders(tvout); } static const struct component_ops sti_tvout_ops = { @@ -676,34 +673,12 @@ static const struct component_ops sti_tvout_ops = { .unbind = sti_tvout_unbind, }; -static int compare_of(struct device *dev, void *data) -{ - return dev->of_node == data; -} - -static int sti_tvout_master_bind(struct device *dev) -{ - return 0; -} - -static void sti_tvout_master_unbind(struct device *dev) -{ - /* do nothing */ -} - -static const struct component_master_ops sti_tvout_master_ops = { - .bind = sti_tvout_master_bind, - .unbind = sti_tvout_master_unbind, -}; - static int sti_tvout_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = dev->of_node; struct sti_tvout *tvout; struct resource *res; - struct device_node *child_np; - struct component_match *match = NULL; DRM_INFO("%s\n", __func__); @@ -734,24 +709,11 @@ static int sti_tvout_probe(struct platform_device *pdev) platform_set_drvdata(pdev, tvout); - of_platform_populate(node, NULL, NULL, dev); - - child_np = of_get_next_available_child(node, NULL); - - while (child_np) { - component_match_add(dev, &match, compare_of, child_np); - of_node_put(child_np); - child_np = of_get_next_available_child(node, child_np); - } - - component_master_add_with_match(dev, &sti_tvout_master_ops, match); - return component_add(dev, &sti_tvout_ops); } static int sti_tvout_remove(struct platform_device *pdev) { - component_master_del(&pdev->dev, &sti_tvout_master_ops); component_del(&pdev->dev, &sti_tvout_ops); return 0; } diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c index 10ced6a479f4..a8254cc362a1 100644 --- a/drivers/gpu/drm/sti/sti_vid.c +++ b/drivers/gpu/drm/sti/sti_vid.c @@ -6,7 +6,7 @@ #include <drm/drmP.h> -#include "sti_layer.h" +#include "sti_plane.h" #include "sti_vid.h" #include "sti_vtg.h" @@ -43,35 +43,37 @@ #define VID_MPR2_BT709 0x07150545 #define VID_MPR3_BT709 0x00000AE8 -static int sti_vid_prepare_layer(struct sti_layer *vid, bool first_prepare) +void sti_vid_commit(struct sti_vid *vid, + struct drm_plane_state *state) { - u32 val; + struct drm_crtc *crtc = state->crtc; + struct drm_display_mode *mode = &crtc->mode; + int dst_x = state->crtc_x; + int dst_y = state->crtc_y; + int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x); + int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y); + u32 val, ydo, xdo, yds, xds; + + /* Input / output size + * Align to upper even value */ + dst_w = ALIGN(dst_w, 2); + dst_h = ALIGN(dst_h, 2); /* Unmask */ val = readl(vid->regs + VID_CTL); val &= ~VID_CTL_IGNORE; writel(val, vid->regs + VID_CTL); - return 0; -} - -static int sti_vid_commit_layer(struct sti_layer *vid) -{ - struct drm_display_mode *mode = vid->mode; - u32 ydo, xdo, yds, xds; - - ydo = sti_vtg_get_line_number(*mode, vid->dst_y); - yds = sti_vtg_get_line_number(*mode, vid->dst_y + vid->dst_h - 1); - xdo = sti_vtg_get_pixel_number(*mode, vid->dst_x); - xds = sti_vtg_get_pixel_number(*mode, vid->dst_x + vid->dst_w - 1); + ydo = sti_vtg_get_line_number(*mode, dst_y); + yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1); + xdo = sti_vtg_get_pixel_number(*mode, dst_x); + xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1); writel((ydo << 16) | xdo, vid->regs + VID_VPO); writel((yds << 16) | xds, vid->regs + VID_VPS); - - return 0; } -static int sti_vid_disable_layer(struct sti_layer *vid) +void sti_vid_disable(struct sti_vid *vid) { u32 val; @@ -79,21 +81,9 @@ static int 
sti_vid_disable_layer(struct sti_layer *vid) val = readl(vid->regs + VID_CTL); val |= VID_CTL_IGNORE; writel(val, vid->regs + VID_CTL); - - return 0; } -static const uint32_t *sti_vid_get_formats(struct sti_layer *layer) -{ - return NULL; -} - -static unsigned int sti_vid_get_nb_formats(struct sti_layer *layer) -{ - return 0; -} - -static void sti_vid_init(struct sti_layer *vid) +static void sti_vid_init(struct sti_vid *vid) { /* Enable PSI, Mask layer */ writel(VID_CTL_PSI_ENABLE | VID_CTL_IGNORE, vid->regs + VID_CTL); @@ -113,18 +103,10 @@ static void sti_vid_init(struct sti_layer *vid) writel(VID_CSAT_DFLT, vid->regs + VID_CSAT); } -static const struct sti_layer_funcs vid_ops = { - .get_formats = sti_vid_get_formats, - .get_nb_formats = sti_vid_get_nb_formats, - .init = sti_vid_init, - .prepare = sti_vid_prepare_layer, - .commit = sti_vid_commit_layer, - .disable = sti_vid_disable_layer, -}; - -struct sti_layer *sti_vid_create(struct device *dev) +struct sti_vid *sti_vid_create(struct device *dev, int id, + void __iomem *baseaddr) { - struct sti_layer *vid; + struct sti_vid *vid; vid = devm_kzalloc(dev, sizeof(*vid), GFP_KERNEL); if (!vid) { @@ -132,7 +114,11 @@ struct sti_layer *sti_vid_create(struct device *dev) return NULL; } - vid->ops = &vid_ops; + vid->dev = dev; + vid->regs = baseaddr; + vid->id = id; + + sti_vid_init(vid); return vid; } diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h index 2c0aecd63294..5dea4791f1d6 100644 --- a/drivers/gpu/drm/sti/sti_vid.h +++ b/drivers/gpu/drm/sti/sti_vid.h @@ -7,6 +7,23 @@ #ifndef _STI_VID_H_ #define _STI_VID_H_ -struct sti_layer *sti_vid_create(struct device *dev); +/** + * STI VID structure + * + * @dev: driver device + * @regs: vid registers + * @id: id of the vid + */ +struct sti_vid { + struct device *dev; + void __iomem *regs; + int id; +}; + +void sti_vid_commit(struct sti_vid *vid, + struct drm_plane_state *state); +void sti_vid_disable(struct sti_vid *vid); +struct sti_vid *sti_vid_create(struct device *dev, int id, + void __iomem *baseaddr); #endif diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index bf8ef3133e5b..ddefb85dc4f7 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -76,6 +76,14 @@ to_tegra_plane_state(struct drm_plane_state *state) return NULL; } +static void tegra_dc_stats_reset(struct tegra_dc_stats *stats) +{ + stats->frames = 0; + stats->vblank = 0; + stats->underflow = 0; + stats->overflow = 0; +} + /* * Reads the active copy of a register. This takes the dc->lock spinlock to * prevent races with the VBLANK processing which also needs access to the @@ -759,7 +767,6 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane, /* position the cursor */ value = (state->crtc_y & 0x3fff) << 16 | (state->crtc_x & 0x3fff); tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION); - } static void tegra_cursor_atomic_disable(struct drm_plane *plane, @@ -809,9 +816,11 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm, return ERR_PTR(-ENOMEM); /* - * We'll treat the cursor as an overlay plane with index 6 here so - * that the update and activation request bits in DC_CMD_STATE_CONTROL - * match up. + * This index is kind of fake. The cursor isn't a regular plane, but + * its update and activation request bits in DC_CMD_STATE_CONTROL do + * use the same programming. Setting this fake index here allows the + * code in tegra_add_plane_state() to do the right thing without the + * need to special-casing the cursor plane. 
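+ * (Illustration, assuming the DC_CMD_STATE_CONTROL bit layout from
+ * dc.h: WIN_A_ACT_REQ is bit 1 and CURSOR_ACT_REQ is bit 7, while
+ * WIN_A_UPDATE is bit 9 and CURSOR_UPDATE is bit 15, so shifting the
+ * window-A masks left by this fake index 6 lands exactly on the
+ * cursor request bits.)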
*/ plane->index = 6; @@ -1015,6 +1024,8 @@ static void tegra_crtc_reset(struct drm_crtc *crtc) crtc->state = &state->base; crtc->state->crtc = crtc; } + + drm_crtc_vblank_reset(crtc); } static struct drm_crtc_state * @@ -1052,90 +1063,6 @@ static const struct drm_crtc_funcs tegra_crtc_funcs = { .atomic_destroy_state = tegra_crtc_atomic_destroy_state, }; -static void tegra_dc_stop(struct tegra_dc *dc) -{ - u32 value; - - /* stop the display controller */ - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); - value &= ~DISP_CTRL_MODE_MASK; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); - - tegra_dc_commit(dc); -} - -static bool tegra_dc_idle(struct tegra_dc *dc) -{ - u32 value; - - value = tegra_dc_readl_active(dc, DC_CMD_DISPLAY_COMMAND); - - return (value & DISP_CTRL_MODE_MASK) == 0; -} - -static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout) -{ - timeout = jiffies + msecs_to_jiffies(timeout); - - while (time_before(jiffies, timeout)) { - if (tegra_dc_idle(dc)) - return 0; - - usleep_range(1000, 2000); - } - - dev_dbg(dc->dev, "timeout waiting for DC to become idle\n"); - return -ETIMEDOUT; -} - -static void tegra_crtc_disable(struct drm_crtc *crtc) -{ - struct tegra_dc *dc = to_tegra_dc(crtc); - u32 value; - - if (!tegra_dc_idle(dc)) { - tegra_dc_stop(dc); - - /* - * Ignore the return value, there isn't anything useful to do - * in case this fails. - */ - tegra_dc_wait_idle(dc, 100); - } - - /* - * This should really be part of the RGB encoder driver, but clearing - * these bits has the side-effect of stopping the display controller. - * When that happens no VBLANK interrupts will be raised. At the same - * time the encoder is disabled before the display controller, so the - * above code is always going to timeout waiting for the controller - * to go idle. - * - * Given the close coupling between the RGB encoder and the display - * controller doing it here is still kind of okay. None of the other - * encoder drivers require these bits to be cleared. - * - * XXX: Perhaps given that the display controller is switched off at - * this point anyway maybe clearing these bits isn't even useful for - * the RGB encoder? 
- */ - if (dc->rgb) { - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); - value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | - PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - } - - drm_crtc_vblank_off(crtc); -} - -static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted) -{ - return true; -} - static int tegra_dc_set_timings(struct tegra_dc *dc, struct drm_display_mode *mode) { @@ -1229,7 +1156,85 @@ static void tegra_dc_commit_state(struct tegra_dc *dc, tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); } -static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc) +static void tegra_dc_stop(struct tegra_dc *dc) +{ + u32 value; + + /* stop the display controller */ + value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); + value &= ~DISP_CTRL_MODE_MASK; + tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); + + tegra_dc_commit(dc); +} + +static bool tegra_dc_idle(struct tegra_dc *dc) +{ + u32 value; + + value = tegra_dc_readl_active(dc, DC_CMD_DISPLAY_COMMAND); + + return (value & DISP_CTRL_MODE_MASK) == 0; +} + +static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout) +{ + timeout = jiffies + msecs_to_jiffies(timeout); + + while (time_before(jiffies, timeout)) { + if (tegra_dc_idle(dc)) + return 0; + + usleep_range(1000, 2000); + } + + dev_dbg(dc->dev, "timeout waiting for DC to become idle\n"); + return -ETIMEDOUT; +} + +static void tegra_crtc_disable(struct drm_crtc *crtc) +{ + struct tegra_dc *dc = to_tegra_dc(crtc); + u32 value; + + if (!tegra_dc_idle(dc)) { + tegra_dc_stop(dc); + + /* + * Ignore the return value, there isn't anything useful to do + * in case this fails. + */ + tegra_dc_wait_idle(dc, 100); + } + + /* + * This should really be part of the RGB encoder driver, but clearing + * these bits has the side-effect of stopping the display controller. + * When that happens no VBLANK interrupts will be raised. At the same + * time the encoder is disabled before the display controller, so the + * above code is always going to timeout waiting for the controller + * to go idle. + * + * Given the close coupling between the RGB encoder and the display + * controller doing it here is still kind of okay. None of the other + * encoder drivers require these bits to be cleared. + * + * XXX: Perhaps given that the display controller is switched off at + * this point anyway maybe clearing these bits isn't even useful for + * the RGB encoder? 
+ */ + if (dc->rgb) { + value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); + value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | + PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); + tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); + } + + tegra_dc_stats_reset(&dc->stats); + drm_crtc_vblank_off(crtc); +} + +static void tegra_crtc_enable(struct drm_crtc *crtc) { struct drm_display_mode *mode = &crtc->state->adjusted_mode; struct tegra_dc_state *state = to_dc_state(crtc->state); @@ -1259,15 +1264,7 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc) tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); tegra_dc_commit(dc); -} - -static void tegra_crtc_prepare(struct drm_crtc *crtc) -{ - drm_crtc_vblank_off(crtc); -} -static void tegra_crtc_commit(struct drm_crtc *crtc) -{ drm_crtc_vblank_on(crtc); } @@ -1304,10 +1301,7 @@ static void tegra_crtc_atomic_flush(struct drm_crtc *crtc, static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = { .disable = tegra_crtc_disable, - .mode_fixup = tegra_crtc_mode_fixup, - .mode_set_nofb = tegra_crtc_mode_set_nofb, - .prepare = tegra_crtc_prepare, - .commit = tegra_crtc_commit, + .enable = tegra_crtc_enable, .atomic_check = tegra_crtc_atomic_check, .atomic_begin = tegra_crtc_atomic_begin, .atomic_flush = tegra_crtc_atomic_flush, @@ -1325,6 +1319,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *data) /* dev_dbg(dc->dev, "%s(): frame end\n", __func__); */ + dc->stats.frames++; } if (status & VBLANK_INT) { @@ -1333,12 +1328,21 @@ static irqreturn_t tegra_dc_irq(int irq, void *data) */ drm_crtc_handle_vblank(&dc->base); tegra_dc_finish_page_flip(dc); + dc->stats.vblank++; } if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) { /* dev_dbg(dc->dev, "%s(): underflow\n", __func__); */ + dc->stats.underflow++; + } + + if (status & (WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT)) { + /* + dev_dbg(dc->dev, "%s(): overflow\n", __func__); + */ + dc->stats.overflow++; } return IRQ_HANDLED; @@ -1348,6 +1352,14 @@ static int tegra_dc_show_regs(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct tegra_dc *dc = node->info_ent->data; + int err = 0; + + drm_modeset_lock_crtc(&dc->base, NULL); + + if (!dc->base.state->active) { + err = -EBUSY; + goto unlock; + } #define DUMP_REG(name) \ seq_printf(s, "%-40s %#05x %08x\n", #name, name, \ @@ -1568,11 +1580,59 @@ static int tegra_dc_show_regs(struct seq_file *s, void *data) #undef DUMP_REG +unlock: + drm_modeset_unlock_crtc(&dc->base); + return err; +} + +static int tegra_dc_show_crc(struct seq_file *s, void *data) +{ + struct drm_info_node *node = s->private; + struct tegra_dc *dc = node->info_ent->data; + int err = 0; + u32 value; + + drm_modeset_lock_crtc(&dc->base, NULL); + + if (!dc->base.state->active) { + err = -EBUSY; + goto unlock; + } + + value = DC_COM_CRC_CONTROL_ACTIVE_DATA | DC_COM_CRC_CONTROL_ENABLE; + tegra_dc_writel(dc, value, DC_COM_CRC_CONTROL); + tegra_dc_commit(dc); + + drm_crtc_wait_one_vblank(&dc->base); + drm_crtc_wait_one_vblank(&dc->base); + + value = tegra_dc_readl(dc, DC_COM_CRC_CHECKSUM); + seq_printf(s, "%08x\n", value); + + tegra_dc_writel(dc, 0, DC_COM_CRC_CONTROL); + +unlock: + drm_modeset_unlock_crtc(&dc->base); + return err; +} + +static int tegra_dc_show_stats(struct seq_file *s, void *data) +{ + struct drm_info_node *node = s->private; + struct tegra_dc *dc = node->info_ent->data; + + seq_printf(s, "frames: %lu\n", dc->stats.frames); + seq_printf(s, "vblank: %lu\n", dc->stats.vblank); + seq_printf(s, "underflow: 
%lu\n", dc->stats.underflow); + seq_printf(s, "overflow: %lu\n", dc->stats.overflow); + return 0; } static struct drm_info_list debugfs_files[] = { { "regs", tegra_dc_show_regs, 0, NULL }, + { "crc", tegra_dc_show_crc, 0, NULL }, + { "stats", tegra_dc_show_stats, 0, NULL }, }; static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor) @@ -1718,7 +1778,8 @@ static int tegra_dc_init(struct host1x_client *client) tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC); } - value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT; + value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | + WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT; tegra_dc_writel(dc, value, DC_CMD_INT_TYPE); value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | @@ -1734,15 +1795,19 @@ static int tegra_dc_init(struct host1x_client *client) WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1); tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER); - value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; + value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | + WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT; tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE); - value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; + value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | + WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT; tegra_dc_writel(dc, value, DC_CMD_INT_MASK); if (dc->soc->supports_border_color) tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR); + tegra_dc_stats_reset(&dc->stats); + return 0; cleanup: @@ -1828,8 +1893,20 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = { .has_powergate = true, }; +static const struct tegra_dc_soc_info tegra210_dc_soc_info = { + .supports_border_color = false, + .supports_interlacing = true, + .supports_cursor = true, + .supports_block_linear = true, + .pitch_align = 64, + .has_powergate = true, +}; + static const struct of_device_id tegra_dc_of_match[] = { { + .compatible = "nvidia,tegra210-dc", + .data = &tegra210_dc_soc_info, + }, { .compatible = "nvidia,tegra124-dc", .data = &tegra124_dc_soc_info, }, { @@ -1959,6 +2036,10 @@ static int tegra_dc_probe(struct platform_device *pdev) return -ENXIO; } + dc->syncpt = host1x_syncpt_request(&pdev->dev, flags); + if (!dc->syncpt) + dev_warn(&pdev->dev, "failed to allocate syncpoint\n"); + INIT_LIST_HEAD(&dc->client.list); dc->client.ops = &dc_client_ops; dc->client.dev = &pdev->dev; @@ -1976,10 +2057,6 @@ static int tegra_dc_probe(struct platform_device *pdev) return err; } - dc->syncpt = host1x_syncpt_request(&pdev->dev, flags); - if (!dc->syncpt) - dev_warn(&pdev->dev, "failed to allocate syncpoint\n"); - platform_set_drvdata(pdev, dc); return 0; @@ -2018,7 +2095,6 @@ static int tegra_dc_remove(struct platform_device *pdev) struct platform_driver tegra_dc_driver = { .driver = { .name = "tegra-dc", - .owner = THIS_MODULE, .of_match_table = tegra_dc_of_match, }, .probe = tegra_dc_probe, diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h index 55792daabbb5..4a268635749b 100644 --- a/drivers/gpu/drm/tegra/dc.h +++ b/drivers/gpu/drm/tegra/dc.h @@ -86,6 +86,11 @@ #define DC_CMD_REG_ACT_CONTROL 0x043 #define DC_COM_CRC_CONTROL 0x300 +#define DC_COM_CRC_CONTROL_ALWAYS (1 << 3) +#define DC_COM_CRC_CONTROL_FULL_FRAME (0 << 2) +#define DC_COM_CRC_CONTROL_ACTIVE_DATA (1 << 2) +#define DC_COM_CRC_CONTROL_WAIT (1 << 1) +#define DC_COM_CRC_CONTROL_ENABLE (1 << 0) #define DC_COM_CRC_CHECKSUM 0x301 #define DC_COM_PIN_OUTPUT_ENABLE(x) (0x302 + (x)) #define DC_COM_PIN_OUTPUT_POLARITY(x) (0x306 + (x)) @@ 
-114,15 +119,17 @@ #define DC_COM_CRC_CHECKSUM_LATCHED 0x329 #define DC_DISP_DISP_SIGNAL_OPTIONS0 0x400 -#define H_PULSE_0_ENABLE (1 << 8) -#define H_PULSE_1_ENABLE (1 << 10) -#define H_PULSE_2_ENABLE (1 << 12) +#define H_PULSE0_ENABLE (1 << 8) +#define H_PULSE1_ENABLE (1 << 10) +#define H_PULSE2_ENABLE (1 << 12) #define DC_DISP_DISP_SIGNAL_OPTIONS1 0x401 #define DC_DISP_DISP_WIN_OPTIONS 0x402 #define HDMI_ENABLE (1 << 30) #define DSI_ENABLE (1 << 29) +#define SOR1_TIMING_CYA (1 << 27) +#define SOR1_ENABLE (1 << 26) #define SOR_ENABLE (1 << 25) #define CURSOR_ENABLE (1 << 16) @@ -242,9 +249,20 @@ #define BASE_COLOR_SIZE565 (6 << 0) #define BASE_COLOR_SIZE332 (7 << 0) #define BASE_COLOR_SIZE888 (8 << 0) +#define DITHER_CONTROL_MASK (3 << 8) #define DITHER_CONTROL_DISABLE (0 << 8) #define DITHER_CONTROL_ORDERED (2 << 8) #define DITHER_CONTROL_ERRDIFF (3 << 8) +#define BASE_COLOR_SIZE_MASK (0xf << 0) +#define BASE_COLOR_SIZE_666 (0 << 0) +#define BASE_COLOR_SIZE_111 (1 << 0) +#define BASE_COLOR_SIZE_222 (2 << 0) +#define BASE_COLOR_SIZE_333 (3 << 0) +#define BASE_COLOR_SIZE_444 (4 << 0) +#define BASE_COLOR_SIZE_555 (5 << 0) +#define BASE_COLOR_SIZE_565 (6 << 0) +#define BASE_COLOR_SIZE_332 (7 << 0) +#define BASE_COLOR_SIZE_888 (8 << 0) #define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431 #define SC1_H_QUALIFIER_NONE (1 << 16) diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c index 07b26972f487..224a7dc8e4ed 100644 --- a/drivers/gpu/drm/tegra/dpaux.c +++ b/drivers/gpu/drm/tegra/dpaux.c @@ -294,26 +294,41 @@ static int tegra_dpaux_probe(struct platform_device *pdev) } dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux"); - if (IS_ERR(dpaux->rst)) + if (IS_ERR(dpaux->rst)) { + dev_err(&pdev->dev, "failed to get reset control: %ld\n", + PTR_ERR(dpaux->rst)); return PTR_ERR(dpaux->rst); + } dpaux->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(dpaux->clk)) + if (IS_ERR(dpaux->clk)) { + dev_err(&pdev->dev, "failed to get module clock: %ld\n", + PTR_ERR(dpaux->clk)); return PTR_ERR(dpaux->clk); + } err = clk_prepare_enable(dpaux->clk); - if (err < 0) + if (err < 0) { + dev_err(&pdev->dev, "failed to enable module clock: %d\n", + err); return err; + } reset_control_deassert(dpaux->rst); dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent"); - if (IS_ERR(dpaux->clk_parent)) + if (IS_ERR(dpaux->clk_parent)) { + dev_err(&pdev->dev, "failed to get parent clock: %ld\n", + PTR_ERR(dpaux->clk_parent)); return PTR_ERR(dpaux->clk_parent); + } err = clk_prepare_enable(dpaux->clk_parent); - if (err < 0) + if (err < 0) { + dev_err(&pdev->dev, "failed to enable parent clock: %d\n", + err); return err; + } err = clk_set_rate(dpaux->clk_parent, 270000000); if (err < 0) { @@ -323,8 +338,11 @@ static int tegra_dpaux_probe(struct platform_device *pdev) } dpaux->vdd = devm_regulator_get(&pdev->dev, "vdd"); - if (IS_ERR(dpaux->vdd)) + if (IS_ERR(dpaux->vdd)) { + dev_err(&pdev->dev, "failed to get VDD supply: %ld\n", + PTR_ERR(dpaux->vdd)); return PTR_ERR(dpaux->vdd); + } err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0, dev_name(dpaux->dev), dpaux); @@ -334,6 +352,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev) return err; } + disable_irq(dpaux->irq); + dpaux->aux.transfer = tegra_dpaux_transfer; dpaux->aux.dev = &pdev->dev; @@ -341,6 +361,24 @@ static int tegra_dpaux_probe(struct platform_device *pdev) if (err < 0) return err; + /* + * Assume that by default the DPAUX/I2C pads will be used for HDMI, + * so power them up and configure them in I2C mode. 
+ * + * The DPAUX code paths reconfigure the pads in AUX mode, but there + * is no possibility to perform the I2C mode configuration in the + * HDMI path. + */ + value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE); + value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN; + tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE); + + value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_PADCTL); + value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV | + DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV | + DPAUX_HYBRID_PADCTL_MODE_I2C; + tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL); + /* enable and clear all interrupts */ value = DPAUX_INTR_AUX_DONE | DPAUX_INTR_IRQ_EVENT | DPAUX_INTR_UNPLUG_EVENT | DPAUX_INTR_PLUG_EVENT; @@ -359,6 +397,12 @@ static int tegra_dpaux_probe(struct platform_device *pdev) static int tegra_dpaux_remove(struct platform_device *pdev) { struct tegra_dpaux *dpaux = platform_get_drvdata(pdev); + u32 value; + + /* make sure pads are powered down when not in use */ + value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE); + value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN; + tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE); drm_dp_aux_unregister(&dpaux->aux); @@ -376,6 +420,7 @@ static int tegra_dpaux_remove(struct platform_device *pdev) } static const struct of_device_id tegra_dpaux_of_match[] = { + { .compatible = "nvidia,tegra210-dpaux", }, { .compatible = "nvidia,tegra124-dpaux", }, { }, }; @@ -425,8 +470,10 @@ int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output) enum drm_connector_status status; status = tegra_dpaux_detect(dpaux); - if (status == connector_status_connected) + if (status == connector_status_connected) { + enable_irq(dpaux->irq); return 0; + } usleep_range(1000, 2000); } @@ -439,6 +486,8 @@ int tegra_dpaux_detach(struct tegra_dpaux *dpaux) unsigned long timeout; int err; + disable_irq(dpaux->irq); + err = regulator_disable(dpaux->vdd); if (err < 0) return err; diff --git a/drivers/gpu/drm/tegra/dpaux.h b/drivers/gpu/drm/tegra/dpaux.h index 806e245ca787..20783d9f4728 100644 --- a/drivers/gpu/drm/tegra/dpaux.h +++ b/drivers/gpu/drm/tegra/dpaux.h @@ -57,6 +57,8 @@ #define DPAUX_DP_AUX_CONFIG 0x45 #define DPAUX_HYBRID_PADCTL 0x49 +#define DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV (1 << 15) +#define DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV (1 << 14) #define DPAUX_HYBRID_PADCTL_AUX_CMH(x) (((x) & 0x3) << 12) #define DPAUX_HYBRID_PADCTL_AUX_DRVZ(x) (((x) & 0x7) << 8) #define DPAUX_HYBRID_PADCTL_AUX_DRVI(x) (((x) & 0x3f) << 2) diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 427f50c6803c..6d88cf1fcd1c 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -171,8 +171,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags) if (err < 0) goto fbdev; - drm_mode_config_reset(drm); - /* * We don't use the drm_irq_install() helpers provided by the DRM * core, so we need to set this manually in order to allow the @@ -182,11 +180,14 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags) /* syncpoints are used for full 32-bit hardware VBLANK counters */ drm->max_vblank_count = 0xffffffff; + drm->vblank_disable_allowed = true; err = drm_vblank_init(drm, drm->mode_config.num_crtc); if (err < 0) goto device; + drm_mode_config_reset(drm); + err = tegra_drm_fb_init(drm); if (err < 0) goto vblank; @@ -1037,9 +1038,8 @@ static int host1x_drm_resume(struct device *dev) } #endif -static const struct dev_pm_ops host1x_drm_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(host1x_drm_suspend, host1x_drm_resume) -}; 
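Note on the hunk below: the replacement is pure shorthand for the block being removed above. SIMPLE_DEV_PM_OPS() (from include/linux/pm.h) wraps the same SET_SYSTEM_SLEEP_PM_OPS() initializer the driver was open-coding, so suspend/resume behaviour is unchanged. For reference, a lightly condensed version of the macro:

#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
	const struct dev_pm_ops name = { \
		SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
	}

SET_SYSTEM_SLEEP_PM_OPS() only fills in .suspend/.resume (and the freeze/thaw/poweroff/restore callbacks) when CONFIG_PM_SLEEP is enabled; the open-coded struct used the same initializer, so nothing changes under either configuration.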
+static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend, + host1x_drm_resume); static const struct of_device_id host1x_drm_subdevs[] = { { .compatible = "nvidia,tegra20-dc", }, @@ -1056,6 +1056,12 @@ static const struct of_device_id host1x_drm_subdevs[] = { { .compatible = "nvidia,tegra124-dc", }, { .compatible = "nvidia,tegra124-sor", }, { .compatible = "nvidia,tegra124-hdmi", }, + { .compatible = "nvidia,tegra124-dsi", }, + { .compatible = "nvidia,tegra132-dsi", }, + { .compatible = "nvidia,tegra210-dc", }, + { .compatible = "nvidia,tegra210-dsi", }, + { .compatible = "nvidia,tegra210-sor", }, + { .compatible = "nvidia,tegra210-sor1", }, { /* sentinel */ } }; diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index 659b2fcc986d..ec49275ffb24 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -12,6 +12,7 @@ #include <uapi/drm/tegra_drm.h> #include <linux/host1x.h> +#include <linux/of_gpio.h> #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> @@ -104,6 +105,13 @@ int tegra_drm_exit(struct tegra_drm *tegra); struct tegra_dc_soc_info; struct tegra_output; +struct tegra_dc_stats { + unsigned long frames; + unsigned long vblank; + unsigned long underflow; + unsigned long overflow; +}; + struct tegra_dc { struct host1x_client client; struct host1x_syncpt *syncpt; @@ -121,6 +129,7 @@ struct tegra_dc { struct tegra_output *rgb; + struct tegra_dc_stats stats; struct list_head list; struct drm_info_list *debugfs_files; @@ -200,6 +209,7 @@ struct tegra_output { const struct edid *edid; unsigned int hpd_irq; int hpd_gpio; + enum of_gpio_flags hpd_gpio_flags; struct drm_encoder encoder; struct drm_connector connector; diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index dc97c0b3681d..f0a138ef68ce 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -119,6 +119,16 @@ static int tegra_dsi_show_regs(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct tegra_dsi *dsi = node->info_ent->data; + struct drm_crtc *crtc = dsi->output.encoder.crtc; + struct drm_device *drm = node->minor->dev; + int err = 0; + + drm_modeset_lock_all(drm); + + if (!crtc || !crtc->state->active) { + err = -EBUSY; + goto unlock; + } #define DUMP_REG(name) \ seq_printf(s, "%-32s %#05x %08x\n", #name, name, \ @@ -208,7 +218,9 @@ static int tegra_dsi_show_regs(struct seq_file *s, void *data) #undef DUMP_REG - return 0; +unlock: + drm_modeset_unlock_all(drm); + return err; } static struct drm_info_list debugfs_files[] = { @@ -548,14 +560,19 @@ static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe, /* horizontal sync width */ hsw = (mode->hsync_end - mode->hsync_start) * mul / div; - hsw -= 10; /* horizontal back porch */ hbp = (mode->htotal - mode->hsync_end) * mul / div; - hbp -= 14; + + if ((dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) == 0) + hbp += hsw; /* horizontal front porch */ hfp = (mode->hsync_start - mode->hdisplay) * mul / div; + + /* subtract packet overhead */ + hsw -= 10; + hbp -= 14; hfp -= 8; tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1); @@ -726,11 +743,6 @@ static void tegra_dsi_soft_reset(struct tegra_dsi *dsi) tegra_dsi_soft_reset(dsi->slave); } -static int tegra_dsi_connector_dpms(struct drm_connector *connector, int mode) -{ - return 0; -} - static void tegra_dsi_connector_reset(struct drm_connector *connector) { struct tegra_dsi_state *state; @@ -757,7 +769,7 @@ tegra_dsi_connector_duplicate_state(struct drm_connector *connector) } static const struct 
drm_connector_funcs tegra_dsi_connector_funcs = { - .dpms = tegra_dsi_connector_dpms, + .dpms = drm_atomic_helper_connector_dpms, .reset = tegra_dsi_connector_reset, .detect = tegra_output_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, @@ -783,22 +795,48 @@ static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = { .destroy = tegra_output_encoder_destroy, }; -static void tegra_dsi_encoder_dpms(struct drm_encoder *encoder, int mode) +static void tegra_dsi_encoder_disable(struct drm_encoder *encoder) { -} + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + struct tegra_dsi *dsi = to_dsi(output); + u32 value; + int err; -static void tegra_dsi_encoder_prepare(struct drm_encoder *encoder) -{ -} + if (output->panel) + drm_panel_disable(output->panel); -static void tegra_dsi_encoder_commit(struct drm_encoder *encoder) -{ + tegra_dsi_video_disable(dsi); + + /* + * The following accesses registers of the display controller, so make + * sure it's only executed when the output is attached to one. + */ + if (dc) { + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); + value &= ~DSI_ENABLE; + tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); + + tegra_dc_commit(dc); + } + + err = tegra_dsi_wait_idle(dsi, 100); + if (err < 0) + dev_dbg(dsi->dev, "failed to idle DSI: %d\n", err); + + tegra_dsi_soft_reset(dsi); + + if (output->panel) + drm_panel_unprepare(output->panel); + + tegra_dsi_disable(dsi); + + return; } -static void tegra_dsi_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted) +static void tegra_dsi_encoder_enable(struct drm_encoder *encoder) { + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct tegra_output *output = encoder_to_output(encoder); struct tegra_dc *dc = to_tegra_dc(encoder->crtc); struct tegra_dsi *dsi = to_dsi(output); @@ -836,45 +874,6 @@ static void tegra_dsi_encoder_mode_set(struct drm_encoder *encoder, return; } -static void tegra_dsi_encoder_disable(struct drm_encoder *encoder) -{ - struct tegra_output *output = encoder_to_output(encoder); - struct tegra_dc *dc = to_tegra_dc(encoder->crtc); - struct tegra_dsi *dsi = to_dsi(output); - u32 value; - int err; - - if (output->panel) - drm_panel_disable(output->panel); - - tegra_dsi_video_disable(dsi); - - /* - * The following accesses registers of the display controller, so make - * sure it's only executed when the output is attached to one. 
- */ - if (dc) { - value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); - value &= ~DSI_ENABLE; - tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - - tegra_dc_commit(dc); - } - - err = tegra_dsi_wait_idle(dsi, 100); - if (err < 0) - dev_dbg(dsi->dev, "failed to idle DSI: %d\n", err); - - tegra_dsi_soft_reset(dsi); - - if (output->panel) - drm_panel_unprepare(output->panel); - - tegra_dsi_disable(dsi); - - return; -} - static int tegra_dsi_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, @@ -957,11 +956,8 @@ tegra_dsi_encoder_atomic_check(struct drm_encoder *encoder, } static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = { - .dpms = tegra_dsi_encoder_dpms, - .prepare = tegra_dsi_encoder_prepare, - .commit = tegra_dsi_encoder_commit, - .mode_set = tegra_dsi_encoder_mode_set, .disable = tegra_dsi_encoder_disable, + .enable = tegra_dsi_encoder_enable, .atomic_check = tegra_dsi_encoder_atomic_check, }; @@ -993,6 +989,10 @@ static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi) DSI_PAD_OUT_CLK(0x0); tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2); + value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) | + DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3); + tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3); + return tegra_mipi_calibrate(dsi->mipi); } @@ -1622,6 +1622,9 @@ static int tegra_dsi_remove(struct platform_device *pdev) } static const struct of_device_id tegra_dsi_of_match[] = { + { .compatible = "nvidia,tegra210-dsi", }, + { .compatible = "nvidia,tegra132-dsi", }, + { .compatible = "nvidia,tegra124-dsi", }, { .compatible = "nvidia,tegra114-dsi", }, { }, }; diff --git a/drivers/gpu/drm/tegra/dsi.h b/drivers/gpu/drm/tegra/dsi.h index bad1006a5150..219263615399 100644 --- a/drivers/gpu/drm/tegra/dsi.h +++ b/drivers/gpu/drm/tegra/dsi.h @@ -113,6 +113,10 @@ #define DSI_PAD_SLEW_DN(x) (((x) & 0x7) << 12) #define DSI_PAD_SLEW_UP(x) (((x) & 0x7) << 16) #define DSI_PAD_CONTROL_3 0x51 +#define DSI_PAD_PREEMP_PD_CLK(x) (((x) & 0x3) << 12) +#define DSI_PAD_PREEMP_PU_CLK(x) (((x) & 0x3) << 8) +#define DSI_PAD_PREEMP_PD(x) (((x) & 0x3) << 4) +#define DSI_PAD_PREEMP_PU(x) (((x) & 0x3) << 0) #define DSI_PAD_CONTROL_4 0x52 #define DSI_GANGED_MODE_CONTROL 0x53 #define DSI_GANGED_MODE_CONTROL_ENABLE (1 << 0) diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index 397fb34d5d5b..07c844b746b4 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c @@ -184,9 +184,9 @@ unreference: #ifdef CONFIG_DRM_TEGRA_FBDEV static struct fb_ops tegra_fb_ops = { .owner = THIS_MODULE, - .fb_fillrect = sys_fillrect, - .fb_copyarea = sys_copyarea, - .fb_imageblit = sys_imageblit, + .fb_fillrect = drm_fb_helper_sys_fillrect, + .fb_copyarea = drm_fb_helper_sys_copyarea, + .fb_imageblit = drm_fb_helper_sys_imageblit, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_blank = drm_fb_helper_blank, @@ -224,11 +224,11 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper, if (IS_ERR(bo)) return PTR_ERR(bo); - info = framebuffer_alloc(0, drm->dev); - if (!info) { + info = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(info)) { dev_err(drm->dev, "failed to allocate framebuffer info\n"); drm_gem_object_unreference_unlocked(&bo->gem); - return -ENOMEM; + return PTR_ERR(info); } fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1); @@ -248,12 +248,6 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper, info->flags = FBINFO_FLAG_DEFAULT; info->fbops = &tegra_fb_ops; - err = 
fb_alloc_cmap(&info->cmap, 256, 0); - if (err < 0) { - dev_err(drm->dev, "failed to allocate color map: %d\n", err); - goto destroy; - } - drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); drm_fb_helper_fill_var(info, helper, fb->width, fb->height); @@ -282,7 +276,7 @@ destroy: drm_framebuffer_unregister_private(fb); tegra_fb_destroy(fb); release: - framebuffer_release(info); + drm_fb_helper_release_fbi(helper); return err; } @@ -347,20 +341,9 @@ fini: static void tegra_fbdev_exit(struct tegra_fbdev *fbdev) { - struct fb_info *info = fbdev->base.fbdev; - - if (info) { - int err; - err = unregister_framebuffer(info); - if (err < 0) - DRM_DEBUG_KMS("failed to unregister framebuffer\n"); - - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - - framebuffer_release(info); - } + drm_fb_helper_unregister_fbi(&fbdev->base); + drm_fb_helper_release_fbi(&fbdev->base); if (fbdev->fb) { drm_framebuffer_unregister_private(&fbdev->fb->base); diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index fe4008a7ddba..52b32cbd9de6 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -772,14 +772,8 @@ static bool tegra_output_is_hdmi(struct tegra_output *output) return drm_detect_hdmi_monitor(edid); } -static int tegra_hdmi_connector_dpms(struct drm_connector *connector, - int mode) -{ - return 0; -} - static const struct drm_connector_funcs tegra_hdmi_connector_funcs = { - .dpms = tegra_hdmi_connector_dpms, + .dpms = drm_atomic_helper_connector_dpms, .reset = drm_atomic_helper_connector_reset, .detect = tegra_output_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, @@ -819,22 +813,27 @@ static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = { .destroy = tegra_output_encoder_destroy, }; -static void tegra_hdmi_encoder_dpms(struct drm_encoder *encoder, int mode) +static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder) { -} + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + u32 value; -static void tegra_hdmi_encoder_prepare(struct drm_encoder *encoder) -{ -} + /* + * The following accesses registers of the display controller, so make + * sure it's only executed when the output is attached to one. 
+ */ + if (dc) { + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); + value &= ~HDMI_ENABLE; + tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); -static void tegra_hdmi_encoder_commit(struct drm_encoder *encoder) -{ + tegra_dc_commit(dc); + } } -static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted) +static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder) { + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey; struct tegra_output *output = encoder_to_output(encoder); struct tegra_dc *dc = to_tegra_dc(encoder->crtc); @@ -873,13 +872,13 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder, tegra_dc_writel(dc, VSYNC_H_POSITION(1), DC_DISP_DISP_TIMING_OPTIONS); - tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888, + tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE_888, DC_DISP_DISP_COLOR_CONTROL); /* video_preamble uses h_pulse2 */ pulse_start = 1 + h_sync_width + h_back_porch - 10; - tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0); + tegra_dc_writel(dc, H_PULSE2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0); value = PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE | PULSE_LAST_END_A; @@ -1036,24 +1035,6 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder, /* TODO: add HDCP support */ } -static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder) -{ - struct tegra_dc *dc = to_tegra_dc(encoder->crtc); - u32 value; - - /* - * The following accesses registers of the display controller, so make - * sure it's only executed when the output is attached to one. - */ - if (dc) { - value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); - value &= ~HDMI_ENABLE; - tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - - tegra_dc_commit(dc); - } -} - static int tegra_hdmi_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, @@ -1076,11 +1057,8 @@ tegra_hdmi_encoder_atomic_check(struct drm_encoder *encoder, } static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = { - .dpms = tegra_hdmi_encoder_dpms, - .prepare = tegra_hdmi_encoder_prepare, - .commit = tegra_hdmi_encoder_commit, - .mode_set = tegra_hdmi_encoder_mode_set, .disable = tegra_hdmi_encoder_disable, + .enable = tegra_hdmi_encoder_enable, .atomic_check = tegra_hdmi_encoder_atomic_check, }; @@ -1088,11 +1066,16 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct tegra_hdmi *hdmi = node->info_ent->data; - int err; + struct drm_crtc *crtc = hdmi->output.encoder.crtc; + struct drm_device *drm = node->minor->dev; + int err = 0; - err = clk_prepare_enable(hdmi->clk); - if (err) - return err; + drm_modeset_lock_all(drm); + + if (!crtc || !crtc->state->active) { + err = -EBUSY; + goto unlock; + } #define DUMP_REG(name) \ seq_printf(s, "%-56s %#05x %08x\n", #name, name, \ @@ -1259,9 +1242,9 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data) #undef DUMP_REG - clk_disable_unprepare(hdmi->clk); - - return 0; +unlock: + drm_modeset_unlock_all(drm); + return err; } static struct drm_info_list debugfs_files[] = { diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index 37db47975d48..46664b622270 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -7,8 +7,6 @@ * published by the Free Software 
Foundation. */ -#include <linux/of_gpio.h> - #include <drm/drm_atomic_helper.h> #include <drm/drm_panel.h> #include "drm.h" @@ -59,10 +57,17 @@ tegra_output_connector_detect(struct drm_connector *connector, bool force) enum drm_connector_status status = connector_status_unknown; if (gpio_is_valid(output->hpd_gpio)) { - if (gpio_get_value(output->hpd_gpio) == 0) - status = connector_status_disconnected; - else - status = connector_status_connected; + if (output->hpd_gpio_flags & OF_GPIO_ACTIVE_LOW) { + if (gpio_get_value(output->hpd_gpio) != 0) + status = connector_status_disconnected; + else + status = connector_status_connected; + } else { + if (gpio_get_value(output->hpd_gpio) == 0) + status = connector_status_disconnected; + else + status = connector_status_connected; + } } else { if (!output->panel) status = connector_status_disconnected; @@ -97,7 +102,6 @@ static irqreturn_t hpd_irq(int irq, void *data) int tegra_output_probe(struct tegra_output *output) { struct device_node *ddc, *panel; - enum of_gpio_flags flags; int err, size; if (!output->of_node) @@ -128,7 +132,7 @@ int tegra_output_probe(struct tegra_output *output) output->hpd_gpio = of_get_named_gpio_flags(output->of_node, "nvidia,hpd-gpio", 0, - &flags); + &output->hpd_gpio_flags); if (gpio_is_valid(output->hpd_gpio)) { unsigned long flags; diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c index 9a99d213e1b1..bc9735b4ad60 100644 --- a/drivers/gpu/drm/tegra/rgb.c +++ b/drivers/gpu/drm/tegra/rgb.c @@ -18,7 +18,6 @@ struct tegra_rgb { struct tegra_output output; struct tegra_dc *dc; - bool enabled; struct clk *clk_parent; struct clk *clk; @@ -88,14 +87,8 @@ static void tegra_dc_write_regs(struct tegra_dc *dc, tegra_dc_writel(dc, table[i].value, table[i].offset); } -static int tegra_rgb_connector_dpms(struct drm_connector *connector, - int mode) -{ - return 0; -} - static const struct drm_connector_funcs tegra_rgb_connector_funcs = { - .dpms = tegra_rgb_connector_dpms, + .dpms = drm_atomic_helper_connector_dpms, .reset = drm_atomic_helper_connector_reset, .detect = tegra_output_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, @@ -126,21 +119,22 @@ static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = { .destroy = tegra_output_encoder_destroy, }; -static void tegra_rgb_encoder_dpms(struct drm_encoder *encoder, int mode) +static void tegra_rgb_encoder_disable(struct drm_encoder *encoder) { -} + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_rgb *rgb = to_rgb(output); -static void tegra_rgb_encoder_prepare(struct drm_encoder *encoder) -{ -} + if (output->panel) + drm_panel_disable(output->panel); -static void tegra_rgb_encoder_commit(struct drm_encoder *encoder) -{ + tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable)); + tegra_dc_commit(rgb->dc); + + if (output->panel) + drm_panel_unprepare(output->panel); } -static void tegra_rgb_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted) +static void tegra_rgb_encoder_enable(struct drm_encoder *encoder) { struct tegra_output *output = encoder_to_output(encoder); struct tegra_rgb *rgb = to_rgb(output); @@ -175,21 +169,6 @@ static void tegra_rgb_encoder_mode_set(struct drm_encoder *encoder, drm_panel_enable(output->panel); } -static void tegra_rgb_encoder_disable(struct drm_encoder *encoder) -{ - struct tegra_output *output = encoder_to_output(encoder); - struct tegra_rgb *rgb = to_rgb(output); - - if (output->panel) - 
drm_panel_disable(output->panel); - - tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable)); - tegra_dc_commit(rgb->dc); - - if (output->panel) - drm_panel_unprepare(output->panel); -} - static int tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, @@ -232,11 +211,8 @@ tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder, } static const struct drm_encoder_helper_funcs tegra_rgb_encoder_helper_funcs = { - .dpms = tegra_rgb_encoder_dpms, - .prepare = tegra_rgb_encoder_prepare, - .commit = tegra_rgb_encoder_commit, - .mode_set = tegra_rgb_encoder_mode_set, .disable = tegra_rgb_encoder_disable, + .enable = tegra_rgb_encoder_enable, .atomic_check = tegra_rgb_encoder_atomic_check, }; diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index ee8ad0d4a0f2..da1715ebdd71 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -10,7 +10,9 @@ #include <linux/debugfs.h> #include <linux/gpio.h> #include <linux/io.h> +#include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/regulator/consumer.h> #include <linux/reset.h> #include <soc/tegra/pmc.h> @@ -23,11 +25,146 @@ #include "drm.h" #include "sor.h" +#define SOR_REKEY 0x38 + +struct tegra_sor_hdmi_settings { + unsigned long frequency; + + u8 vcocap; + u8 ichpmp; + u8 loadadj; + u8 termadj; + u8 tx_pu; + u8 bg_vref; + + u8 drive_current[4]; + u8 preemphasis[4]; +}; + +#if 1 +static const struct tegra_sor_hdmi_settings tegra210_sor_hdmi_defaults[] = { + { + .frequency = 54000000, + .vcocap = 0x0, + .ichpmp = 0x1, + .loadadj = 0x3, + .termadj = 0x9, + .tx_pu = 0x10, + .bg_vref = 0x8, + .drive_current = { 0x33, 0x3a, 0x3a, 0x3a }, + .preemphasis = { 0x00, 0x00, 0x00, 0x00 }, + }, { + .frequency = 75000000, + .vcocap = 0x3, + .ichpmp = 0x1, + .loadadj = 0x3, + .termadj = 0x9, + .tx_pu = 0x40, + .bg_vref = 0x8, + .drive_current = { 0x33, 0x3a, 0x3a, 0x3a }, + .preemphasis = { 0x00, 0x00, 0x00, 0x00 }, + }, { + .frequency = 150000000, + .vcocap = 0x3, + .ichpmp = 0x1, + .loadadj = 0x3, + .termadj = 0x9, + .tx_pu = 0x66, + .bg_vref = 0x8, + .drive_current = { 0x33, 0x3a, 0x3a, 0x3a }, + .preemphasis = { 0x00, 0x00, 0x00, 0x00 }, + }, { + .frequency = 300000000, + .vcocap = 0x3, + .ichpmp = 0x1, + .loadadj = 0x3, + .termadj = 0x9, + .tx_pu = 0x66, + .bg_vref = 0xa, + .drive_current = { 0x33, 0x3f, 0x3f, 0x3f }, + .preemphasis = { 0x00, 0x17, 0x17, 0x17 }, + }, { + .frequency = 600000000, + .vcocap = 0x3, + .ichpmp = 0x1, + .loadadj = 0x3, + .termadj = 0x9, + .tx_pu = 0x66, + .bg_vref = 0x8, + .drive_current = { 0x33, 0x3f, 0x3f, 0x3f }, + .preemphasis = { 0x00, 0x00, 0x00, 0x00 }, + }, +}; +#else +static const struct tegra_sor_hdmi_settings tegra210_sor_hdmi_defaults[] = { + { + .frequency = 75000000, + .vcocap = 0x3, + .ichpmp = 0x1, + .loadadj = 0x3, + .termadj = 0x9, + .tx_pu = 0x40, + .bg_vref = 0x8, + .drive_current = { 0x29, 0x29, 0x29, 0x29 }, + .preemphasis = { 0x00, 0x00, 0x00, 0x00 }, + }, { + .frequency = 150000000, + .vcocap = 0x3, + .ichpmp = 0x1, + .loadadj = 0x3, + .termadj = 0x9, + .tx_pu = 0x66, + .bg_vref = 0x8, + .drive_current = { 0x30, 0x37, 0x37, 0x37 }, + .preemphasis = { 0x01, 0x02, 0x02, 0x02 }, + }, { + .frequency = 300000000, + .vcocap = 0x3, + .ichpmp = 0x6, + .loadadj = 0x3, + .termadj = 0x9, + .tx_pu = 0x66, + .bg_vref = 0xf, + .drive_current = { 0x30, 0x37, 0x37, 0x37 }, + .preemphasis = { 0x10, 0x3e, 0x3e, 0x3e }, + }, { + .frequency = 600000000, + .vcocap = 0x3, + .ichpmp = 0xa, + .loadadj = 0x3, + 
.termadj = 0xb, + .tx_pu = 0x66, + .bg_vref = 0xe, + .drive_current = { 0x35, 0x3e, 0x3e, 0x3e }, + .preemphasis = { 0x02, 0x3f, 0x3f, 0x3f }, + }, +}; +#endif + +struct tegra_sor_soc { + bool supports_edp; + bool supports_lvds; + bool supports_hdmi; + bool supports_dp; + + const struct tegra_sor_hdmi_settings *settings; + unsigned int num_settings; +}; + +struct tegra_sor; + +struct tegra_sor_ops { + const char *name; + int (*probe)(struct tegra_sor *sor); + int (*remove)(struct tegra_sor *sor); +}; + struct tegra_sor { struct host1x_client client; struct tegra_output output; struct device *dev; + const struct tegra_sor_soc *soc; void __iomem *regs; struct reset_control *rst; @@ -38,12 +175,19 @@ struct tegra_sor { struct tegra_dpaux *dpaux; - struct mutex lock; - bool enabled; - struct drm_info_list *debugfs_files; struct drm_minor *minor; struct dentry *debugfs; + + const struct tegra_sor_ops *ops; + + /* for HDMI 2.0 */ + struct tegra_sor_hdmi_settings *settings; + unsigned int num_settings; + + struct regulator *avdd_io_supply; + struct regulator *vdd_pll_supply; + struct regulator *hdmi_supply; }; struct tegra_sor_config { @@ -94,40 +238,40 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor, SOR_LANE_DRIVE_CURRENT_LANE2(0x40) | SOR_LANE_DRIVE_CURRENT_LANE1(0x40) | SOR_LANE_DRIVE_CURRENT_LANE0(0x40); - tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT_0); + tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0); value = SOR_LANE_PREEMPHASIS_LANE3(0x0f) | SOR_LANE_PREEMPHASIS_LANE2(0x0f) | SOR_LANE_PREEMPHASIS_LANE1(0x0f) | SOR_LANE_PREEMPHASIS_LANE0(0x0f); - tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS_0); + tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0); - value = SOR_LANE_POST_CURSOR_LANE3(0x00) | - SOR_LANE_POST_CURSOR_LANE2(0x00) | - SOR_LANE_POST_CURSOR_LANE1(0x00) | - SOR_LANE_POST_CURSOR_LANE0(0x00); - tegra_sor_writel(sor, value, SOR_LANE_POST_CURSOR_0); + value = SOR_LANE_POSTCURSOR_LANE3(0x00) | + SOR_LANE_POSTCURSOR_LANE2(0x00) | + SOR_LANE_POSTCURSOR_LANE1(0x00) | + SOR_LANE_POSTCURSOR_LANE0(0x00); + tegra_sor_writel(sor, value, SOR_LANE_POSTCURSOR0); /* disable LVDS mode */ tegra_sor_writel(sor, 0, SOR_LVDS); - value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); + value = tegra_sor_readl(sor, SOR_DP_PADCTL0); value |= SOR_DP_PADCTL_TX_PU_ENABLE; value &= ~SOR_DP_PADCTL_TX_PU_MASK; value |= SOR_DP_PADCTL_TX_PU(2); /* XXX: don't hardcode? 
*/ - tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); + tegra_sor_writel(sor, value, SOR_DP_PADCTL0); - value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); + value = tegra_sor_readl(sor, SOR_DP_PADCTL0); value |= SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 | SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0; - tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); + tegra_sor_writel(sor, value, SOR_DP_PADCTL0); usleep_range(10, 100); - value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); + value = tegra_sor_readl(sor, SOR_DP_PADCTL0); value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 | SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0); - tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); + tegra_sor_writel(sor, value, SOR_DP_PADCTL0); err = tegra_dpaux_prepare(sor->dpaux, DP_SET_ANSI_8B10B); if (err < 0) @@ -148,11 +292,11 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor, if (err < 0) return err; - value = tegra_sor_readl(sor, SOR_DP_SPARE_0); + value = tegra_sor_readl(sor, SOR_DP_SPARE0); value |= SOR_DP_SPARE_SEQ_ENABLE; value &= ~SOR_DP_SPARE_PANEL_INTERNAL; value |= SOR_DP_SPARE_MACRO_SOR_CLK; - tegra_sor_writel(sor, value, SOR_DP_SPARE_0); + tegra_sor_writel(sor, value, SOR_DP_SPARE0); for (i = 0, value = 0; i < link->num_lanes; i++) { unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | @@ -187,18 +331,59 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor, return 0; } +static void tegra_sor_dp_term_calibrate(struct tegra_sor *sor) +{ + u32 mask = 0x08, adj = 0, value; + + /* enable pad calibration logic */ + value = tegra_sor_readl(sor, SOR_DP_PADCTL0); + value &= ~SOR_DP_PADCTL_PAD_CAL_PD; + tegra_sor_writel(sor, value, SOR_DP_PADCTL0); + + value = tegra_sor_readl(sor, SOR_PLL1); + value |= SOR_PLL1_TMDS_TERM; + tegra_sor_writel(sor, value, SOR_PLL1); + + while (mask) { + adj |= mask; + + value = tegra_sor_readl(sor, SOR_PLL1); + value &= ~SOR_PLL1_TMDS_TERMADJ_MASK; + value |= SOR_PLL1_TMDS_TERMADJ(adj); + tegra_sor_writel(sor, value, SOR_PLL1); + + usleep_range(100, 200); + + value = tegra_sor_readl(sor, SOR_PLL1); + if (value & SOR_PLL1_TERM_COMPOUT) + adj &= ~mask; + + mask >>= 1; + } + + value = tegra_sor_readl(sor, SOR_PLL1); + value &= ~SOR_PLL1_TMDS_TERMADJ_MASK; + value |= SOR_PLL1_TMDS_TERMADJ(adj); + tegra_sor_writel(sor, value, SOR_PLL1); + + /* disable pad calibration logic */ + value = tegra_sor_readl(sor, SOR_DP_PADCTL0); + value |= SOR_DP_PADCTL_PAD_CAL_PD; + tegra_sor_writel(sor, value, SOR_DP_PADCTL0); +} + static void tegra_sor_super_update(struct tegra_sor *sor) { - tegra_sor_writel(sor, 0, SOR_SUPER_STATE_0); - tegra_sor_writel(sor, 1, SOR_SUPER_STATE_0); - tegra_sor_writel(sor, 0, SOR_SUPER_STATE_0); + tegra_sor_writel(sor, 0, SOR_SUPER_STATE0); + tegra_sor_writel(sor, 1, SOR_SUPER_STATE0); + tegra_sor_writel(sor, 0, SOR_SUPER_STATE0); } static void tegra_sor_update(struct tegra_sor *sor) { - tegra_sor_writel(sor, 0, SOR_STATE_0); - tegra_sor_writel(sor, 1, SOR_STATE_0); - tegra_sor_writel(sor, 0, SOR_STATE_0); + tegra_sor_writel(sor, 0, SOR_STATE0); + tegra_sor_writel(sor, 1, SOR_STATE0); + tegra_sor_writel(sor, 0, SOR_STATE0); } static int tegra_sor_setup_pwm(struct tegra_sor *sor, unsigned long timeout) @@ -235,16 +420,16 @@ static int tegra_sor_attach(struct tegra_sor *sor) unsigned long value, timeout; /* wake up in normal mode */ - value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); + value = tegra_sor_readl(sor, SOR_SUPER_STATE1); value |= SOR_SUPER_STATE_HEAD_MODE_AWAKE; value |= SOR_SUPER_STATE_MODE_NORMAL; - tegra_sor_writel(sor, value, 
SOR_SUPER_STATE_1); + tegra_sor_writel(sor, value, SOR_SUPER_STATE1); tegra_sor_super_update(sor); /* attach */ - value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); + value = tegra_sor_readl(sor, SOR_SUPER_STATE1); value |= SOR_SUPER_STATE_ATTACHED; - tegra_sor_writel(sor, value, SOR_SUPER_STATE_1); + tegra_sor_writel(sor, value, SOR_SUPER_STATE1); tegra_sor_super_update(sor); timeout = jiffies + msecs_to_jiffies(250); @@ -385,7 +570,7 @@ static int tegra_sor_compute_params(struct tegra_sor *sor, } static int tegra_sor_calc_config(struct tegra_sor *sor, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct tegra_sor_config *config, struct drm_dp_link *link) { @@ -481,9 +666,9 @@ static int tegra_sor_detach(struct tegra_sor *sor) unsigned long value, timeout; /* switch to safe mode */ - value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); + value = tegra_sor_readl(sor, SOR_SUPER_STATE1); value &= ~SOR_SUPER_STATE_MODE_NORMAL; - tegra_sor_writel(sor, value, SOR_SUPER_STATE_1); + tegra_sor_writel(sor, value, SOR_SUPER_STATE1); tegra_sor_super_update(sor); timeout = jiffies + msecs_to_jiffies(250); @@ -498,15 +683,15 @@ static int tegra_sor_detach(struct tegra_sor *sor) return -ETIMEDOUT; /* go to sleep */ - value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); + value = tegra_sor_readl(sor, SOR_SUPER_STATE1); value &= ~SOR_SUPER_STATE_HEAD_MODE_MASK; - tegra_sor_writel(sor, value, SOR_SUPER_STATE_1); + tegra_sor_writel(sor, value, SOR_SUPER_STATE1); tegra_sor_super_update(sor); /* detach */ - value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); + value = tegra_sor_readl(sor, SOR_SUPER_STATE1); value &= ~SOR_SUPER_STATE_ATTACHED; - tegra_sor_writel(sor, value, SOR_SUPER_STATE_1); + tegra_sor_writel(sor, value, SOR_SUPER_STATE1); tegra_sor_super_update(sor); timeout = jiffies + msecs_to_jiffies(250); @@ -552,10 +737,10 @@ static int tegra_sor_power_down(struct tegra_sor *sor) if (err < 0) dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); - value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); + value = tegra_sor_readl(sor, SOR_DP_PADCTL0); value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 | SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2); - tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); + tegra_sor_writel(sor, value, SOR_DP_PADCTL0); /* stop lane sequencer */ value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP | @@ -575,39 +760,26 @@ static int tegra_sor_power_down(struct tegra_sor *sor) if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0) return -ETIMEDOUT; - value = tegra_sor_readl(sor, SOR_PLL_2); - value |= SOR_PLL_2_PORT_POWERDOWN; - tegra_sor_writel(sor, value, SOR_PLL_2); + value = tegra_sor_readl(sor, SOR_PLL2); + value |= SOR_PLL2_PORT_POWERDOWN; + tegra_sor_writel(sor, value, SOR_PLL2); usleep_range(20, 100); - value = tegra_sor_readl(sor, SOR_PLL_0); - value |= SOR_PLL_0_POWER_OFF; - value |= SOR_PLL_0_VCOPD; - tegra_sor_writel(sor, value, SOR_PLL_0); + value = tegra_sor_readl(sor, SOR_PLL0); + value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR; + tegra_sor_writel(sor, value, SOR_PLL0); - value = tegra_sor_readl(sor, SOR_PLL_2); - value |= SOR_PLL_2_SEQ_PLLCAPPD; - value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE; - tegra_sor_writel(sor, value, SOR_PLL_2); + value = tegra_sor_readl(sor, SOR_PLL2); + value |= SOR_PLL2_SEQ_PLLCAPPD; + value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE; + tegra_sor_writel(sor, value, SOR_PLL2); usleep_range(20, 100); return 0; } -static int tegra_sor_crc_open(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - - return 
0; -} - -static int tegra_sor_crc_release(struct inode *inode, struct file *file) -{ - return 0; -} - static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout) { u32 value; @@ -615,8 +787,8 @@ static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout) timeout = jiffies + msecs_to_jiffies(timeout); while (time_before(jiffies, timeout)) { - value = tegra_sor_readl(sor, SOR_CRC_A); - if (value & SOR_CRC_A_VALID) + value = tegra_sor_readl(sor, SOR_CRCA); + if (value & SOR_CRCA_VALID) return 0; usleep_range(100, 200); @@ -625,24 +797,25 @@ static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout) return -ETIMEDOUT; } -static ssize_t tegra_sor_crc_read(struct file *file, char __user *buffer, - size_t size, loff_t *ppos) +static int tegra_sor_show_crc(struct seq_file *s, void *data) { - struct tegra_sor *sor = file->private_data; - ssize_t num, err; - char buf[10]; + struct drm_info_node *node = s->private; + struct tegra_sor *sor = node->info_ent->data; + struct drm_crtc *crtc = sor->output.encoder.crtc; + struct drm_device *drm = node->minor->dev; + int err = 0; u32 value; - mutex_lock(&sor->lock); + drm_modeset_lock_all(drm); - if (!sor->enabled) { - err = -EAGAIN; + if (!crtc || !crtc->state->active) { + err = -EBUSY; goto unlock; } - value = tegra_sor_readl(sor, SOR_STATE_1); + value = tegra_sor_readl(sor, SOR_STATE1); value &= ~SOR_STATE_ASY_CRC_MODE_MASK; - tegra_sor_writel(sor, value, SOR_STATE_1); + tegra_sor_writel(sor, value, SOR_STATE1); value = tegra_sor_readl(sor, SOR_CRC_CNTRL); value |= SOR_CRC_CNTRL_ENABLE; @@ -656,65 +829,66 @@ static ssize_t tegra_sor_crc_read(struct file *file, char __user *buffer, if (err < 0) goto unlock; - tegra_sor_writel(sor, SOR_CRC_A_RESET, SOR_CRC_A); - value = tegra_sor_readl(sor, SOR_CRC_B); + tegra_sor_writel(sor, SOR_CRCA_RESET, SOR_CRCA); + value = tegra_sor_readl(sor, SOR_CRCB); - num = scnprintf(buf, sizeof(buf), "%08x\n", value); - - err = simple_read_from_buffer(buffer, size, ppos, buf, num); + seq_printf(s, "%08x\n", value); unlock: - mutex_unlock(&sor->lock); + drm_modeset_unlock_all(drm); return err; } -static const struct file_operations tegra_sor_crc_fops = { - .owner = THIS_MODULE, - .open = tegra_sor_crc_open, - .read = tegra_sor_crc_read, - .release = tegra_sor_crc_release, -}; - static int tegra_sor_show_regs(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct tegra_sor *sor = node->info_ent->data; + struct drm_crtc *crtc = sor->output.encoder.crtc; + struct drm_device *drm = node->minor->dev; + int err = 0; + + drm_modeset_lock_all(drm); + + if (!crtc || !crtc->state->active) { + err = -EBUSY; + goto unlock; + } #define DUMP_REG(name) \ seq_printf(s, "%-38s %#05x %08x\n", #name, name, \ tegra_sor_readl(sor, name)) DUMP_REG(SOR_CTXSW); - DUMP_REG(SOR_SUPER_STATE_0); - DUMP_REG(SOR_SUPER_STATE_1); - DUMP_REG(SOR_STATE_0); - DUMP_REG(SOR_STATE_1); - DUMP_REG(SOR_HEAD_STATE_0(0)); - DUMP_REG(SOR_HEAD_STATE_0(1)); - DUMP_REG(SOR_HEAD_STATE_1(0)); - DUMP_REG(SOR_HEAD_STATE_1(1)); - DUMP_REG(SOR_HEAD_STATE_2(0)); - DUMP_REG(SOR_HEAD_STATE_2(1)); - DUMP_REG(SOR_HEAD_STATE_3(0)); - DUMP_REG(SOR_HEAD_STATE_3(1)); - DUMP_REG(SOR_HEAD_STATE_4(0)); - DUMP_REG(SOR_HEAD_STATE_4(1)); - DUMP_REG(SOR_HEAD_STATE_5(0)); - DUMP_REG(SOR_HEAD_STATE_5(1)); + DUMP_REG(SOR_SUPER_STATE0); + DUMP_REG(SOR_SUPER_STATE1); + DUMP_REG(SOR_STATE0); + DUMP_REG(SOR_STATE1); + DUMP_REG(SOR_HEAD_STATE0(0)); + DUMP_REG(SOR_HEAD_STATE0(1)); + DUMP_REG(SOR_HEAD_STATE1(0)); + 
DUMP_REG(SOR_HEAD_STATE1(1)); + DUMP_REG(SOR_HEAD_STATE2(0)); + DUMP_REG(SOR_HEAD_STATE2(1)); + DUMP_REG(SOR_HEAD_STATE3(0)); + DUMP_REG(SOR_HEAD_STATE3(1)); + DUMP_REG(SOR_HEAD_STATE4(0)); + DUMP_REG(SOR_HEAD_STATE4(1)); + DUMP_REG(SOR_HEAD_STATE5(0)); + DUMP_REG(SOR_HEAD_STATE5(1)); DUMP_REG(SOR_CRC_CNTRL); DUMP_REG(SOR_DP_DEBUG_MVID); DUMP_REG(SOR_CLK_CNTRL); DUMP_REG(SOR_CAP); DUMP_REG(SOR_PWR); DUMP_REG(SOR_TEST); - DUMP_REG(SOR_PLL_0); - DUMP_REG(SOR_PLL_1); - DUMP_REG(SOR_PLL_2); - DUMP_REG(SOR_PLL_3); + DUMP_REG(SOR_PLL0); + DUMP_REG(SOR_PLL1); + DUMP_REG(SOR_PLL2); + DUMP_REG(SOR_PLL3); DUMP_REG(SOR_CSTM); DUMP_REG(SOR_LVDS); - DUMP_REG(SOR_CRC_A); - DUMP_REG(SOR_CRC_B); + DUMP_REG(SOR_CRCA); + DUMP_REG(SOR_CRCB); DUMP_REG(SOR_BLANK); DUMP_REG(SOR_SEQ_CTL); DUMP_REG(SOR_LANE_SEQ_CTL); @@ -736,86 +910,89 @@ static int tegra_sor_show_regs(struct seq_file *s, void *data) DUMP_REG(SOR_SEQ_INST(15)); DUMP_REG(SOR_PWM_DIV); DUMP_REG(SOR_PWM_CTL); - DUMP_REG(SOR_VCRC_A_0); - DUMP_REG(SOR_VCRC_A_1); - DUMP_REG(SOR_VCRC_B_0); - DUMP_REG(SOR_VCRC_B_1); - DUMP_REG(SOR_CCRC_A_0); - DUMP_REG(SOR_CCRC_A_1); - DUMP_REG(SOR_CCRC_B_0); - DUMP_REG(SOR_CCRC_B_1); - DUMP_REG(SOR_EDATA_A_0); - DUMP_REG(SOR_EDATA_A_1); - DUMP_REG(SOR_EDATA_B_0); - DUMP_REG(SOR_EDATA_B_1); - DUMP_REG(SOR_COUNT_A_0); - DUMP_REG(SOR_COUNT_A_1); - DUMP_REG(SOR_COUNT_B_0); - DUMP_REG(SOR_COUNT_B_1); - DUMP_REG(SOR_DEBUG_A_0); - DUMP_REG(SOR_DEBUG_A_1); - DUMP_REG(SOR_DEBUG_B_0); - DUMP_REG(SOR_DEBUG_B_1); + DUMP_REG(SOR_VCRC_A0); + DUMP_REG(SOR_VCRC_A1); + DUMP_REG(SOR_VCRC_B0); + DUMP_REG(SOR_VCRC_B1); + DUMP_REG(SOR_CCRC_A0); + DUMP_REG(SOR_CCRC_A1); + DUMP_REG(SOR_CCRC_B0); + DUMP_REG(SOR_CCRC_B1); + DUMP_REG(SOR_EDATA_A0); + DUMP_REG(SOR_EDATA_A1); + DUMP_REG(SOR_EDATA_B0); + DUMP_REG(SOR_EDATA_B1); + DUMP_REG(SOR_COUNT_A0); + DUMP_REG(SOR_COUNT_A1); + DUMP_REG(SOR_COUNT_B0); + DUMP_REG(SOR_COUNT_B1); + DUMP_REG(SOR_DEBUG_A0); + DUMP_REG(SOR_DEBUG_A1); + DUMP_REG(SOR_DEBUG_B0); + DUMP_REG(SOR_DEBUG_B1); DUMP_REG(SOR_TRIG); DUMP_REG(SOR_MSCHECK); DUMP_REG(SOR_XBAR_CTRL); DUMP_REG(SOR_XBAR_POL); - DUMP_REG(SOR_DP_LINKCTL_0); - DUMP_REG(SOR_DP_LINKCTL_1); - DUMP_REG(SOR_LANE_DRIVE_CURRENT_0); - DUMP_REG(SOR_LANE_DRIVE_CURRENT_1); - DUMP_REG(SOR_LANE4_DRIVE_CURRENT_0); - DUMP_REG(SOR_LANE4_DRIVE_CURRENT_1); - DUMP_REG(SOR_LANE_PREEMPHASIS_0); - DUMP_REG(SOR_LANE_PREEMPHASIS_1); - DUMP_REG(SOR_LANE4_PREEMPHASIS_0); - DUMP_REG(SOR_LANE4_PREEMPHASIS_1); - DUMP_REG(SOR_LANE_POST_CURSOR_0); - DUMP_REG(SOR_LANE_POST_CURSOR_1); - DUMP_REG(SOR_DP_CONFIG_0); - DUMP_REG(SOR_DP_CONFIG_1); - DUMP_REG(SOR_DP_MN_0); - DUMP_REG(SOR_DP_MN_1); - DUMP_REG(SOR_DP_PADCTL_0); - DUMP_REG(SOR_DP_PADCTL_1); - DUMP_REG(SOR_DP_DEBUG_0); - DUMP_REG(SOR_DP_DEBUG_1); - DUMP_REG(SOR_DP_SPARE_0); - DUMP_REG(SOR_DP_SPARE_1); + DUMP_REG(SOR_DP_LINKCTL0); + DUMP_REG(SOR_DP_LINKCTL1); + DUMP_REG(SOR_LANE_DRIVE_CURRENT0); + DUMP_REG(SOR_LANE_DRIVE_CURRENT1); + DUMP_REG(SOR_LANE4_DRIVE_CURRENT0); + DUMP_REG(SOR_LANE4_DRIVE_CURRENT1); + DUMP_REG(SOR_LANE_PREEMPHASIS0); + DUMP_REG(SOR_LANE_PREEMPHASIS1); + DUMP_REG(SOR_LANE4_PREEMPHASIS0); + DUMP_REG(SOR_LANE4_PREEMPHASIS1); + DUMP_REG(SOR_LANE_POSTCURSOR0); + DUMP_REG(SOR_LANE_POSTCURSOR1); + DUMP_REG(SOR_DP_CONFIG0); + DUMP_REG(SOR_DP_CONFIG1); + DUMP_REG(SOR_DP_MN0); + DUMP_REG(SOR_DP_MN1); + DUMP_REG(SOR_DP_PADCTL0); + DUMP_REG(SOR_DP_PADCTL1); + DUMP_REG(SOR_DP_DEBUG0); + DUMP_REG(SOR_DP_DEBUG1); + DUMP_REG(SOR_DP_SPARE0); + DUMP_REG(SOR_DP_SPARE1); DUMP_REG(SOR_DP_AUDIO_CTRL); 
DUMP_REG(SOR_DP_AUDIO_HBLANK_SYMBOLS); DUMP_REG(SOR_DP_AUDIO_VBLANK_SYMBOLS); DUMP_REG(SOR_DP_GENERIC_INFOFRAME_HEADER); - DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_0); - DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_1); - DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_2); - DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_3); - DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_4); - DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_5); - DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_6); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK0); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK1); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK2); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK3); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK4); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK5); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK6); DUMP_REG(SOR_DP_TPG); DUMP_REG(SOR_DP_TPG_CONFIG); - DUMP_REG(SOR_DP_LQ_CSTM_0); - DUMP_REG(SOR_DP_LQ_CSTM_1); - DUMP_REG(SOR_DP_LQ_CSTM_2); + DUMP_REG(SOR_DP_LQ_CSTM0); + DUMP_REG(SOR_DP_LQ_CSTM1); + DUMP_REG(SOR_DP_LQ_CSTM2); #undef DUMP_REG - return 0; +unlock: + drm_modeset_unlock_all(drm); + return err; } static const struct drm_info_list debugfs_files[] = { + { "crc", tegra_sor_show_crc, 0, NULL }, { "regs", tegra_sor_show_regs, 0, NULL }, }; static int tegra_sor_debugfs_init(struct tegra_sor *sor, struct drm_minor *minor) { - struct dentry *entry; + const char *name = sor->soc->supports_dp ? "sor1" : "sor"; unsigned int i; - int err = 0; + int err; - sor->debugfs = debugfs_create_dir("sor", minor->debugfs_root); + sor->debugfs = debugfs_create_dir(name, minor->debugfs_root); if (!sor->debugfs) return -ENOMEM; @@ -835,14 +1012,9 @@ static int tegra_sor_debugfs_init(struct tegra_sor *sor, if (err < 0) goto free; - entry = debugfs_create_file("crc", 0644, sor->debugfs, sor, - &tegra_sor_crc_fops); - if (!entry) { - err = -ENOMEM; - goto free; - } + sor->minor = minor; - return err; + return 0; free: kfree(sor->debugfs_files); @@ -860,15 +1032,10 @@ static void tegra_sor_debugfs_exit(struct tegra_sor *sor) sor->minor = NULL; kfree(sor->debugfs_files); - sor->debugfs = NULL; - - debugfs_remove_recursive(sor->debugfs); sor->debugfs_files = NULL; -} -static int tegra_sor_connector_dpms(struct drm_connector *connector, int mode) -{ - return 0; + debugfs_remove_recursive(sor->debugfs); + sor->debugfs = NULL; } static enum drm_connector_status @@ -880,11 +1047,11 @@ tegra_sor_connector_detect(struct drm_connector *connector, bool force) if (sor->dpaux) return tegra_dpaux_detect(sor->dpaux); - return connector_status_unknown; + return tegra_output_connector_detect(connector, force); } static const struct drm_connector_funcs tegra_sor_connector_funcs = { - .dpms = tegra_sor_connector_dpms, + .dpms = drm_atomic_helper_connector_dpms, .reset = drm_atomic_helper_connector_reset, .detect = tegra_sor_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, @@ -927,22 +1094,102 @@ static const struct drm_encoder_funcs tegra_sor_encoder_funcs = { .destroy = tegra_output_encoder_destroy, }; -static void tegra_sor_encoder_dpms(struct drm_encoder *encoder, int mode) +static void tegra_sor_edp_disable(struct drm_encoder *encoder) { -} + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + struct tegra_sor *sor = to_sor(output); + u32 value; + int err; -static void tegra_sor_encoder_prepare(struct drm_encoder *encoder) -{ + if (output->panel) + drm_panel_disable(output->panel); + + err = tegra_sor_detach(sor); + if (err < 0) + dev_err(sor->dev, "failed to detach 
SOR: %d\n", err); + + tegra_sor_writel(sor, 0, SOR_STATE1); + tegra_sor_update(sor); + + /* + * The following accesses registers of the display controller, so make + * sure it's only executed when the output is attached to one. + */ + if (dc) { + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); + value &= ~SOR_ENABLE; + tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); + + tegra_dc_commit(dc); + } + + err = tegra_sor_power_down(sor); + if (err < 0) + dev_err(sor->dev, "failed to power down SOR: %d\n", err); + + if (sor->dpaux) { + err = tegra_dpaux_disable(sor->dpaux); + if (err < 0) + dev_err(sor->dev, "failed to disable DP: %d\n", err); + } + + err = tegra_io_rail_power_off(TEGRA_IO_RAIL_LVDS); + if (err < 0) + dev_err(sor->dev, "failed to power off I/O rail: %d\n", err); + + if (output->panel) + drm_panel_unprepare(output->panel); + + reset_control_assert(sor->rst); + clk_disable_unprepare(sor->clk); } -static void tegra_sor_encoder_commit(struct drm_encoder *encoder) +#if 0 +static int calc_h_ref_to_sync(const struct drm_display_mode *mode, + unsigned int *value) { + unsigned int hfp, hsw, hbp, a = 0, b; + + hfp = mode->hsync_start - mode->hdisplay; + hsw = mode->hsync_end - mode->hsync_start; + hbp = mode->htotal - mode->hsync_end; + + pr_info("hfp: %u, hsw: %u, hbp: %u\n", hfp, hsw, hbp); + + b = hfp - 1; + + pr_info("a: %u, b: %u\n", a, b); + pr_info("a + hsw + hbp = %u\n", a + hsw + hbp); + + if (a + hsw + hbp <= 11) { + a = 1 + 11 - hsw - hbp; + pr_info("a: %u\n", a); + } + + if (a > b) + return -EINVAL; + + if (hsw < 1) + return -EINVAL; + + if (mode->hdisplay < 16) + return -EINVAL; + + if (value) { + if (b > a && a % 2) + *value = a + 1; + else + *value = a; + } + + return 0; } +#endif -static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted) +static void tegra_sor_edp_enable(struct drm_encoder *encoder) { + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct tegra_output *output = encoder_to_output(encoder); struct tegra_dc *dc = to_tegra_dc(encoder->crtc); unsigned int vbe, vse, hbe, hse, vbs, hbs, i; @@ -953,14 +1200,9 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, int err = 0; u32 value; - mutex_lock(&sor->lock); - - if (sor->enabled) - goto unlock; - err = clk_prepare_enable(sor->clk); if (err < 0) - goto unlock; + dev_err(sor->dev, "failed to enable clock: %d\n", err); reset_control_deassert(sor->rst); @@ -979,7 +1221,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, if (err < 0) { dev_err(sor->dev, "failed to probe eDP link: %d\n", err); - goto unlock; + return; } } @@ -1000,40 +1242,40 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK; tegra_sor_writel(sor, value, SOR_CLK_CNTRL); - value = tegra_sor_readl(sor, SOR_PLL_2); - value &= ~SOR_PLL_2_BANDGAP_POWERDOWN; - tegra_sor_writel(sor, value, SOR_PLL_2); + value = tegra_sor_readl(sor, SOR_PLL2); + value &= ~SOR_PLL2_BANDGAP_POWERDOWN; + tegra_sor_writel(sor, value, SOR_PLL2); usleep_range(20, 100); - value = tegra_sor_readl(sor, SOR_PLL_3); - value |= SOR_PLL_3_PLL_VDD_MODE_V3_3; - tegra_sor_writel(sor, value, SOR_PLL_3); + value = tegra_sor_readl(sor, SOR_PLL3); + value |= SOR_PLL3_PLL_VDD_MODE_3V3; + tegra_sor_writel(sor, value, SOR_PLL3); - value = SOR_PLL_0_ICHPMP(0xf) | SOR_PLL_0_VCOCAP_RST | - SOR_PLL_0_PLLREG_LEVEL_V45 | SOR_PLL_0_RESISTOR_EXT; - tegra_sor_writel(sor, value, SOR_PLL_0); 
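Two things happen at once in these tegra_sor_edp_enable() hunks: the error paths that used to jump to an unlock label are flattened into plain dev_err() logging (the sor->lock mutex and sor->enabled tracking were deleted further up), and the register mnemonics are renamed by dropping the underscore before the instance index (SOR_PLL_0 becomes SOR_PLL0, SOR_DP_PADCTL_0 becomes SOR_DP_PADCTL0, SOR_SUPER_STATE_1 becomes SOR_SUPER_STATE1, and so on), which is why most of the hunks are mechanical one-for-one substitutions. The code also repeats a single read-modify-write idiom throughout; a hypothetical helper, not part of this patch, that captures the pattern:

/* Sketch only: clear, then set, bits in a SOR register. */
static void tegra_sor_clrset(struct tegra_sor *sor, u32 clear, u32 set,
			     unsigned int offset)
{
	u32 value = tegra_sor_readl(sor, offset);

	value &= ~clear;
	value |= set;

	tegra_sor_writel(sor, value, offset);
}

With such a helper, a step like the PLL2 port power-up below would read:
	tegra_sor_clrset(sor, SOR_PLL2_PORT_POWERDOWN, 0, SOR_PLL2);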
+ value = SOR_PLL0_ICHPMP(0xf) | SOR_PLL0_VCOCAP_RST | + SOR_PLL0_PLLREG_LEVEL_V45 | SOR_PLL0_RESISTOR_EXT; + tegra_sor_writel(sor, value, SOR_PLL0); - value = tegra_sor_readl(sor, SOR_PLL_2); - value |= SOR_PLL_2_SEQ_PLLCAPPD; - value &= ~SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE; - value |= SOR_PLL_2_LVDS_ENABLE; - tegra_sor_writel(sor, value, SOR_PLL_2); + value = tegra_sor_readl(sor, SOR_PLL2); + value |= SOR_PLL2_SEQ_PLLCAPPD; + value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE; + value |= SOR_PLL2_LVDS_ENABLE; + tegra_sor_writel(sor, value, SOR_PLL2); - value = SOR_PLL_1_TERM_COMPOUT | SOR_PLL_1_TMDS_TERM; - tegra_sor_writel(sor, value, SOR_PLL_1); + value = SOR_PLL1_TERM_COMPOUT | SOR_PLL1_TMDS_TERM; + tegra_sor_writel(sor, value, SOR_PLL1); while (true) { - value = tegra_sor_readl(sor, SOR_PLL_2); - if ((value & SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE) == 0) + value = tegra_sor_readl(sor, SOR_PLL2); + if ((value & SOR_PLL2_SEQ_PLLCAPPD_ENFORCE) == 0) break; usleep_range(250, 1000); } - value = tegra_sor_readl(sor, SOR_PLL_2); - value &= ~SOR_PLL_2_POWERDOWN_OVERRIDE; - value &= ~SOR_PLL_2_PORT_POWERDOWN; - tegra_sor_writel(sor, value, SOR_PLL_2); + value = tegra_sor_readl(sor, SOR_PLL2); + value &= ~SOR_PLL2_POWERDOWN_OVERRIDE; + value &= ~SOR_PLL2_PORT_POWERDOWN; + tegra_sor_writel(sor, value, SOR_PLL2); /* * power up @@ -1046,51 +1288,49 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, tegra_sor_writel(sor, value, SOR_CLK_CNTRL); /* step 1 */ - value = tegra_sor_readl(sor, SOR_PLL_2); - value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL_2_PORT_POWERDOWN | - SOR_PLL_2_BANDGAP_POWERDOWN; - tegra_sor_writel(sor, value, SOR_PLL_2); + value = tegra_sor_readl(sor, SOR_PLL2); + value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL2_PORT_POWERDOWN | + SOR_PLL2_BANDGAP_POWERDOWN; + tegra_sor_writel(sor, value, SOR_PLL2); - value = tegra_sor_readl(sor, SOR_PLL_0); - value |= SOR_PLL_0_VCOPD | SOR_PLL_0_POWER_OFF; - tegra_sor_writel(sor, value, SOR_PLL_0); + value = tegra_sor_readl(sor, SOR_PLL0); + value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR; + tegra_sor_writel(sor, value, SOR_PLL0); - value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); + value = tegra_sor_readl(sor, SOR_DP_PADCTL0); value &= ~SOR_DP_PADCTL_PAD_CAL_PD; - tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); + tegra_sor_writel(sor, value, SOR_DP_PADCTL0); /* step 2 */ err = tegra_io_rail_power_on(TEGRA_IO_RAIL_LVDS); - if (err < 0) { + if (err < 0) dev_err(sor->dev, "failed to power on I/O rail: %d\n", err); - goto unlock; - } usleep_range(5, 100); /* step 3 */ - value = tegra_sor_readl(sor, SOR_PLL_2); - value &= ~SOR_PLL_2_BANDGAP_POWERDOWN; - tegra_sor_writel(sor, value, SOR_PLL_2); + value = tegra_sor_readl(sor, SOR_PLL2); + value &= ~SOR_PLL2_BANDGAP_POWERDOWN; + tegra_sor_writel(sor, value, SOR_PLL2); usleep_range(20, 100); /* step 4 */ - value = tegra_sor_readl(sor, SOR_PLL_0); - value &= ~SOR_PLL_0_POWER_OFF; - value &= ~SOR_PLL_0_VCOPD; - tegra_sor_writel(sor, value, SOR_PLL_0); + value = tegra_sor_readl(sor, SOR_PLL0); + value &= ~SOR_PLL0_VCOPD; + value &= ~SOR_PLL0_PWR; + tegra_sor_writel(sor, value, SOR_PLL0); - value = tegra_sor_readl(sor, SOR_PLL_2); - value &= ~SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE; - tegra_sor_writel(sor, value, SOR_PLL_2); + value = tegra_sor_readl(sor, SOR_PLL2); + value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE; + tegra_sor_writel(sor, value, SOR_PLL2); usleep_range(200, 1000); /* step 5 */ - value = tegra_sor_readl(sor, SOR_PLL_2); - value &= ~SOR_PLL_2_PORT_POWERDOWN; - tegra_sor_writel(sor, value, SOR_PLL_2); + value = 
tegra_sor_readl(sor, SOR_PLL2); + value &= ~SOR_PLL2_PORT_POWERDOWN; + tegra_sor_writel(sor, value, SOR_PLL2); /* switch to DP clock */ err = clk_set_parent(sor->clk, sor->clk_dp); @@ -1098,7 +1338,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, dev_err(sor->dev, "failed to set DP parent clock: %d\n", err); /* power DP lanes */ - value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); + value = tegra_sor_readl(sor, SOR_DP_PADCTL0); if (link.num_lanes <= 2) value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2); @@ -1115,12 +1355,12 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, else value |= SOR_DP_PADCTL_PD_TXD_0; - tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); + tegra_sor_writel(sor, value, SOR_DP_PADCTL0); - value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0); + value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK; value |= SOR_DP_LINKCTL_LANE_COUNT(link.num_lanes); - tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0); + tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); /* start lane sequencer */ value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN | @@ -1142,14 +1382,14 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, tegra_sor_writel(sor, value, SOR_CLK_CNTRL); /* set linkctl */ - value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0); + value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); value |= SOR_DP_LINKCTL_ENABLE; value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK; value |= SOR_DP_LINKCTL_TU_SIZE(config.tu_size); value |= SOR_DP_LINKCTL_ENHANCED_FRAME; - tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0); + tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); for (i = 0, value = 0; i < 4; i++) { unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | @@ -1160,7 +1400,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, tegra_sor_writel(sor, value, SOR_DP_TPG); - value = tegra_sor_readl(sor, SOR_DP_CONFIG_0); + value = tegra_sor_readl(sor, SOR_DP_CONFIG0); value &= ~SOR_DP_CONFIG_WATERMARK_MASK; value |= SOR_DP_CONFIG_WATERMARK(config.watermark); @@ -1177,7 +1417,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE; value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE; - tegra_sor_writel(sor, value, SOR_DP_CONFIG_0); + tegra_sor_writel(sor, value, SOR_DP_CONFIG0); value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS); value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK; @@ -1190,33 +1430,27 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS); /* enable pad calibration logic */ - value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); + value = tegra_sor_readl(sor, SOR_DP_PADCTL0); value |= SOR_DP_PADCTL_PAD_CAL_PD; - tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); + tegra_sor_writel(sor, value, SOR_DP_PADCTL0); if (sor->dpaux) { u8 rate, lanes; err = drm_dp_link_probe(aux, &link); - if (err < 0) { + if (err < 0) dev_err(sor->dev, "failed to probe eDP link: %d\n", err); - goto unlock; - } err = drm_dp_link_power_up(aux, &link); - if (err < 0) { + if (err < 0) dev_err(sor->dev, "failed to power up eDP link: %d\n", err); - goto unlock; - } err = drm_dp_link_configure(aux, &link); - if (err < 0) { + if (err < 0) dev_err(sor->dev, "failed to configure eDP link: %d\n", err); - goto unlock; - } rate = drm_dp_link_rate_to_bw_code(link.rate); lanes = link.num_lanes; @@ -1226,14 +1460,14 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate); 
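One detail worth calling out in the lane programming above: SOR_DP_LINKCTL_LANE_COUNT() (defined in sor.h further down in this patch) encodes the lane count as a contiguous enable mask rather than a binary number:

/*
 * SOR_DP_LINKCTL_LANE_COUNT(x) expands to ((1 << (x)) - 1) << 16,
 * i.e. one bit per active lane:
 *
 *   LANE_COUNT(1) -> 0x1 << 16
 *   LANE_COUNT(2) -> 0x3 << 16
 *   LANE_COUNT(4) -> 0xf << 16
 */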
tegra_sor_writel(sor, value, SOR_CLK_CNTRL); - value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0); + value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK; value |= SOR_DP_LINKCTL_LANE_COUNT(lanes); if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) value |= SOR_DP_LINKCTL_ENHANCED_FRAME; - tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0); + tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); /* disable training pattern generator */ @@ -1250,17 +1484,14 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, if (err < 0) { dev_err(sor->dev, "DP fast link training failed: %d\n", err); - goto unlock; } dev_dbg(sor->dev, "fast link training succeeded\n"); } err = tegra_sor_power_up(sor, 250); - if (err < 0) { + if (err < 0) dev_err(sor->dev, "failed to power up SOR: %d\n", err); - goto unlock; - } /* * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete @@ -1296,7 +1527,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, break; } - tegra_sor_writel(sor, value, SOR_STATE_1); + tegra_sor_writel(sor, value, SOR_STATE1); /* * TODO: The video timing programming below doesn't seem to match the @@ -1304,25 +1535,27 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, */ value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff); - tegra_sor_writel(sor, value, SOR_HEAD_STATE_1(0)); + tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe)); vse = mode->vsync_end - mode->vsync_start - 1; hse = mode->hsync_end - mode->hsync_start - 1; value = ((vse & 0x7fff) << 16) | (hse & 0x7fff); - tegra_sor_writel(sor, value, SOR_HEAD_STATE_2(0)); + tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe)); vbe = vse + (mode->vsync_start - mode->vdisplay); hbe = hse + (mode->hsync_start - mode->hdisplay); value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff); - tegra_sor_writel(sor, value, SOR_HEAD_STATE_3(0)); + tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe)); vbs = vbe + mode->vdisplay; hbs = hbe + mode->hdisplay; value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff); - tegra_sor_writel(sor, value, SOR_HEAD_STATE_4(0)); + tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe)); + + tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe)); /* CSTM (LVDS, link A/B, upper) */ value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B | @@ -1331,10 +1564,8 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, /* PWM setup */ err = tegra_sor_setup_pwm(sor, 250); - if (err < 0) { + if (err < 0) dev_err(sor->dev, "failed to setup PWM: %d\n", err); - goto unlock; - } tegra_sor_update(sor); @@ -1345,147 +1576,610 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, tegra_dc_commit(dc); err = tegra_sor_attach(sor); - if (err < 0) { + if (err < 0) dev_err(sor->dev, "failed to attach SOR: %d\n", err); - goto unlock; - } err = tegra_sor_wakeup(sor); - if (err < 0) { + if (err < 0) dev_err(sor->dev, "failed to enable DC: %d\n", err); - goto unlock; - } if (output->panel) drm_panel_enable(output->panel); - - sor->enabled = true; - -unlock: - mutex_unlock(&sor->lock); } -static void tegra_sor_encoder_disable(struct drm_encoder *encoder) +static int +tegra_sor_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) { struct tegra_output *output = encoder_to_output(encoder); - struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + struct tegra_dc *dc = to_tegra_dc(conn_state->crtc); + unsigned long pclk = crtc_state->mode.clock * 
1000; struct tegra_sor *sor = to_sor(output); - u32 value; int err; - mutex_lock(&sor->lock); + err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent, + pclk, 0); + if (err < 0) { + dev_err(output->dev, "failed to setup CRTC state: %d\n", err); + return err; + } - if (!sor->enabled) - goto unlock; + return 0; +} - if (output->panel) - drm_panel_disable(output->panel); +static const struct drm_encoder_helper_funcs tegra_sor_edp_helpers = { + .disable = tegra_sor_edp_disable, + .enable = tegra_sor_edp_enable, + .atomic_check = tegra_sor_encoder_atomic_check, +}; - err = tegra_sor_detach(sor); - if (err < 0) { - dev_err(sor->dev, "failed to detach SOR: %d\n", err); - goto unlock; +static inline u32 tegra_sor_hdmi_subpack(const u8 *ptr, size_t size) +{ + u32 value = 0; + size_t i; + + for (i = size; i > 0; i--) + value = (value << 8) | ptr[i - 1]; + + return value; +} + +static void tegra_sor_hdmi_write_infopack(struct tegra_sor *sor, + const void *data, size_t size) +{ + const u8 *ptr = data; + unsigned long offset; + size_t i, j; + u32 value; + + switch (ptr[0]) { + case HDMI_INFOFRAME_TYPE_AVI: + offset = SOR_HDMI_AVI_INFOFRAME_HEADER; + break; + + case HDMI_INFOFRAME_TYPE_AUDIO: + offset = SOR_HDMI_AUDIO_INFOFRAME_HEADER; + break; + + case HDMI_INFOFRAME_TYPE_VENDOR: + offset = SOR_HDMI_VSI_INFOFRAME_HEADER; + break; + + default: + dev_err(sor->dev, "unsupported infoframe type: %02x\n", + ptr[0]); + return; } - tegra_sor_writel(sor, 0, SOR_STATE_1); - tegra_sor_update(sor); + value = INFOFRAME_HEADER_TYPE(ptr[0]) | + INFOFRAME_HEADER_VERSION(ptr[1]) | + INFOFRAME_HEADER_LEN(ptr[2]); + tegra_sor_writel(sor, value, offset); + offset++; /* - * The following accesses registers of the display controller, so make - * sure it's only executed when the output is attached to one. 
+ * Each subpack contains 7 bytes, divided into: + * - subpack_low: bytes 0 - 3 + * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00) */ - if (dc) { - value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); - value &= ~SOR_ENABLE; - tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); + for (i = 3, j = 0; i < size; i += 7, j += 8) { + size_t rem = size - i, num = min_t(size_t, rem, 4); - tegra_dc_commit(dc); - } + value = tegra_sor_hdmi_subpack(&ptr[i], num); + tegra_sor_writel(sor, value, offset++); - err = tegra_sor_power_down(sor); - if (err < 0) { - dev_err(sor->dev, "failed to power down SOR: %d\n", err); - goto unlock; + num = min_t(size_t, rem - num, 3); + + value = tegra_sor_hdmi_subpack(&ptr[i + 4], num); + tegra_sor_writel(sor, value, offset++); } +} - if (sor->dpaux) { - err = tegra_dpaux_disable(sor->dpaux); - if (err < 0) { - dev_err(sor->dev, "failed to disable DP: %d\n", err); - goto unlock; - } +static int +tegra_sor_hdmi_setup_avi_infoframe(struct tegra_sor *sor, + const struct drm_display_mode *mode) +{ + u8 buffer[HDMI_INFOFRAME_SIZE(AVI)]; + struct hdmi_avi_infoframe frame; + u32 value; + int err; + + /* disable AVI infoframe */ + value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL); + value &= ~INFOFRAME_CTRL_SINGLE; + value &= ~INFOFRAME_CTRL_OTHER; + value &= ~INFOFRAME_CTRL_ENABLE; + tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL); + + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); + if (err < 0) { + dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err); + return err; } - err = tegra_io_rail_power_off(TEGRA_IO_RAIL_LVDS); + err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); if (err < 0) { - dev_err(sor->dev, "failed to power off I/O rail: %d\n", err); - goto unlock; + dev_err(sor->dev, "failed to pack AVI infoframe: %d\n", err); + return err; } - if (output->panel) - drm_panel_unprepare(output->panel); + tegra_sor_hdmi_write_infopack(sor, buffer, err); - clk_disable_unprepare(sor->clk); - reset_control_assert(sor->rst); + /* enable AVI infoframe */ + value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL); + value |= INFOFRAME_CTRL_CHECKSUM_ENABLE; + value |= INFOFRAME_CTRL_ENABLE; + tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL); + + return 0; +} - sor->enabled = false; +static void tegra_sor_hdmi_disable_audio_infoframe(struct tegra_sor *sor) +{ + u32 value; -unlock: - mutex_unlock(&sor->lock); + value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_INFOFRAME_CTRL); + value &= ~INFOFRAME_CTRL_ENABLE; + tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL); } -static int -tegra_sor_encoder_atomic_check(struct drm_encoder *encoder, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) +static struct tegra_sor_hdmi_settings * +tegra_sor_hdmi_find_settings(struct tegra_sor *sor, unsigned long frequency) +{ + unsigned int i; + + for (i = 0; i < sor->num_settings; i++) + if (frequency <= sor->settings[i].frequency) + return &sor->settings[i]; + + return NULL; +} + +static void tegra_sor_hdmi_disable(struct drm_encoder *encoder) { struct tegra_output *output = encoder_to_output(encoder); - struct tegra_dc *dc = to_tegra_dc(conn_state->crtc); - unsigned long pclk = crtc_state->mode.clock * 1000; + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); struct tegra_sor *sor = to_sor(output); + u32 value; int err; - err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent, - pclk, 0); - if (err < 0) { - dev_err(output->dev, "failed to setup CRTC state: %d\n", err); - return err; + 
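tegra_sor_hdmi_subpack() above assembles each register word least-significant byte first; a worked example (editor's illustration of the loop, not patch content):

/*
 * For ptr = { 0x0d, 0x02, 0x82 } and size = 3, the loop produces
 *
 *   (0x82 << 16) | (0x02 << 8) | 0x0d == 0x0082020d
 *
 * so each 7-byte infoframe subpack is written as a full 4-byte low
 * word followed by a 3-byte high word whose top byte stays zero.
 */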
err = tegra_sor_detach(sor); + if (err < 0) + dev_err(sor->dev, "failed to detach SOR: %d\n", err); + + tegra_sor_writel(sor, 0, SOR_STATE1); + tegra_sor_update(sor); + + /* disable display to SOR clock */ + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); + value &= ~SOR1_TIMING_CYA; + value &= ~SOR1_ENABLE; + tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); + + tegra_dc_commit(dc); + + err = tegra_sor_power_down(sor); + if (err < 0) + dev_err(sor->dev, "failed to power down SOR: %d\n", err); + + err = tegra_io_rail_power_off(TEGRA_IO_RAIL_HDMI); + if (err < 0) + dev_err(sor->dev, "failed to power off HDMI rail: %d\n", err); + + reset_control_assert(sor->rst); + usleep_range(1000, 2000); + clk_disable_unprepare(sor->clk); +} + +static void tegra_sor_hdmi_enable(struct drm_encoder *encoder) +{ + struct tegra_output *output = encoder_to_output(encoder); + unsigned int h_ref_to_sync = 1, pulse_start, max_ac; + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + unsigned int vbe, vse, hbe, hse, vbs, hbs, div; + struct tegra_sor_hdmi_settings *settings; + struct tegra_sor *sor = to_sor(output); + struct drm_display_mode *mode; + struct drm_display_info *info; + u32 value; + int err; + + mode = &encoder->crtc->state->adjusted_mode; + info = &output->connector.display_info; + + err = clk_prepare_enable(sor->clk); + if (err < 0) + dev_err(sor->dev, "failed to enable clock: %d\n", err); + + usleep_range(1000, 2000); + + reset_control_deassert(sor->rst); + + err = clk_set_parent(sor->clk, sor->clk_safe); + if (err < 0) + dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); + + div = clk_get_rate(sor->clk) / 1000000 * 4; + + err = tegra_io_rail_power_on(TEGRA_IO_RAIL_HDMI); + if (err < 0) + dev_err(sor->dev, "failed to power on HDMI rail: %d\n", err); + + usleep_range(20, 100); + + value = tegra_sor_readl(sor, SOR_PLL2); + value &= ~SOR_PLL2_BANDGAP_POWERDOWN; + tegra_sor_writel(sor, value, SOR_PLL2); + + usleep_range(20, 100); + + value = tegra_sor_readl(sor, SOR_PLL3); + value &= ~SOR_PLL3_PLL_VDD_MODE_3V3; + tegra_sor_writel(sor, value, SOR_PLL3); + + value = tegra_sor_readl(sor, SOR_PLL0); + value &= ~SOR_PLL0_VCOPD; + value &= ~SOR_PLL0_PWR; + tegra_sor_writel(sor, value, SOR_PLL0); + + value = tegra_sor_readl(sor, SOR_PLL2); + value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE; + tegra_sor_writel(sor, value, SOR_PLL2); + + usleep_range(200, 400); + + value = tegra_sor_readl(sor, SOR_PLL2); + value &= ~SOR_PLL2_POWERDOWN_OVERRIDE; + value &= ~SOR_PLL2_PORT_POWERDOWN; + tegra_sor_writel(sor, value, SOR_PLL2); + + usleep_range(20, 100); + + value = tegra_sor_readl(sor, SOR_DP_PADCTL0); + value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 | + SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2; + tegra_sor_writel(sor, value, SOR_DP_PADCTL0); + + while (true) { + value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL); + if ((value & SOR_LANE_SEQ_CTL_STATE_BUSY) == 0) + break; + + usleep_range(250, 1000); } - return 0; + value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN | + SOR_LANE_SEQ_CTL_POWER_STATE_UP | SOR_LANE_SEQ_CTL_DELAY(5); + tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL); + + while (true) { + value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL); + if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0) + break; + + usleep_range(250, 1000); + } + + value = tegra_sor_readl(sor, SOR_CLK_CNTRL); + value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK; + value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK; + + if (mode->clock < 340000) + value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G2_70; + else + value |= 
SOR_CLK_CNTRL_DP_LINK_SPEED_G5_40; + + value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK; + tegra_sor_writel(sor, value, SOR_CLK_CNTRL); + + value = tegra_sor_readl(sor, SOR_DP_SPARE0); + value |= SOR_DP_SPARE_DISP_VIDEO_PREAMBLE; + value &= ~SOR_DP_SPARE_PANEL_INTERNAL; + value |= SOR_DP_SPARE_SEQ_ENABLE; + tegra_sor_writel(sor, value, SOR_DP_SPARE0); + + value = SOR_SEQ_CTL_PU_PC(0) | SOR_SEQ_CTL_PU_PC_ALT(0) | + SOR_SEQ_CTL_PD_PC(8) | SOR_SEQ_CTL_PD_PC_ALT(8); + tegra_sor_writel(sor, value, SOR_SEQ_CTL); + + value = SOR_SEQ_INST_DRIVE_PWM_OUT_LO | SOR_SEQ_INST_HALT | + SOR_SEQ_INST_WAIT_VSYNC | SOR_SEQ_INST_WAIT(1); + tegra_sor_writel(sor, value, SOR_SEQ_INST(0)); + tegra_sor_writel(sor, value, SOR_SEQ_INST(8)); + + /* program the reference clock */ + value = SOR_REFCLK_DIV_INT(div) | SOR_REFCLK_DIV_FRAC(div); + tegra_sor_writel(sor, value, SOR_REFCLK); + + /* XXX don't hardcode */ + value = SOR_XBAR_CTRL_LINK1_XSEL(4, 4) | + SOR_XBAR_CTRL_LINK1_XSEL(3, 3) | + SOR_XBAR_CTRL_LINK1_XSEL(2, 2) | + SOR_XBAR_CTRL_LINK1_XSEL(1, 1) | + SOR_XBAR_CTRL_LINK1_XSEL(0, 0) | + SOR_XBAR_CTRL_LINK0_XSEL(4, 4) | + SOR_XBAR_CTRL_LINK0_XSEL(3, 3) | + SOR_XBAR_CTRL_LINK0_XSEL(2, 0) | + SOR_XBAR_CTRL_LINK0_XSEL(1, 1) | + SOR_XBAR_CTRL_LINK0_XSEL(0, 2); + tegra_sor_writel(sor, value, SOR_XBAR_CTRL); + + tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL); + + err = clk_set_parent(sor->clk, sor->clk_parent); + if (err < 0) + dev_err(sor->dev, "failed to set parent clock: %d\n", err); + + value = SOR_INPUT_CONTROL_HDMI_SRC_SELECT(dc->pipe); + + /* XXX is this the proper check? */ + if (mode->clock < 75000) + value |= SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED; + + tegra_sor_writel(sor, value, SOR_INPUT_CONTROL); + + max_ac = ((mode->htotal - mode->hdisplay) - SOR_REKEY - 18) / 32; + + value = SOR_HDMI_CTRL_ENABLE | SOR_HDMI_CTRL_MAX_AC_PACKET(max_ac) | + SOR_HDMI_CTRL_AUDIO_LAYOUT | SOR_HDMI_CTRL_REKEY(SOR_REKEY); + tegra_sor_writel(sor, value, SOR_HDMI_CTRL); + + /* H_PULSE2 setup */ + pulse_start = h_ref_to_sync + (mode->hsync_end - mode->hsync_start) + + (mode->htotal - mode->hsync_end) - 10; + + value = PULSE_LAST_END_A | PULSE_QUAL_VACTIVE | + PULSE_POLARITY_HIGH | PULSE_MODE_NORMAL; + tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL); + + value = PULSE_END(pulse_start + 8) | PULSE_START(pulse_start); + tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A); + + value = tegra_dc_readl(dc, DC_DISP_DISP_SIGNAL_OPTIONS0); + value |= H_PULSE2_ENABLE; + tegra_dc_writel(dc, value, DC_DISP_DISP_SIGNAL_OPTIONS0); + + /* infoframe setup */ + err = tegra_sor_hdmi_setup_avi_infoframe(sor, mode); + if (err < 0) + dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err); + + /* XXX HDMI audio support not implemented yet */ + tegra_sor_hdmi_disable_audio_infoframe(sor); + + /* use single TMDS protocol */ + value = tegra_sor_readl(sor, SOR_STATE1); + value &= ~SOR_STATE_ASY_PROTOCOL_MASK; + value |= SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A; + tegra_sor_writel(sor, value, SOR_STATE1); + + /* power up pad calibration */ + value = tegra_sor_readl(sor, SOR_DP_PADCTL0); + value &= ~SOR_DP_PADCTL_PAD_CAL_PD; + tegra_sor_writel(sor, value, SOR_DP_PADCTL0); + + /* production settings */ + settings = tegra_sor_hdmi_find_settings(sor, mode->clock * 1000); + if (IS_ERR(settings)) { + dev_err(sor->dev, "no settings for pixel clock %d Hz: %ld\n", + mode->clock * 1000, PTR_ERR(settings)); + return; + } + + value = tegra_sor_readl(sor, SOR_PLL0); + value &= ~SOR_PLL0_ICHPMP_MASK; + value &= ~SOR_PLL0_VCOCAP_MASK; + value |= 
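Note that tegra_sor_hdmi_find_settings() returns NULL when no table entry matches, while the caller above tests IS_ERR(settings), which is never true for a NULL pointer. If the lookup is expected to be able to fail, the check would presumably need to be along these lines (editor's sketch, not what the patch applies):

	settings = tegra_sor_hdmi_find_settings(sor, mode->clock * 1000);
	if (!settings) {
		dev_err(sor->dev, "no settings for pixel clock %d Hz\n",
			mode->clock * 1000);
		return;
	}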
SOR_PLL0_ICHPMP(settings->ichpmp); + value |= SOR_PLL0_VCOCAP(settings->vcocap); + tegra_sor_writel(sor, value, SOR_PLL0); + + tegra_sor_dp_term_calibrate(sor); + + value = tegra_sor_readl(sor, SOR_PLL1); + value &= ~SOR_PLL1_LOADADJ_MASK; + value |= SOR_PLL1_LOADADJ(settings->loadadj); + tegra_sor_writel(sor, value, SOR_PLL1); + + value = tegra_sor_readl(sor, SOR_PLL3); + value &= ~SOR_PLL3_BG_VREF_LEVEL_MASK; + value |= SOR_PLL3_BG_VREF_LEVEL(settings->bg_vref); + tegra_sor_writel(sor, value, SOR_PLL3); + + value = settings->drive_current[0] << 24 | + settings->drive_current[1] << 16 | + settings->drive_current[2] << 8 | + settings->drive_current[3] << 0; + tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0); + + value = settings->preemphasis[0] << 24 | + settings->preemphasis[1] << 16 | + settings->preemphasis[2] << 8 | + settings->preemphasis[3] << 0; + tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0); + + value = tegra_sor_readl(sor, SOR_DP_PADCTL0); + value &= ~SOR_DP_PADCTL_TX_PU_MASK; + value |= SOR_DP_PADCTL_TX_PU_ENABLE; + value |= SOR_DP_PADCTL_TX_PU(settings->tx_pu); + tegra_sor_writel(sor, value, SOR_DP_PADCTL0); + + /* power down pad calibration */ + value = tegra_sor_readl(sor, SOR_DP_PADCTL0); + value |= SOR_DP_PADCTL_PAD_CAL_PD; + tegra_sor_writel(sor, value, SOR_DP_PADCTL0); + + /* miscellaneous display controller settings */ + value = VSYNC_H_POSITION(1); + tegra_dc_writel(dc, value, DC_DISP_DISP_TIMING_OPTIONS); + + value = tegra_dc_readl(dc, DC_DISP_DISP_COLOR_CONTROL); + value &= ~DITHER_CONTROL_MASK; + value &= ~BASE_COLOR_SIZE_MASK; + + switch (info->bpc) { + case 6: + value |= BASE_COLOR_SIZE_666; + break; + + case 8: + value |= BASE_COLOR_SIZE_888; + break; + + default: + WARN(1, "%u bits-per-color not supported\n", info->bpc); + break; + } + + tegra_dc_writel(dc, value, DC_DISP_DISP_COLOR_CONTROL); + + err = tegra_sor_power_up(sor, 250); + if (err < 0) + dev_err(sor->dev, "failed to power up SOR: %d\n", err); + + /* configure mode */ + value = tegra_sor_readl(sor, SOR_STATE1); + value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK; + value &= ~SOR_STATE_ASY_CRC_MODE_MASK; + value &= ~SOR_STATE_ASY_OWNER_MASK; + + value |= SOR_STATE_ASY_CRC_MODE_COMPLETE | + SOR_STATE_ASY_OWNER(dc->pipe + 1); + + if (mode->flags & DRM_MODE_FLAG_PHSYNC) + value &= ~SOR_STATE_ASY_HSYNCPOL; + + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + value |= SOR_STATE_ASY_HSYNCPOL; + + if (mode->flags & DRM_MODE_FLAG_PVSYNC) + value &= ~SOR_STATE_ASY_VSYNCPOL; + + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + value |= SOR_STATE_ASY_VSYNCPOL; + + switch (info->bpc) { + case 8: + value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444; + break; + + case 6: + value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444; + break; + + default: + BUG(); + break; + } + + tegra_sor_writel(sor, value, SOR_STATE1); + + value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe)); + value &= ~SOR_HEAD_STATE_RANGECOMPRESS_MASK; + value &= ~SOR_HEAD_STATE_DYNRANGE_MASK; + tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe)); + + value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe)); + value &= ~SOR_HEAD_STATE_COLORSPACE_MASK; + value |= SOR_HEAD_STATE_COLORSPACE_RGB; + tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe)); + + /* + * TODO: The video timing programming below doesn't seem to match the + * register definitions. 
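 * As a worked example (added here for clarity): for a 1920x1080 CEA
 * mode with hsync_start 2008, hsync_end 2052 and htotal 2200, the
 * code below computes hse = 2052 - 2008 - 1 = 43, then
 * hbe = 43 + (2200 - 2052) = 191 and hbs = 191 + 1920 = 2111; the
 * vertical values follow the same pattern from vsync_start,
 * vsync_end and vtotal.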
+ */ + + value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff); + tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe)); + + /* sync end = sync width - 1 */ + vse = mode->vsync_end - mode->vsync_start - 1; + hse = mode->hsync_end - mode->hsync_start - 1; + + value = ((vse & 0x7fff) << 16) | (hse & 0x7fff); + tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe)); + + /* blank end = sync end + back porch */ + vbe = vse + (mode->vtotal - mode->vsync_end); + hbe = hse + (mode->htotal - mode->hsync_end); + + value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff); + tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe)); + + /* blank start = blank end + active */ + vbs = vbe + mode->vdisplay; + hbs = hbe + mode->hdisplay; + + value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff); + tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe)); + + tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe)); + + tegra_sor_update(sor); + + err = tegra_sor_attach(sor); + if (err < 0) + dev_err(sor->dev, "failed to attach SOR: %d\n", err); + + /* enable display to SOR clock and generate HDMI preamble */ + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); + value |= SOR1_ENABLE | SOR1_TIMING_CYA; + tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); + + tegra_dc_commit(dc); + + err = tegra_sor_wakeup(sor); + if (err < 0) + dev_err(sor->dev, "failed to wakeup SOR: %d\n", err); } -static const struct drm_encoder_helper_funcs tegra_sor_encoder_helper_funcs = { - .dpms = tegra_sor_encoder_dpms, - .prepare = tegra_sor_encoder_prepare, - .commit = tegra_sor_encoder_commit, - .mode_set = tegra_sor_encoder_mode_set, - .disable = tegra_sor_encoder_disable, +static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = { + .disable = tegra_sor_hdmi_disable, + .enable = tegra_sor_hdmi_enable, .atomic_check = tegra_sor_encoder_atomic_check, }; static int tegra_sor_init(struct host1x_client *client) { struct drm_device *drm = dev_get_drvdata(client->parent); + const struct drm_encoder_helper_funcs *helpers = NULL; struct tegra_sor *sor = host1x_client_to_sor(client); + int connector = DRM_MODE_CONNECTOR_Unknown; + int encoder = DRM_MODE_ENCODER_NONE; int err; - if (!sor->dpaux) - return -ENODEV; + if (!sor->dpaux) { + if (sor->soc->supports_hdmi) { + connector = DRM_MODE_CONNECTOR_HDMIA; + encoder = DRM_MODE_ENCODER_TMDS; + helpers = &tegra_sor_hdmi_helpers; + } else if (sor->soc->supports_lvds) { + connector = DRM_MODE_CONNECTOR_LVDS; + encoder = DRM_MODE_ENCODER_LVDS; + } + } else { + if (sor->soc->supports_edp) { + connector = DRM_MODE_CONNECTOR_eDP; + encoder = DRM_MODE_ENCODER_TMDS; + helpers = &tegra_sor_edp_helpers; + } else if (sor->soc->supports_dp) { + connector = DRM_MODE_CONNECTOR_DisplayPort; + encoder = DRM_MODE_ENCODER_TMDS; + } + } sor->output.dev = sor->dev; drm_connector_init(drm, &sor->output.connector, &tegra_sor_connector_funcs, - DRM_MODE_CONNECTOR_eDP); + connector); drm_connector_helper_add(&sor->output.connector, &tegra_sor_connector_helper_funcs); sor->output.connector.dpms = DRM_MODE_DPMS_OFF; drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs, - DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(&sor->output.encoder, - &tegra_sor_encoder_helper_funcs); + encoder); + drm_encoder_helper_add(&sor->output.encoder, helpers); drm_mode_connector_attach_encoder(&sor->output.connector, &sor->output.encoder); @@ -1578,18 +2272,130 @@ static const struct host1x_client_ops sor_client_ops = { .exit = tegra_sor_exit, }; +static const struct tegra_sor_ops tegra_sor_edp_ops = { + 
.name = "eDP", +}; + +static int tegra_sor_hdmi_probe(struct tegra_sor *sor) +{ + int err; + + sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io"); + if (IS_ERR(sor->avdd_io_supply)) { + dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n", + PTR_ERR(sor->avdd_io_supply)); + return PTR_ERR(sor->avdd_io_supply); + } + + err = regulator_enable(sor->avdd_io_supply); + if (err < 0) { + dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n", + err); + return err; + } + + sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll"); + if (IS_ERR(sor->vdd_pll_supply)) { + dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n", + PTR_ERR(sor->vdd_pll_supply)); + return PTR_ERR(sor->vdd_pll_supply); + } + + err = regulator_enable(sor->vdd_pll_supply); + if (err < 0) { + dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n", + err); + return err; + } + + sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi"); + if (IS_ERR(sor->hdmi_supply)) { + dev_err(sor->dev, "cannot get HDMI supply: %ld\n", + PTR_ERR(sor->hdmi_supply)); + return PTR_ERR(sor->hdmi_supply); + } + + err = regulator_enable(sor->hdmi_supply); + if (err < 0) { + dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err); + return err; + } + + return 0; +} + +static int tegra_sor_hdmi_remove(struct tegra_sor *sor) +{ + regulator_disable(sor->hdmi_supply); + regulator_disable(sor->vdd_pll_supply); + regulator_disable(sor->avdd_io_supply); + + return 0; +} + +static const struct tegra_sor_ops tegra_sor_hdmi_ops = { + .name = "HDMI", + .probe = tegra_sor_hdmi_probe, + .remove = tegra_sor_hdmi_remove, +}; + +static const struct tegra_sor_soc tegra124_sor = { + .supports_edp = true, + .supports_lvds = true, + .supports_hdmi = false, + .supports_dp = false, +}; + +static const struct tegra_sor_soc tegra210_sor = { + .supports_edp = true, + .supports_lvds = false, + .supports_hdmi = false, + .supports_dp = false, +}; + +static const struct tegra_sor_soc tegra210_sor1 = { + .supports_edp = false, + .supports_lvds = false, + .supports_hdmi = true, + .supports_dp = true, + + .num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults), + .settings = tegra210_sor_hdmi_defaults, +}; + +static const struct of_device_id tegra_sor_of_match[] = { + { .compatible = "nvidia,tegra210-sor1", .data = &tegra210_sor1 }, + { .compatible = "nvidia,tegra210-sor", .data = &tegra210_sor }, + { .compatible = "nvidia,tegra124-sor", .data = &tegra124_sor }, + { }, +}; +MODULE_DEVICE_TABLE(of, tegra_sor_of_match); + static int tegra_sor_probe(struct platform_device *pdev) { + const struct of_device_id *match; struct device_node *np; struct tegra_sor *sor; struct resource *regs; int err; + match = of_match_device(tegra_sor_of_match, &pdev->dev); + sor = devm_kzalloc(&pdev->dev, sizeof(*sor), GFP_KERNEL); if (!sor) return -ENOMEM; sor->output.dev = sor->dev = &pdev->dev; + sor->soc = match->data; + + sor->settings = devm_kmemdup(&pdev->dev, sor->soc->settings, + sor->soc->num_settings * + sizeof(*sor->settings), + GFP_KERNEL); + if (!sor->settings) + return -ENOMEM; + + sor->num_settings = sor->soc->num_settings; np = of_parse_phandle(pdev->dev.of_node, "nvidia,dpaux", 0); if (np) { @@ -1600,51 +2406,106 @@ static int tegra_sor_probe(struct platform_device *pdev) return -EPROBE_DEFER; } + if (!sor->dpaux) { + if (sor->soc->supports_hdmi) { + sor->ops = &tegra_sor_hdmi_ops; + } else if (sor->soc->supports_lvds) { + dev_err(&pdev->dev, "LVDS not supported yet\n"); + return -ENODEV; + } else { + dev_err(&pdev->dev, "unknown (non-DP) support\n"); + return 
-ENODEV; + } + } else { + if (sor->soc->supports_edp) { + sor->ops = &tegra_sor_edp_ops; + } else if (sor->soc->supports_dp) { + dev_err(&pdev->dev, "DisplayPort not supported yet\n"); + return -ENODEV; + } else { + dev_err(&pdev->dev, "unknown (DP) support\n"); + return -ENODEV; + } + } + err = tegra_output_probe(&sor->output); - if (err < 0) + if (err < 0) { + dev_err(&pdev->dev, "failed to probe output: %d\n", err); return err; + } + + if (sor->ops && sor->ops->probe) { + err = sor->ops->probe(sor); + if (err < 0) { + dev_err(&pdev->dev, "failed to probe %s: %d\n", + sor->ops->name, err); + goto output; + } + } regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); sor->regs = devm_ioremap_resource(&pdev->dev, regs); - if (IS_ERR(sor->regs)) - return PTR_ERR(sor->regs); + if (IS_ERR(sor->regs)) { + err = PTR_ERR(sor->regs); + goto remove; + } sor->rst = devm_reset_control_get(&pdev->dev, "sor"); - if (IS_ERR(sor->rst)) - return PTR_ERR(sor->rst); + if (IS_ERR(sor->rst)) { + err = PTR_ERR(sor->rst); + dev_err(&pdev->dev, "failed to get reset control: %d\n", err); + goto remove; + } sor->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(sor->clk)) - return PTR_ERR(sor->clk); + if (IS_ERR(sor->clk)) { + err = PTR_ERR(sor->clk); + dev_err(&pdev->dev, "failed to get module clock: %d\n", err); + goto remove; + } sor->clk_parent = devm_clk_get(&pdev->dev, "parent"); - if (IS_ERR(sor->clk_parent)) - return PTR_ERR(sor->clk_parent); + if (IS_ERR(sor->clk_parent)) { + err = PTR_ERR(sor->clk_parent); + dev_err(&pdev->dev, "failed to get parent clock: %d\n", err); + goto remove; + } sor->clk_safe = devm_clk_get(&pdev->dev, "safe"); - if (IS_ERR(sor->clk_safe)) - return PTR_ERR(sor->clk_safe); + if (IS_ERR(sor->clk_safe)) { + err = PTR_ERR(sor->clk_safe); + dev_err(&pdev->dev, "failed to get safe clock: %d\n", err); + goto remove; + } sor->clk_dp = devm_clk_get(&pdev->dev, "dp"); - if (IS_ERR(sor->clk_dp)) - return PTR_ERR(sor->clk_dp); + if (IS_ERR(sor->clk_dp)) { + err = PTR_ERR(sor->clk_dp); + dev_err(&pdev->dev, "failed to get DP clock: %d\n", err); + goto remove; + } INIT_LIST_HEAD(&sor->client.list); sor->client.ops = &sor_client_ops; sor->client.dev = &pdev->dev; - mutex_init(&sor->lock); - err = host1x_client_register(&sor->client); if (err < 0) { dev_err(&pdev->dev, "failed to register host1x client: %d\n", err); - return err; + goto remove; } platform_set_drvdata(pdev, sor); return 0; + +remove: + if (sor->ops && sor->ops->remove) + sor->ops->remove(sor); +output: + tegra_output_remove(&sor->output); + return err; } static int tegra_sor_remove(struct platform_device *pdev) @@ -1659,17 +2520,17 @@ static int tegra_sor_remove(struct platform_device *pdev) return err; } + if (sor->ops && sor->ops->remove) { + err = sor->ops->remove(sor); + if (err < 0) + dev_err(&pdev->dev, "failed to remove SOR: %d\n", err); + } + tegra_output_remove(&sor->output); return 0; } -static const struct of_device_id tegra_sor_of_match[] = { - { .compatible = "nvidia,tegra124-sor", }, - { }, -}; -MODULE_DEVICE_TABLE(of, tegra_sor_of_match); - struct platform_driver tegra_sor_driver = { .driver = { .name = "tegra-sor", diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h index a5f8853fedb5..2d31d027e3f6 100644 --- a/drivers/gpu/drm/tegra/sor.h +++ b/drivers/gpu/drm/tegra/sor.h @@ -11,9 +11,9 @@ #define SOR_CTXSW 0x00 -#define SOR_SUPER_STATE_0 0x01 +#define SOR_SUPER_STATE0 0x01 -#define SOR_SUPER_STATE_1 0x02 +#define SOR_SUPER_STATE1 0x02 #define SOR_SUPER_STATE_ATTACHED (1 << 3) #define 
SOR_SUPER_STATE_MODE_NORMAL (1 << 2) #define SOR_SUPER_STATE_HEAD_MODE_MASK (3 << 0) @@ -21,9 +21,9 @@ #define SOR_SUPER_STATE_HEAD_MODE_SNOOZE (1 << 0) #define SOR_SUPER_STATE_HEAD_MODE_SLEEP (0 << 0) -#define SOR_STATE_0 0x03 +#define SOR_STATE0 0x03 -#define SOR_STATE_1 0x04 +#define SOR_STATE1 0x04 #define SOR_STATE_ASY_PIXELDEPTH_MASK (0xf << 17) #define SOR_STATE_ASY_PIXELDEPTH_BPP_18_444 (0x2 << 17) #define SOR_STATE_ASY_PIXELDEPTH_BPP_24_444 (0x5 << 17) @@ -33,19 +33,27 @@ #define SOR_STATE_ASY_PROTOCOL_CUSTOM (0xf << 8) #define SOR_STATE_ASY_PROTOCOL_DP_A (0x8 << 8) #define SOR_STATE_ASY_PROTOCOL_DP_B (0x9 << 8) +#define SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A (0x1 << 8) #define SOR_STATE_ASY_PROTOCOL_LVDS (0x0 << 8) #define SOR_STATE_ASY_CRC_MODE_MASK (0x3 << 6) #define SOR_STATE_ASY_CRC_MODE_NON_ACTIVE (0x2 << 6) #define SOR_STATE_ASY_CRC_MODE_COMPLETE (0x1 << 6) #define SOR_STATE_ASY_CRC_MODE_ACTIVE (0x0 << 6) +#define SOR_STATE_ASY_OWNER_MASK 0xf #define SOR_STATE_ASY_OWNER(x) (((x) & 0xf) << 0) -#define SOR_HEAD_STATE_0(x) (0x05 + (x)) -#define SOR_HEAD_STATE_1(x) (0x07 + (x)) -#define SOR_HEAD_STATE_2(x) (0x09 + (x)) -#define SOR_HEAD_STATE_3(x) (0x0b + (x)) -#define SOR_HEAD_STATE_4(x) (0x0d + (x)) -#define SOR_HEAD_STATE_5(x) (0x0f + (x)) +#define SOR_HEAD_STATE0(x) (0x05 + (x)) +#define SOR_HEAD_STATE_RANGECOMPRESS_MASK (0x1 << 3) +#define SOR_HEAD_STATE_DYNRANGE_MASK (0x1 << 2) +#define SOR_HEAD_STATE_DYNRANGE_VESA (0 << 2) +#define SOR_HEAD_STATE_DYNRANGE_CEA (1 << 2) +#define SOR_HEAD_STATE_COLORSPACE_MASK (0x3 << 0) +#define SOR_HEAD_STATE_COLORSPACE_RGB (0 << 0) +#define SOR_HEAD_STATE1(x) (0x07 + (x)) +#define SOR_HEAD_STATE2(x) (0x09 + (x)) +#define SOR_HEAD_STATE3(x) (0x0b + (x)) +#define SOR_HEAD_STATE4(x) (0x0d + (x)) +#define SOR_HEAD_STATE5(x) (0x0f + (x)) #define SOR_CRC_CNTRL 0x11 #define SOR_CRC_CNTRL_ENABLE (1 << 0) #define SOR_DP_DEBUG_MVID 0x12 @@ -75,62 +83,101 @@ #define SOR_TEST_HEAD_MODE_MASK (3 << 8) #define SOR_TEST_HEAD_MODE_AWAKE (2 << 8) -#define SOR_PLL_0 0x17 -#define SOR_PLL_0_ICHPMP_MASK (0xf << 24) -#define SOR_PLL_0_ICHPMP(x) (((x) & 0xf) << 24) -#define SOR_PLL_0_VCOCAP_MASK (0xf << 8) -#define SOR_PLL_0_VCOCAP(x) (((x) & 0xf) << 8) -#define SOR_PLL_0_VCOCAP_RST SOR_PLL_0_VCOCAP(3) -#define SOR_PLL_0_PLLREG_MASK (0x3 << 6) -#define SOR_PLL_0_PLLREG_LEVEL(x) (((x) & 0x3) << 6) -#define SOR_PLL_0_PLLREG_LEVEL_V25 SOR_PLL_0_PLLREG_LEVEL(0) -#define SOR_PLL_0_PLLREG_LEVEL_V15 SOR_PLL_0_PLLREG_LEVEL(1) -#define SOR_PLL_0_PLLREG_LEVEL_V35 SOR_PLL_0_PLLREG_LEVEL(2) -#define SOR_PLL_0_PLLREG_LEVEL_V45 SOR_PLL_0_PLLREG_LEVEL(3) -#define SOR_PLL_0_PULLDOWN (1 << 5) -#define SOR_PLL_0_RESISTOR_EXT (1 << 4) -#define SOR_PLL_0_VCOPD (1 << 2) -#define SOR_PLL_0_POWER_OFF (1 << 0) - -#define SOR_PLL_1 0x18 +#define SOR_PLL0 0x17 +#define SOR_PLL0_ICHPMP_MASK (0xf << 24) +#define SOR_PLL0_ICHPMP(x) (((x) & 0xf) << 24) +#define SOR_PLL0_VCOCAP_MASK (0xf << 8) +#define SOR_PLL0_VCOCAP(x) (((x) & 0xf) << 8) +#define SOR_PLL0_VCOCAP_RST SOR_PLL0_VCOCAP(3) +#define SOR_PLL0_PLLREG_MASK (0x3 << 6) +#define SOR_PLL0_PLLREG_LEVEL(x) (((x) & 0x3) << 6) +#define SOR_PLL0_PLLREG_LEVEL_V25 SOR_PLL0_PLLREG_LEVEL(0) +#define SOR_PLL0_PLLREG_LEVEL_V15 SOR_PLL0_PLLREG_LEVEL(1) +#define SOR_PLL0_PLLREG_LEVEL_V35 SOR_PLL0_PLLREG_LEVEL(2) +#define SOR_PLL0_PLLREG_LEVEL_V45 SOR_PLL0_PLLREG_LEVEL(3) +#define SOR_PLL0_PULLDOWN (1 << 5) +#define SOR_PLL0_RESISTOR_EXT (1 << 4) +#define SOR_PLL0_VCOPD (1 << 2) +#define SOR_PLL0_PWR (1 << 0) + +#define SOR_PLL1 0x18 /* XXX: 
read-only bit? */ -#define SOR_PLL_1_TERM_COMPOUT (1 << 15) -#define SOR_PLL_1_TMDS_TERM (1 << 8) - -#define SOR_PLL_2 0x19 -#define SOR_PLL_2_LVDS_ENABLE (1 << 25) -#define SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE (1 << 24) -#define SOR_PLL_2_PORT_POWERDOWN (1 << 23) -#define SOR_PLL_2_BANDGAP_POWERDOWN (1 << 22) -#define SOR_PLL_2_POWERDOWN_OVERRIDE (1 << 18) -#define SOR_PLL_2_SEQ_PLLCAPPD (1 << 17) - -#define SOR_PLL_3 0x1a -#define SOR_PLL_3_PLL_VDD_MODE_V1_8 (0 << 13) -#define SOR_PLL_3_PLL_VDD_MODE_V3_3 (1 << 13) +#define SOR_PLL1_LOADADJ_MASK (0xf << 20) +#define SOR_PLL1_LOADADJ(x) (((x) & 0xf) << 20) +#define SOR_PLL1_TERM_COMPOUT (1 << 15) +#define SOR_PLL1_TMDS_TERMADJ_MASK (0xf << 9) +#define SOR_PLL1_TMDS_TERMADJ(x) (((x) & 0xf) << 9) +#define SOR_PLL1_TMDS_TERM (1 << 8) + +#define SOR_PLL2 0x19 +#define SOR_PLL2_LVDS_ENABLE (1 << 25) +#define SOR_PLL2_SEQ_PLLCAPPD_ENFORCE (1 << 24) +#define SOR_PLL2_PORT_POWERDOWN (1 << 23) +#define SOR_PLL2_BANDGAP_POWERDOWN (1 << 22) +#define SOR_PLL2_POWERDOWN_OVERRIDE (1 << 18) +#define SOR_PLL2_SEQ_PLLCAPPD (1 << 17) +#define SOR_PLL2_SEQ_PLL_PULLDOWN (1 << 16) + +#define SOR_PLL3 0x1a +#define SOR_PLL3_BG_VREF_LEVEL_MASK (0xf << 24) +#define SOR_PLL3_BG_VREF_LEVEL(x) (((x) & 0xf) << 24) +#define SOR_PLL3_PLL_VDD_MODE_1V8 (0 << 13) +#define SOR_PLL3_PLL_VDD_MODE_3V3 (1 << 13) #define SOR_CSTM 0x1b +#define SOR_CSTM_ROTCLK_MASK (0xf << 24) +#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24) #define SOR_CSTM_LVDS (1 << 16) #define SOR_CSTM_LINK_ACT_B (1 << 15) #define SOR_CSTM_LINK_ACT_A (1 << 14) #define SOR_CSTM_UPPER (1 << 11) #define SOR_LVDS 0x1c -#define SOR_CRC_A 0x1d -#define SOR_CRC_A_VALID (1 << 0) -#define SOR_CRC_A_RESET (1 << 0) -#define SOR_CRC_B 0x1e +#define SOR_CRCA 0x1d +#define SOR_CRCA_VALID (1 << 0) +#define SOR_CRCA_RESET (1 << 0) +#define SOR_CRCB 0x1e #define SOR_BLANK 0x1f #define SOR_SEQ_CTL 0x20 +#define SOR_SEQ_CTL_PD_PC_ALT(x) (((x) & 0xf) << 12) +#define SOR_SEQ_CTL_PD_PC(x) (((x) & 0xf) << 8) +#define SOR_SEQ_CTL_PU_PC_ALT(x) (((x) & 0xf) << 4) +#define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) << 0) #define SOR_LANE_SEQ_CTL 0x21 #define SOR_LANE_SEQ_CTL_TRIGGER (1 << 31) +#define SOR_LANE_SEQ_CTL_STATE_BUSY (1 << 28) #define SOR_LANE_SEQ_CTL_SEQUENCE_UP (0 << 20) #define SOR_LANE_SEQ_CTL_SEQUENCE_DOWN (1 << 20) #define SOR_LANE_SEQ_CTL_POWER_STATE_UP (0 << 16) #define SOR_LANE_SEQ_CTL_POWER_STATE_DOWN (1 << 16) +#define SOR_LANE_SEQ_CTL_DELAY(x) (((x) & 0xf) << 12) #define SOR_SEQ_INST(x) (0x22 + (x)) +#define SOR_SEQ_INST_PLL_PULLDOWN (1 << 31) +#define SOR_SEQ_INST_POWERDOWN_MACRO (1 << 30) +#define SOR_SEQ_INST_ASSERT_PLL_RESET (1 << 29) +#define SOR_SEQ_INST_BLANK_V (1 << 28) +#define SOR_SEQ_INST_BLANK_H (1 << 27) +#define SOR_SEQ_INST_BLANK_DE (1 << 26) +#define SOR_SEQ_INST_BLACK_DATA (1 << 25) +#define SOR_SEQ_INST_TRISTATE_IOS (1 << 24) +#define SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23) +#define SOR_SEQ_INST_PIN_B_LOW (0 << 22) +#define SOR_SEQ_INST_PIN_B_HIGH (1 << 22) +#define SOR_SEQ_INST_PIN_A_LOW (0 << 21) +#define SOR_SEQ_INST_PIN_A_HIGH (1 << 21) +#define SOR_SEQ_INST_SEQUENCE_UP (0 << 19) +#define SOR_SEQ_INST_SEQUENCE_DOWN (1 << 19) +#define SOR_SEQ_INST_LANE_SEQ_STOP (0 << 18) +#define SOR_SEQ_INST_LANE_SEQ_RUN (1 << 18) +#define SOR_SEQ_INST_PORT_POWERDOWN (1 << 17) +#define SOR_SEQ_INST_PLL_POWERDOWN (1 << 16) +#define SOR_SEQ_INST_HALT (1 << 15) +#define SOR_SEQ_INST_WAIT_US (0 << 12) +#define SOR_SEQ_INST_WAIT_MS (1 << 12) +#define SOR_SEQ_INST_WAIT_VSYNC (2 << 12) +#define SOR_SEQ_INST_WAIT(x) (((x) & 
0x3ff) << 0) #define SOR_PWM_DIV 0x32 #define SOR_PWM_DIV_MASK 0xffffff @@ -140,32 +187,36 @@ #define SOR_PWM_CTL_CLK_SEL (1 << 30) #define SOR_PWM_CTL_DUTY_CYCLE_MASK 0xffffff -#define SOR_VCRC_A_0 0x34 -#define SOR_VCRC_A_1 0x35 -#define SOR_VCRC_B_0 0x36 -#define SOR_VCRC_B_1 0x37 -#define SOR_CCRC_A_0 0x38 -#define SOR_CCRC_A_1 0x39 -#define SOR_CCRC_B_0 0x3a -#define SOR_CCRC_B_1 0x3b -#define SOR_EDATA_A_0 0x3c -#define SOR_EDATA_A_1 0x3d -#define SOR_EDATA_B_0 0x3e -#define SOR_EDATA_B_1 0x3f -#define SOR_COUNT_A_0 0x40 -#define SOR_COUNT_A_1 0x41 -#define SOR_COUNT_B_0 0x42 -#define SOR_COUNT_B_1 0x43 -#define SOR_DEBUG_A_0 0x44 -#define SOR_DEBUG_A_1 0x45 -#define SOR_DEBUG_B_0 0x46 -#define SOR_DEBUG_B_1 0x47 +#define SOR_VCRC_A0 0x34 +#define SOR_VCRC_A1 0x35 +#define SOR_VCRC_B0 0x36 +#define SOR_VCRC_B1 0x37 +#define SOR_CCRC_A0 0x38 +#define SOR_CCRC_A1 0x39 +#define SOR_CCRC_B0 0x3a +#define SOR_CCRC_B1 0x3b +#define SOR_EDATA_A0 0x3c +#define SOR_EDATA_A1 0x3d +#define SOR_EDATA_B0 0x3e +#define SOR_EDATA_B1 0x3f +#define SOR_COUNT_A0 0x40 +#define SOR_COUNT_A1 0x41 +#define SOR_COUNT_B0 0x42 +#define SOR_COUNT_B1 0x43 +#define SOR_DEBUG_A0 0x44 +#define SOR_DEBUG_A1 0x45 +#define SOR_DEBUG_B0 0x46 +#define SOR_DEBUG_B1 0x47 #define SOR_TRIG 0x48 #define SOR_MSCHECK 0x49 #define SOR_XBAR_CTRL 0x4a +#define SOR_XBAR_CTRL_LINK1_XSEL(channel, value) ((((value) & 0x7) << ((channel) * 3)) << 17) +#define SOR_XBAR_CTRL_LINK0_XSEL(channel, value) ((((value) & 0x7) << ((channel) * 3)) << 2) +#define SOR_XBAR_CTRL_LINK_SWAP (1 << 1) +#define SOR_XBAR_CTRL_BYPASS (1 << 0) #define SOR_XBAR_POL 0x4b -#define SOR_DP_LINKCTL_0 0x4c +#define SOR_DP_LINKCTL0 0x4c #define SOR_DP_LINKCTL_LANE_COUNT_MASK (0x1f << 16) #define SOR_DP_LINKCTL_LANE_COUNT(x) (((1 << (x)) - 1) << 16) #define SOR_DP_LINKCTL_ENHANCED_FRAME (1 << 14) @@ -173,34 +224,34 @@ #define SOR_DP_LINKCTL_TU_SIZE(x) (((x) & 0x7f) << 2) #define SOR_DP_LINKCTL_ENABLE (1 << 0) -#define SOR_DP_LINKCTL_1 0x4d +#define SOR_DP_LINKCTL1 0x4d -#define SOR_LANE_DRIVE_CURRENT_0 0x4e -#define SOR_LANE_DRIVE_CURRENT_1 0x4f -#define SOR_LANE4_DRIVE_CURRENT_0 0x50 -#define SOR_LANE4_DRIVE_CURRENT_1 0x51 +#define SOR_LANE_DRIVE_CURRENT0 0x4e +#define SOR_LANE_DRIVE_CURRENT1 0x4f +#define SOR_LANE4_DRIVE_CURRENT0 0x50 +#define SOR_LANE4_DRIVE_CURRENT1 0x51 #define SOR_LANE_DRIVE_CURRENT_LANE3(x) (((x) & 0xff) << 24) #define SOR_LANE_DRIVE_CURRENT_LANE2(x) (((x) & 0xff) << 16) #define SOR_LANE_DRIVE_CURRENT_LANE1(x) (((x) & 0xff) << 8) #define SOR_LANE_DRIVE_CURRENT_LANE0(x) (((x) & 0xff) << 0) -#define SOR_LANE_PREEMPHASIS_0 0x52 -#define SOR_LANE_PREEMPHASIS_1 0x53 -#define SOR_LANE4_PREEMPHASIS_0 0x54 -#define SOR_LANE4_PREEMPHASIS_1 0x55 +#define SOR_LANE_PREEMPHASIS0 0x52 +#define SOR_LANE_PREEMPHASIS1 0x53 +#define SOR_LANE4_PREEMPHASIS0 0x54 +#define SOR_LANE4_PREEMPHASIS1 0x55 #define SOR_LANE_PREEMPHASIS_LANE3(x) (((x) & 0xff) << 24) #define SOR_LANE_PREEMPHASIS_LANE2(x) (((x) & 0xff) << 16) #define SOR_LANE_PREEMPHASIS_LANE1(x) (((x) & 0xff) << 8) #define SOR_LANE_PREEMPHASIS_LANE0(x) (((x) & 0xff) << 0) -#define SOR_LANE_POST_CURSOR_0 0x56 -#define SOR_LANE_POST_CURSOR_1 0x57 -#define SOR_LANE_POST_CURSOR_LANE3(x) (((x) & 0xff) << 24) -#define SOR_LANE_POST_CURSOR_LANE2(x) (((x) & 0xff) << 16) -#define SOR_LANE_POST_CURSOR_LANE1(x) (((x) & 0xff) << 8) -#define SOR_LANE_POST_CURSOR_LANE0(x) (((x) & 0xff) << 0) +#define SOR_LANE_POSTCURSOR0 0x56 +#define SOR_LANE_POSTCURSOR1 0x57 +#define SOR_LANE_POSTCURSOR_LANE3(x) (((x) & 0xff) << 24) 
+#define SOR_LANE_POSTCURSOR_LANE2(x) (((x) & 0xff) << 16) +#define SOR_LANE_POSTCURSOR_LANE1(x) (((x) & 0xff) << 8) +#define SOR_LANE_POSTCURSOR_LANE0(x) (((x) & 0xff) << 0) -#define SOR_DP_CONFIG_0 0x58 +#define SOR_DP_CONFIG0 0x58 #define SOR_DP_CONFIG_DISPARITY_NEGATIVE (1 << 31) #define SOR_DP_CONFIG_ACTIVE_SYM_ENABLE (1 << 26) #define SOR_DP_CONFIG_ACTIVE_SYM_POLARITY (1 << 24) @@ -211,11 +262,11 @@ #define SOR_DP_CONFIG_WATERMARK_MASK (0x3f << 0) #define SOR_DP_CONFIG_WATERMARK(x) (((x) & 0x3f) << 0) -#define SOR_DP_CONFIG_1 0x59 -#define SOR_DP_MN_0 0x5a -#define SOR_DP_MN_1 0x5b +#define SOR_DP_CONFIG1 0x59 +#define SOR_DP_MN0 0x5a +#define SOR_DP_MN1 0x5b -#define SOR_DP_PADCTL_0 0x5c +#define SOR_DP_PADCTL0 0x5c #define SOR_DP_PADCTL_PAD_CAL_PD (1 << 23) #define SOR_DP_PADCTL_TX_PU_ENABLE (1 << 22) #define SOR_DP_PADCTL_TX_PU_MASK (0xff << 8) @@ -229,17 +280,18 @@ #define SOR_DP_PADCTL_PD_TXD_1 (1 << 1) #define SOR_DP_PADCTL_PD_TXD_2 (1 << 0) -#define SOR_DP_PADCTL_1 0x5d +#define SOR_DP_PADCTL1 0x5d -#define SOR_DP_DEBUG_0 0x5e -#define SOR_DP_DEBUG_1 0x5f +#define SOR_DP_DEBUG0 0x5e +#define SOR_DP_DEBUG1 0x5f -#define SOR_DP_SPARE_0 0x60 -#define SOR_DP_SPARE_MACRO_SOR_CLK (1 << 2) -#define SOR_DP_SPARE_PANEL_INTERNAL (1 << 1) -#define SOR_DP_SPARE_SEQ_ENABLE (1 << 0) +#define SOR_DP_SPARE0 0x60 +#define SOR_DP_SPARE_DISP_VIDEO_PREAMBLE (1 << 3) +#define SOR_DP_SPARE_MACRO_SOR_CLK (1 << 2) +#define SOR_DP_SPARE_PANEL_INTERNAL (1 << 1) +#define SOR_DP_SPARE_SEQ_ENABLE (1 << 0) -#define SOR_DP_SPARE_1 0x61 +#define SOR_DP_SPARE1 0x61 #define SOR_DP_AUDIO_CTRL 0x62 #define SOR_DP_AUDIO_HBLANK_SYMBOLS 0x63 @@ -249,13 +301,13 @@ #define SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK (0x1fffff << 0) #define SOR_DP_GENERIC_INFOFRAME_HEADER 0x65 -#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_0 0x66 -#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_1 0x67 -#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_2 0x68 -#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_3 0x69 -#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_4 0x6a -#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_5 0x6b -#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_6 0x6c +#define SOR_DP_GENERIC_INFOFRAME_SUBPACK0 0x66 +#define SOR_DP_GENERIC_INFOFRAME_SUBPACK1 0x67 +#define SOR_DP_GENERIC_INFOFRAME_SUBPACK2 0x68 +#define SOR_DP_GENERIC_INFOFRAME_SUBPACK3 0x69 +#define SOR_DP_GENERIC_INFOFRAME_SUBPACK4 0x6a +#define SOR_DP_GENERIC_INFOFRAME_SUBPACK5 0x6b +#define SOR_DP_GENERIC_INFOFRAME_SUBPACK6 0x6c #define SOR_DP_TPG 0x6d #define SOR_DP_TPG_CHANNEL_CODING (1 << 6) @@ -275,8 +327,44 @@ #define SOR_DP_TPG_PATTERN_NONE (0x0 << 0) #define SOR_DP_TPG_CONFIG 0x6e -#define SOR_DP_LQ_CSTM_0 0x6f -#define SOR_DP_LQ_CSTM_1 0x70 -#define SOR_DP_LQ_CSTM_2 0x71 +#define SOR_DP_LQ_CSTM0 0x6f +#define SOR_DP_LQ_CSTM1 0x70 +#define SOR_DP_LQ_CSTM2 0x71 + +#define SOR_HDMI_AUDIO_INFOFRAME_CTRL 0x9a +#define SOR_HDMI_AUDIO_INFOFRAME_STATUS 0x9b +#define SOR_HDMI_AUDIO_INFOFRAME_HEADER 0x9c + +#define SOR_HDMI_AVI_INFOFRAME_CTRL 0x9f +#define INFOFRAME_CTRL_CHECKSUM_ENABLE (1 << 9) +#define INFOFRAME_CTRL_SINGLE (1 << 8) +#define INFOFRAME_CTRL_OTHER (1 << 4) +#define INFOFRAME_CTRL_ENABLE (1 << 0) + +#define SOR_HDMI_AVI_INFOFRAME_STATUS 0xa0 +#define INFOFRAME_STATUS_DONE (1 << 0) + +#define SOR_HDMI_AVI_INFOFRAME_HEADER 0xa1 +#define INFOFRAME_HEADER_LEN(x) (((x) & 0xff) << 16) +#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8) +#define INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0) + +#define SOR_HDMI_CTRL 0xc0 +#define SOR_HDMI_CTRL_ENABLE (1 << 30) +#define SOR_HDMI_CTRL_MAX_AC_PACKET(x) (((x) 
& 0x1f) << 16) +#define SOR_HDMI_CTRL_AUDIO_LAYOUT (1 << 10) +#define SOR_HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0) + +#define SOR_REFCLK 0xe6 +#define SOR_REFCLK_DIV_INT(x) ((((x) >> 2) & 0xff) << 8) +#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x3) << 6) + +#define SOR_INPUT_CONTROL 0xe8 +#define SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED (1 << 1) +#define SOR_INPUT_CONTROL_HDMI_SRC_SELECT(x) (((x) & 0x1) << 0) + +#define SOR_HDMI_VSI_INFOFRAME_CTRL 0x123 +#define SOR_HDMI_VSI_INFOFRAME_STATUS 0x124 +#define SOR_HDMI_VSI_INFOFRAME_HEADER 0x125 #endif diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index bf080abc86d1..4e19d0f9cc30 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -340,7 +340,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) swap_storage = shmem_file_setup("ttm swap", ttm->num_pages << PAGE_SHIFT, 0); - if (unlikely(IS_ERR(swap_storage))) { + if (IS_ERR(swap_storage)) { pr_err("Failed allocating swap storage\n"); return PTR_ERR(swap_storage); } @@ -354,7 +354,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) if (unlikely(from_page == NULL)) continue; to_page = shmem_read_mapping_page(swap_space, i); - if (unlikely(IS_ERR(to_page))) { + if (IS_ERR(to_page)) { ret = PTR_ERR(to_page); goto out_err; } diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index 5fc16cecd3ba..62c7b1dafaa4 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -288,7 +288,7 @@ static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect { struct udl_fbdev *ufbdev = info->par; - sys_fillrect(info, rect); + drm_fb_helper_sys_fillrect(info, rect); udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width, rect->height); @@ -298,7 +298,7 @@ static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *regi { struct udl_fbdev *ufbdev = info->par; - sys_copyarea(info, region); + drm_fb_helper_sys_copyarea(info, region); udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width, region->height); @@ -308,7 +308,7 @@ static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image) { struct udl_fbdev *ufbdev = info->par; - sys_imageblit(info, image); + drm_fb_helper_sys_imageblit(info, image); udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width, image->height); @@ -476,7 +476,6 @@ static int udlfb_create(struct drm_fb_helper *helper, container_of(helper, struct udl_fbdev, helper); struct drm_device *dev = ufbdev->helper.dev; struct fb_info *info; - struct device *device = dev->dev; struct drm_framebuffer *fb; struct drm_mode_fb_cmd2 mode_cmd; struct udl_gem_object *obj; @@ -506,21 +505,20 @@ static int udlfb_create(struct drm_fb_helper *helper, goto out_gfree; } - info = framebuffer_alloc(0, device); - if (!info) { - ret = -ENOMEM; + info = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(info)) { + ret = PTR_ERR(info); goto out_gfree; } info->par = ufbdev; ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj); if (ret) - goto out_gfree; + goto out_destroy_fbi; fb = &ufbdev->ufb.base; ufbdev->helper.fb = fb; - ufbdev->helper.fbdev = info; strcpy(info->fix.id, "udldrmfb"); @@ -533,18 +531,13 @@ static int udlfb_create(struct drm_fb_helper *helper, drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height); - ret = fb_alloc_cmap(&info->cmap, 256, 0); - if (ret) { - ret = -ENOMEM; - goto 
out_gfree; - } - - DRM_DEBUG_KMS("allocated %dx%d vmal %p\n", fb->width, fb->height, ufbdev->ufb.obj->vmapping); return ret; +out_destroy_fbi: + drm_fb_helper_release_fbi(helper); out_gfree: drm_gem_object_unreference(&ufbdev->ufb.obj->base); out: @@ -558,14 +551,8 @@ static const struct drm_fb_helper_funcs udl_fb_helper_funcs = { static void udl_fbdev_destroy(struct drm_device *dev, struct udl_fbdev *ufbdev) { - struct fb_info *info; - if (ufbdev->helper.fbdev) { - info = ufbdev->helper.fbdev; - unregister_framebuffer(info); - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - framebuffer_release(info); - } + drm_fb_helper_unregister_fbi(&ufbdev->helper); + drm_fb_helper_release_fbi(&ufbdev->helper); drm_fb_helper_fini(&ufbdev->helper); drm_framebuffer_unregister_private(&ufbdev->ufb.base); drm_framebuffer_cleanup(&ufbdev->ufb.base); @@ -631,11 +618,7 @@ void udl_fbdev_unplug(struct drm_device *dev) return; ufbdev = udl->fbdev; - if (ufbdev->helper.fbdev) { - struct fb_info *info; - info = ufbdev->helper.fbdev; - unlink_framebuffer(info); - } + drm_fb_helper_unlink_fbi(&ufbdev->helper); } struct drm_framebuffer * diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c index df198d9e770c..6a81e084593b 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fb.c +++ b/drivers/gpu/drm/virtio/virtgpu_fb.c @@ -173,7 +173,7 @@ static void virtio_gpu_3d_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct virtio_gpu_fbdev *vfbdev = info->par; - sys_fillrect(info, rect); + drm_fb_helper_sys_fillrect(info, rect); virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy, rect->width, rect->height); schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD); @@ -183,7 +183,7 @@ static void virtio_gpu_3d_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct virtio_gpu_fbdev *vfbdev = info->par; - sys_copyarea(info, area); + drm_fb_helper_sys_copyarea(info, area); virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy, area->width, area->height); schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD); @@ -193,7 +193,7 @@ static void virtio_gpu_3d_imageblit(struct fb_info *info, const struct fb_image *image) { struct virtio_gpu_fbdev *vfbdev = info->par; - sys_imageblit(info, image); + drm_fb_helper_sys_imageblit(info, image); virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy, image->width, image->height); schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD); @@ -230,7 +230,6 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper, struct drm_framebuffer *fb; struct drm_mode_fb_cmd2 mode_cmd = {}; struct virtio_gpu_object *obj; - struct device *device = vgdev->dev; uint32_t resid, format, size; int ret; @@ -317,18 +316,12 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper, if (ret) goto err_obj_attach; - info = framebuffer_alloc(0, device); - if (!info) { - ret = -ENOMEM; + info = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(info)) { + ret = PTR_ERR(info); goto err_fb_alloc; } - ret = fb_alloc_cmap(&info->cmap, 256, 0); - if (ret) { - ret = -ENOMEM; - goto err_fb_alloc_cmap; - } - info->par = helper; ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb, @@ -339,7 +332,6 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper, fb = &vfbdev->vgfb.base; vfbdev->helper.fb = fb; - vfbdev->helper.fbdev = info; strcpy(info->fix.id, "virtiodrmfb"); info->flags = FBINFO_DEFAULT; @@ -357,9 +349,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper, 
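The udl changes above (and the virtio ones that follow) apply the same conversion recipe to the new fbdev emulation helpers: as the removed framebuffer_alloc(), fb_alloc_cmap() and helper.fbdev assignments suggest, drm_fb_helper_alloc_fbi() now takes care of all three, which is why the manual error unwinding shrinks. The resulting shape on the create path is roughly (sketch using the helper names the patch itself introduces):

	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}

	/* ... driver-specific framebuffer setup ... */

	helper->fb = fb;	/* helper->fbdev is already set */

with drm_fb_helper_unregister_fbi() and drm_fb_helper_release_fbi() forming the matching teardown pair.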
return 0; err_fb_init: - fb_dealloc_cmap(&info->cmap); -err_fb_alloc_cmap: - framebuffer_release(info); + drm_fb_helper_release_fbi(helper); err_fb_alloc: virtio_gpu_cmd_resource_inval_backing(vgdev, resid); err_obj_attach: @@ -371,15 +361,11 @@ err_obj_vmap: static int virtio_gpu_fbdev_destroy(struct drm_device *dev, struct virtio_gpu_fbdev *vgfbdev) { - struct fb_info *info; struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb; - if (vgfbdev->helper.fbdev) { - info = vgfbdev->helper.fbdev; + drm_fb_helper_unregister_fbi(&vgfbdev->helper); + drm_fb_helper_release_fbi(&vgfbdev->helper); - unregister_framebuffer(info); - framebuffer_release(info); - } if (vgfb->obj) vgfb->obj = NULL; drm_fb_helper_fini(&vgfbdev->helper); diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index ce0ab951f507..d281575bbe11 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -7,6 +7,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \ vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \ - vmwgfx_cmdbuf_res.o \ + vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \ + vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o diff --git a/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h b/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h new file mode 100644 index 000000000000..8cce7f15b6eb --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h @@ -0,0 +1,3 @@ +/* + * Intentionally empty file. + */ diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h new file mode 100644 index 000000000000..9ce2466a5d00 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h @@ -0,0 +1,110 @@ +/********************************************************** + * Copyright 2007-2015 VMware, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + **********************************************************/ + +/* + * svga3d_caps.h -- + * + * Definitions for SVGA3D hardware capabilities. Capabilities + * are used to query for optional rendering features during + * driver initialization. The capability data is stored as very + * basic key/value dictionary within the "FIFO register" memory + * area at the beginning of BAR2. 
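Given that layout (the record structures are defined just below), a consumer would walk the dictionary until a zero-length header terminates it. A minimal sketch, assuming kernel types and modeled on the SVGA3dCaps_FindRecord() that the header comment mentions (the helper name here is the editor's):

static const SVGA3dCapsRecord *
svga3d_caps_find_record(const u32 *caps, SVGA3dCapsRecordType type)
{
	const SVGA3dCapsRecord *record = (const SVGA3dCapsRecord *)caps;

	/* header.length counts 32-bit words; a zero word ends the block */
	while (record->header.length != 0) {
		if (record->header.type == type)
			return record;

		record = (const SVGA3dCapsRecord *)
			((const u32 *)record + record->header.length);
	}

	return NULL;
}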
+ * + * Note that these definitions are only for 3D capabilities. + * The SVGA device also has "device capabilities" and "FIFO + * capabilities", which are non-3D-specific and are stored as + * bitfields rather than key/value pairs. + */ + +#ifndef _SVGA3D_CAPS_H_ +#define _SVGA3D_CAPS_H_ + +#define INCLUDE_ALLOW_MODULE +#define INCLUDE_ALLOW_USERLEVEL + +#include "includeCheck.h" + +#include "svga_reg.h" + +#define SVGA_FIFO_3D_CAPS_SIZE (SVGA_FIFO_3D_CAPS_LAST - \ + SVGA_FIFO_3D_CAPS + 1) + + +/* + * SVGA3dCapsRecordType + * + * Record types that can be found in the caps block. + * Related record types are grouped together numerically so that + * SVGA3dCaps_FindRecord() can be applied on a range of record + * types. + */ + +typedef enum { + SVGA3DCAPS_RECORD_UNKNOWN = 0, + SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100, + SVGA3DCAPS_RECORD_DEVCAPS = 0x100, + SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff, +} SVGA3dCapsRecordType; + + +/* + * SVGA3dCapsRecordHeader + * + * Header field leading each caps block record. Contains the offset (in + * register words, NOT bytes) to the next caps block record (or the end + * of caps block records which will be a zero word) and the record type + * as defined above. + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCapsRecordHeader { + uint32 length; + SVGA3dCapsRecordType type; +} +#include "vmware_pack_end.h" +SVGA3dCapsRecordHeader; + + +/* + * SVGA3dCapsRecord + * + * Caps block record; "data" is a placeholder for the actual data structure + * contained within the record; + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCapsRecord { + SVGA3dCapsRecordHeader header; + uint32 data[1]; +} +#include "vmware_pack_end.h" +SVGA3dCapsRecord; + + +typedef uint32 SVGA3dCapPair[2]; + +#endif diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h new file mode 100644 index 000000000000..2dfd57c5f463 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h @@ -0,0 +1,2071 @@ +/********************************************************** + * Copyright 1998-2015 VMware, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + **********************************************************/ + +/* + * svga3d_cmd.h -- + * + * SVGA 3d hardware cmd definitions + */ + +#ifndef _SVGA3D_CMD_H_ +#define _SVGA3D_CMD_H_ + +#define INCLUDE_ALLOW_MODULE +#define INCLUDE_ALLOW_USERLEVEL +#define INCLUDE_ALLOW_VMCORE + +#include "includeCheck.h" +#include "svga3d_types.h" + +/* + * Identifiers for commands in the command FIFO. + * + * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of + * the SVGA3D protocol and remain reserved; they should not be used in the + * future. + * + * IDs between 1040 and 1999 (inclusive) are available for use by the + * current SVGA3D protocol. + * + * FIFO clients other than SVGA3D should stay below 1000, or at 2000 + * and up. + */ + +typedef enum { + SVGA_3D_CMD_LEGACY_BASE = 1000, + SVGA_3D_CMD_BASE = 1040, + + SVGA_3D_CMD_SURFACE_DEFINE = 1040, + SVGA_3D_CMD_SURFACE_DESTROY = 1041, + SVGA_3D_CMD_SURFACE_COPY = 1042, + SVGA_3D_CMD_SURFACE_STRETCHBLT = 1043, + SVGA_3D_CMD_SURFACE_DMA = 1044, + SVGA_3D_CMD_CONTEXT_DEFINE = 1045, + SVGA_3D_CMD_CONTEXT_DESTROY = 1046, + SVGA_3D_CMD_SETTRANSFORM = 1047, + SVGA_3D_CMD_SETZRANGE = 1048, + SVGA_3D_CMD_SETRENDERSTATE = 1049, + SVGA_3D_CMD_SETRENDERTARGET = 1050, + SVGA_3D_CMD_SETTEXTURESTATE = 1051, + SVGA_3D_CMD_SETMATERIAL = 1052, + SVGA_3D_CMD_SETLIGHTDATA = 1053, + SVGA_3D_CMD_SETLIGHTENABLED = 1054, + SVGA_3D_CMD_SETVIEWPORT = 1055, + SVGA_3D_CMD_SETCLIPPLANE = 1056, + SVGA_3D_CMD_CLEAR = 1057, + SVGA_3D_CMD_PRESENT = 1058, + SVGA_3D_CMD_SHADER_DEFINE = 1059, + SVGA_3D_CMD_SHADER_DESTROY = 1060, + SVGA_3D_CMD_SET_SHADER = 1061, + SVGA_3D_CMD_SET_SHADER_CONST = 1062, + SVGA_3D_CMD_DRAW_PRIMITIVES = 1063, + SVGA_3D_CMD_SETSCISSORRECT = 1064, + SVGA_3D_CMD_BEGIN_QUERY = 1065, + SVGA_3D_CMD_END_QUERY = 1066, + SVGA_3D_CMD_WAIT_FOR_QUERY = 1067, + SVGA_3D_CMD_PRESENT_READBACK = 1068, + SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN = 1069, + SVGA_3D_CMD_SURFACE_DEFINE_V2 = 1070, + SVGA_3D_CMD_GENERATE_MIPMAPS = 1071, + SVGA_3D_CMD_VIDEO_CREATE_DECODER = 1072, + SVGA_3D_CMD_VIDEO_DESTROY_DECODER = 1073, + SVGA_3D_CMD_VIDEO_CREATE_PROCESSOR = 1074, + SVGA_3D_CMD_VIDEO_DESTROY_PROCESSOR = 1075, + SVGA_3D_CMD_VIDEO_DECODE_START_FRAME = 1076, + SVGA_3D_CMD_VIDEO_DECODE_RENDER = 1077, + SVGA_3D_CMD_VIDEO_DECODE_END_FRAME = 1078, + SVGA_3D_CMD_VIDEO_PROCESS_FRAME = 1079, + SVGA_3D_CMD_ACTIVATE_SURFACE = 1080, + SVGA_3D_CMD_DEACTIVATE_SURFACE = 1081, + SVGA_3D_CMD_SCREEN_DMA = 1082, + SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE = 1083, + SVGA_3D_CMD_OPEN_CONTEXT_SURFACE = 1084, + + SVGA_3D_CMD_LOGICOPS_BITBLT = 1085, + SVGA_3D_CMD_LOGICOPS_TRANSBLT = 1086, + SVGA_3D_CMD_LOGICOPS_STRETCHBLT = 1087, + SVGA_3D_CMD_LOGICOPS_COLORFILL = 1088, + SVGA_3D_CMD_LOGICOPS_ALPHABLEND = 1089, + SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND = 1090, + + SVGA_3D_CMD_SET_OTABLE_BASE = 1091, + SVGA_3D_CMD_READBACK_OTABLE = 1092, + + SVGA_3D_CMD_DEFINE_GB_MOB = 1093, + SVGA_3D_CMD_DESTROY_GB_MOB = 1094, + SVGA_3D_CMD_DEAD3 = 1095, + SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING = 1096, + + SVGA_3D_CMD_DEFINE_GB_SURFACE = 1097, + SVGA_3D_CMD_DESTROY_GB_SURFACE = 1098, + SVGA_3D_CMD_BIND_GB_SURFACE = 1099, + SVGA_3D_CMD_COND_BIND_GB_SURFACE = 1100, + SVGA_3D_CMD_UPDATE_GB_IMAGE = 1101, + SVGA_3D_CMD_UPDATE_GB_SURFACE = 1102, + SVGA_3D_CMD_READBACK_GB_IMAGE = 1103, + SVGA_3D_CMD_READBACK_GB_SURFACE = 1104, + SVGA_3D_CMD_INVALIDATE_GB_IMAGE = 1105, + SVGA_3D_CMD_INVALIDATE_GB_SURFACE = 1106, + + SVGA_3D_CMD_DEFINE_GB_CONTEXT = 1107, + SVGA_3D_CMD_DESTROY_GB_CONTEXT = 1108, + SVGA_3D_CMD_BIND_GB_CONTEXT 
= 1109, + SVGA_3D_CMD_READBACK_GB_CONTEXT = 1110, + SVGA_3D_CMD_INVALIDATE_GB_CONTEXT = 1111, + + SVGA_3D_CMD_DEFINE_GB_SHADER = 1112, + SVGA_3D_CMD_DESTROY_GB_SHADER = 1113, + SVGA_3D_CMD_BIND_GB_SHADER = 1114, + + SVGA_3D_CMD_SET_OTABLE_BASE64 = 1115, + + SVGA_3D_CMD_BEGIN_GB_QUERY = 1116, + SVGA_3D_CMD_END_GB_QUERY = 1117, + SVGA_3D_CMD_WAIT_FOR_GB_QUERY = 1118, + + SVGA_3D_CMD_NOP = 1119, + + SVGA_3D_CMD_ENABLE_GART = 1120, + SVGA_3D_CMD_DISABLE_GART = 1121, + SVGA_3D_CMD_MAP_MOB_INTO_GART = 1122, + SVGA_3D_CMD_UNMAP_GART_RANGE = 1123, + + SVGA_3D_CMD_DEFINE_GB_SCREENTARGET = 1124, + SVGA_3D_CMD_DESTROY_GB_SCREENTARGET = 1125, + SVGA_3D_CMD_BIND_GB_SCREENTARGET = 1126, + SVGA_3D_CMD_UPDATE_GB_SCREENTARGET = 1127, + + SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL = 1128, + SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL = 1129, + + SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE = 1130, + + SVGA_3D_CMD_GB_SCREEN_DMA = 1131, + SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH = 1132, + SVGA_3D_CMD_GB_MOB_FENCE = 1133, + SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 = 1134, + SVGA_3D_CMD_DEFINE_GB_MOB64 = 1135, + SVGA_3D_CMD_REDEFINE_GB_MOB64 = 1136, + SVGA_3D_CMD_NOP_ERROR = 1137, + + SVGA_3D_CMD_SET_VERTEX_STREAMS = 1138, + SVGA_3D_CMD_SET_VERTEX_DECLS = 1139, + SVGA_3D_CMD_SET_VERTEX_DIVISORS = 1140, + SVGA_3D_CMD_DRAW = 1141, + SVGA_3D_CMD_DRAW_INDEXED = 1142, + + /* + * DX10 Commands + */ + SVGA_3D_CMD_DX_MIN = 1143, + SVGA_3D_CMD_DX_DEFINE_CONTEXT = 1143, + SVGA_3D_CMD_DX_DESTROY_CONTEXT = 1144, + SVGA_3D_CMD_DX_BIND_CONTEXT = 1145, + SVGA_3D_CMD_DX_READBACK_CONTEXT = 1146, + SVGA_3D_CMD_DX_INVALIDATE_CONTEXT = 1147, + SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER = 1148, + SVGA_3D_CMD_DX_SET_SHADER_RESOURCES = 1149, + SVGA_3D_CMD_DX_SET_SHADER = 1150, + SVGA_3D_CMD_DX_SET_SAMPLERS = 1151, + SVGA_3D_CMD_DX_DRAW = 1152, + SVGA_3D_CMD_DX_DRAW_INDEXED = 1153, + SVGA_3D_CMD_DX_DRAW_INSTANCED = 1154, + SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED = 1155, + SVGA_3D_CMD_DX_DRAW_AUTO = 1156, + SVGA_3D_CMD_DX_SET_INPUT_LAYOUT = 1157, + SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS = 1158, + SVGA_3D_CMD_DX_SET_INDEX_BUFFER = 1159, + SVGA_3D_CMD_DX_SET_TOPOLOGY = 1160, + SVGA_3D_CMD_DX_SET_RENDERTARGETS = 1161, + SVGA_3D_CMD_DX_SET_BLEND_STATE = 1162, + SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE = 1163, + SVGA_3D_CMD_DX_SET_RASTERIZER_STATE = 1164, + SVGA_3D_CMD_DX_DEFINE_QUERY = 1165, + SVGA_3D_CMD_DX_DESTROY_QUERY = 1166, + SVGA_3D_CMD_DX_BIND_QUERY = 1167, + SVGA_3D_CMD_DX_SET_QUERY_OFFSET = 1168, + SVGA_3D_CMD_DX_BEGIN_QUERY = 1169, + SVGA_3D_CMD_DX_END_QUERY = 1170, + SVGA_3D_CMD_DX_READBACK_QUERY = 1171, + SVGA_3D_CMD_DX_SET_PREDICATION = 1172, + SVGA_3D_CMD_DX_SET_SOTARGETS = 1173, + SVGA_3D_CMD_DX_SET_VIEWPORTS = 1174, + SVGA_3D_CMD_DX_SET_SCISSORRECTS = 1175, + SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW = 1176, + SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW = 1177, + SVGA_3D_CMD_DX_PRED_COPY_REGION = 1178, + SVGA_3D_CMD_DX_PRED_COPY = 1179, + SVGA_3D_CMD_DX_STRETCHBLT = 1180, + SVGA_3D_CMD_DX_GENMIPS = 1181, + SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE = 1182, + SVGA_3D_CMD_DX_READBACK_SUBRESOURCE = 1183, + SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE = 1184, + SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW = 1185, + SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW = 1186, + SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW = 1187, + SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW = 1188, + SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW = 1189, + SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW = 1190, + SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT = 1191, + SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT = 1192, + 
SVGA_3D_CMD_DX_DEFINE_BLEND_STATE = 1193, + SVGA_3D_CMD_DX_DESTROY_BLEND_STATE = 1194, + SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE = 1195, + SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE = 1196, + SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE = 1197, + SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE = 1198, + SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE = 1199, + SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE = 1200, + SVGA_3D_CMD_DX_DEFINE_SHADER = 1201, + SVGA_3D_CMD_DX_DESTROY_SHADER = 1202, + SVGA_3D_CMD_DX_BIND_SHADER = 1203, + SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT = 1204, + SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT = 1205, + SVGA_3D_CMD_DX_SET_STREAMOUTPUT = 1206, + SVGA_3D_CMD_DX_SET_COTABLE = 1207, + SVGA_3D_CMD_DX_READBACK_COTABLE = 1208, + SVGA_3D_CMD_DX_BUFFER_COPY = 1209, + SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER = 1210, + SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK = 1211, + SVGA_3D_CMD_DX_MOVE_QUERY = 1212, + SVGA_3D_CMD_DX_BIND_ALL_QUERY = 1213, + SVGA_3D_CMD_DX_READBACK_ALL_QUERY = 1214, + SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER = 1215, + SVGA_3D_CMD_DX_MOB_FENCE_64 = 1216, + SVGA_3D_CMD_DX_BIND_SHADER_ON_CONTEXT = 1217, + SVGA_3D_CMD_DX_HINT = 1218, + SVGA_3D_CMD_DX_BUFFER_UPDATE = 1219, + SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET = 1220, + SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET = 1221, + SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET = 1222, + + /* + * Reserve some IDs to be used for the DX11 shader types. + */ + SVGA_3D_CMD_DX_RESERVED1 = 1223, + SVGA_3D_CMD_DX_RESERVED2 = 1224, + SVGA_3D_CMD_DX_RESERVED3 = 1225, + + SVGA_3D_CMD_DX_MAX = 1226, + SVGA_3D_CMD_MAX = 1226, + SVGA_3D_CMD_FUTURE_MAX = 3000 +} SVGAFifo3dCmdId; + +/* + * FIFO command format definitions: + */ + +/* + * The data size header following cmdNum for every 3d command + */ +typedef +#include "vmware_pack_begin.h" +struct { + uint32 id; + uint32 size; +} +#include "vmware_pack_end.h" +SVGA3dCmdHeader; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 numMipLevels; +} +#include "vmware_pack_end.h" +SVGA3dSurfaceFace; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 sid; + SVGA3dSurfaceFlags surfaceFlags; + SVGA3dSurfaceFormat format; + /* + * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace + * structures must have the same value of numMipLevels field. + * Otherwise, all but the first SVGA3dSurfaceFace structures must have the + * numMipLevels set to 0. + */ + SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES]; + /* + * Followed by an SVGA3dSize structure for each mip level in each face. + * + * A note on surface sizes: Sizes are always specified in pixels, + * even if the true surface size is not a multiple of the minimum + * block size of the surface's format. For example, a 3x3x1 DXT1 + * compressed texture would actually be stored as a 4x4x1 image in + * memory. + */ +} +#include "vmware_pack_end.h" +SVGA3dCmdDefineSurface; /* SVGA_3D_CMD_SURFACE_DEFINE */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 sid; + SVGA3dSurfaceFlags surfaceFlags; + SVGA3dSurfaceFormat format; + /* + * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace + * structures must have the same value of numMipLevels field. + * Otherwise, all but the first SVGA3dSurfaceFace structures must have the + * numMipLevels set to 0. + */ + SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES]; + uint32 multisampleCount; + SVGA3dTextureFilter autogenFilter; + /* + * Followed by an SVGA3dSize structure for each mip level in each face. 
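(Every 3D command travels through the FIFO as an SVGA3dCmdHeader followed by its body. A minimal sketch of framing SVGA3dCmdDefineContext, defined a little further down, under the assumption that 'size' counts only the bytes of the body that follows the header:)

    struct {
            SVGA3dCmdHeader hdr;
            SVGA3dCmdDefineContext body;
    } cmd;

    cmd.hdr.id   = SVGA_3D_CMD_CONTEXT_DEFINE;
    cmd.hdr.size = sizeof(cmd.body);   /* body bytes, header excluded */
    cmd.body.cid = 0;                  /* illustrative context id */
    /* the packed bytes of 'cmd' are then written to the command FIFO */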
+ * + * A note on surface sizes: Sizes are always specified in pixels, + * even if the true surface size is not a multiple of the minimum + * block size of the surface's format. For example, a 3x3x1 DXT1 + * compressed texture would actually be stored as a 4x4x1 image in + * memory. + */ +} +#include "vmware_pack_end.h" +SVGA3dCmdDefineSurface_v2; /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 sid; +} +#include "vmware_pack_end.h" +SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; +} +#include "vmware_pack_end.h" +SVGA3dCmdDefineContext; /* SVGA_3D_CMD_CONTEXT_DEFINE */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; +} +#include "vmware_pack_end.h" +SVGA3dCmdDestroyContext; /* SVGA_3D_CMD_CONTEXT_DESTROY */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + SVGA3dClearFlag clearFlag; + uint32 color; + float depth; + uint32 stencil; + /* Followed by variable number of SVGA3dRect structures */ +} +#include "vmware_pack_end.h" +SVGA3dCmdClear; /* SVGA_3D_CMD_CLEAR */ + +typedef +#include "vmware_pack_begin.h" +struct { + SVGA3dLightType type; + SVGA3dBool inWorldSpace; + float diffuse[4]; + float specular[4]; + float ambient[4]; + float position[4]; + float direction[4]; + float range; + float falloff; + float attenuation0; + float attenuation1; + float attenuation2; + float theta; + float phi; +} +#include "vmware_pack_end.h" +SVGA3dLightData; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 sid; + /* Followed by variable number of SVGA3dCopyRect structures */ +} +#include "vmware_pack_end.h" +SVGA3dCmdPresent; /* SVGA_3D_CMD_PRESENT */ + +typedef +#include "vmware_pack_begin.h" +struct { + SVGA3dRenderStateName state; + union { + uint32 uintValue; + float floatValue; + }; +} +#include "vmware_pack_end.h" +SVGA3dRenderState; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + /* Followed by variable number of SVGA3dRenderState structures */ +} +#include "vmware_pack_end.h" +SVGA3dCmdSetRenderState; /* SVGA_3D_CMD_SETRENDERSTATE */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + SVGA3dRenderTargetType type; + SVGA3dSurfaceImageId target; +} +#include "vmware_pack_end.h" +SVGA3dCmdSetRenderTarget; /* SVGA_3D_CMD_SETRENDERTARGET */ + +typedef +#include "vmware_pack_begin.h" +struct { + SVGA3dSurfaceImageId src; + SVGA3dSurfaceImageId dest; + /* Followed by variable number of SVGA3dCopyBox structures */ +} +#include "vmware_pack_end.h" +SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */ + +typedef +#include "vmware_pack_begin.h" +struct { + SVGA3dSurfaceImageId src; + SVGA3dSurfaceImageId dest; + SVGA3dBox boxSrc; + SVGA3dBox boxDest; + SVGA3dStretchBltMode mode; +} +#include "vmware_pack_end.h" +SVGA3dCmdSurfaceStretchBlt; /* SVGA_3D_CMD_SURFACE_STRETCHBLT */ + +typedef +#include "vmware_pack_begin.h" +struct { + /* + * If the discard flag is present in a surface DMA operation, the host may + * discard the contents of the current mipmap level and face of the target + * surface before applying the surface DMA contents. + */ + uint32 discard : 1; + + /* + * If the unsynchronized flag is present, the host may perform this upload + * without syncing to pending reads on this surface. + */ + uint32 unsynchronized : 1; + + /* + * Guests *MUST* set the reserved bits to 0 before submitting the command + * suffix as future flags may occupy these bits. 
+ */ + uint32 reserved : 30; +} +#include "vmware_pack_end.h" +SVGA3dSurfaceDMAFlags; + +typedef +#include "vmware_pack_begin.h" +struct { + SVGAGuestImage guest; + SVGA3dSurfaceImageId host; + SVGA3dTransferType transfer; + /* + * Followed by variable number of SVGA3dCopyBox structures. For consistency + * in all clipping logic and coordinate translation, we define the + * "source" in each copyBox as the guest image and the + * "destination" as the host image, regardless of transfer + * direction. + * + * For efficiency, the SVGA3D device is free to copy more data than + * specified. For example, it may round copy boxes outwards such + * that they lie on particular alignment boundaries. + */ +} +#include "vmware_pack_end.h" +SVGA3dCmdSurfaceDMA; /* SVGA_3D_CMD_SURFACE_DMA */ + +/* + * SVGA3dCmdSurfaceDMASuffix -- + * + * This is a command suffix that will appear after a SurfaceDMA command in + * the FIFO. It contains some extra information that hosts may use to + * optimize performance or protect the guest. This suffix exists to preserve + * backwards compatibility while also allowing for new functionality to be + * implemented. + */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 suffixSize; + + /* + * The maximum offset is used to determine the maximum offset from the + * guestPtr base address that will be accessed or written to during this + * surfaceDMA. If the suffix is supported, the host will respect this + * boundary while performing surface DMAs. + * + * Defaults to MAX_UINT32 + */ + uint32 maximumOffset; + + /* + * A set of flags that describes optimizations that the host may perform + * while performing this surface DMA operation. The guest should never rely + * on behaviour that is different when these flags are set for correctness. + * + * Defaults to 0 + */ + SVGA3dSurfaceDMAFlags flags; +} +#include "vmware_pack_end.h" +SVGA3dCmdSurfaceDMASuffix; + +/* + * SVGA_3D_CMD_DRAW_PRIMITIVES -- + * + * This command is the SVGA3D device's generic drawing entry point. + * It can draw multiple ranges of primitives, optionally using an + * index buffer, using an arbitrary collection of vertex buffers. + * + * Each SVGA3dVertexDecl defines a distinct vertex array to bind + * during this draw call. The declarations specify which surface + * the vertex data lives in, what that vertex data is used for, + * and how to interpret it. + * + * Each SVGA3dPrimitiveRange defines a collection of primitives + * to render using the same vertex arrays. An index buffer is + * optional. + */ + +typedef +#include "vmware_pack_begin.h" +struct { + /* + * A range hint is an optional specification for the range of indices + * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed + * that the entire array will be used. + * + * These are only hints. The SVGA3D device may use them for + * performance optimization if possible, but it's also allowed to + * ignore these values. + */ + uint32 first; + uint32 last; +} +#include "vmware_pack_end.h" +SVGA3dArrayRangeHint; + +typedef +#include "vmware_pack_begin.h" +struct { + /* + * Define the origin and shape of a vertex or index array. Both + * 'offset' and 'stride' are in bytes. The provided surface will be + * reinterpreted as a flat array of bytes in the same format used + * by surface DMA operations. To avoid unnecessary conversions, the + * surface should be created with the SVGA3D_BUFFER format. + * + * Index 0 in the array starts 'offset' bytes into the surface. + * Index 1 begins at byte 'offset + stride', etc. 
Array indices may + * not be negative. + */ + uint32 surfaceId; + uint32 offset; + uint32 stride; +} +#include "vmware_pack_end.h" +SVGA3dArray; + +typedef +#include "vmware_pack_begin.h" +struct { + /* + * Describe a vertex array's data type, and define how it is to be + * used by the fixed function pipeline or the vertex shader. It + * isn't useful to have two VertexDecls with the same + * VertexArrayIdentity in one draw call. + */ + SVGA3dDeclType type; + SVGA3dDeclMethod method; + SVGA3dDeclUsage usage; + uint32 usageIndex; +} +#include "vmware_pack_end.h" +SVGA3dVertexArrayIdentity; + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dVertexDecl { + SVGA3dVertexArrayIdentity identity; + SVGA3dArray array; + SVGA3dArrayRangeHint rangeHint; +} +#include "vmware_pack_end.h" +SVGA3dVertexDecl; + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dPrimitiveRange { + /* + * Define a group of primitives to render, from sequential indices. + * + * The value of 'primitiveType' and 'primitiveCount' imply the + * total number of vertices that will be rendered. + */ + SVGA3dPrimitiveType primType; + uint32 primitiveCount; + + /* + * Optional index buffer. If indexArray.surfaceId is + * SVGA3D_INVALID_ID, we render without an index buffer. Rendering + * without an index buffer is identical to rendering with an index + * buffer containing the sequence [0, 1, 2, 3, ...]. + * + * If an index buffer is in use, indexWidth specifies the width in + * bytes of each index value. It must be less than or equal to + * indexArray.stride. + * + * (Currently, the SVGA3D device requires index buffers to be tightly + * packed. In other words, indexWidth == indexArray.stride) + */ + SVGA3dArray indexArray; + uint32 indexWidth; + + /* + * Optional index bias. This number is added to all indices from + * indexArray before they are used as vertex array indices. This + * can be used in multiple ways: + * + * - When not using an indexArray, this bias can be used to + * specify where in the vertex arrays to begin rendering. + * + * - A positive number here is equivalent to increasing the + * offset in each vertex array. + * + * - A negative number can be used to render using a small + * vertex array and an index buffer that contains large + * values. This may be used by some applications that + * crop a vertex buffer without modifying their index + * buffer. + * + * Note that rendering with a negative bias value may be slower and + * use more memory than rendering with a positive or zero bias. + */ + int32 indexBias; +} +#include "vmware_pack_end.h" +SVGA3dPrimitiveRange; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + uint32 numVertexDecls; + uint32 numRanges; + + /* + * There are two variable size arrays after the + * SVGA3dCmdDrawPrimitives structure. In order, + * they are: + * + * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than + * SVGA3D_MAX_VERTEX_ARRAYS; + * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than + * SVGA3D_MAX_DRAW_PRIMITIVE_RANGES; + * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains + * the frequency divisor for the corresponding vertex decl). + */ +} +#include "vmware_pack_end.h" +SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAWPRIMITIVES */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + + uint32 primitiveCount; /* How many primitives to render */ + uint32 startVertexLocation; /* Which vertex do we start rendering at. 
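(Looking back at SVGA_3D_CMD_DRAW_PRIMITIVES above: the trailing arrays make its body size computed rather than fixed. A sketch, assuming numVertexDecls, numRanges and useDivisors are the caller's inputs and SVGA3dVertexDivisor comes from svga3d_types.h:)

    uint32 body_size = sizeof(SVGA3dCmdDrawPrimitives) +
                       numVertexDecls * sizeof(SVGA3dVertexDecl) +
                       numRanges * sizeof(SVGA3dPrimitiveRange) +
                       (useDivisors ?
                        numVertexDecls * sizeof(SVGA3dVertexDivisor) : 0);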
*/ + + uint8 primitiveType; /* SVGA3dPrimitiveType */ + uint8 padding[3]; +} +#include "vmware_pack_end.h" +SVGA3dCmdDraw; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + + uint8 primitiveType; /* SVGA3dPrimitiveType */ + + uint32 indexBufferSid; /* Valid index buffer sid. */ + uint32 indexBufferOffset; /* Byte offset into the vertex buffer, almost */ + /* always 0 for DX9 guests, non-zero for OpenGL */ + /* guests. We can't represent non-multiple of */ + /* stride offsets in D3D9Renderer... */ + uint8 indexBufferStride; /* Allowable values = 1, 2, or 4 */ + + int32 baseVertexLocation; /* Bias applied to the index when selecting a */ + /* vertex from the streams, may be negative */ + + uint32 primitiveCount; /* How many primitives to render */ + uint32 pad0; + uint16 pad1; +} +#include "vmware_pack_end.h" +SVGA3dCmdDrawIndexed; + +typedef +#include "vmware_pack_begin.h" +struct { + /* + * Describe a vertex array's data type, and define how it is to be + * used by the fixed function pipeline or the vertex shader. It + * isn't useful to have two VertexDecls with the same + * VertexArrayIdentity in one draw call. + */ + uint16 streamOffset; + uint8 stream; + uint8 type; /* SVGA3dDeclType */ + uint8 method; /* SVGA3dDeclMethod */ + uint8 usage; /* SVGA3dDeclUsage */ + uint8 usageIndex; + uint8 padding; + +} +#include "vmware_pack_end.h" +SVGA3dVertexElement; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + + uint32 numElements; + + /* + * Followed by numElements SVGA3dVertexElement structures. + * + * If numElements < SVGA3D_MAX_VERTEX_ARRAYS, the remaining elements + * are cleared and will not be used by following draws. + */ +} +#include "vmware_pack_end.h" +SVGA3dCmdSetVertexDecls; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 sid; + uint32 stride; + uint32 offset; +} +#include "vmware_pack_end.h" +SVGA3dVertexStream; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + + uint32 numStreams; + /* + * Followed by numStream SVGA3dVertexStream structures. + * + * If numStreams < SVGA3D_MAX_VERTEX_ARRAYS, the remaining streams + * are cleared and will not be used by following draws. 
+ */ +} +#include "vmware_pack_end.h" +SVGA3dCmdSetVertexStreams; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + uint32 numDivisors; +} +#include "vmware_pack_end.h" +SVGA3dCmdSetVertexDivisors; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 stage; + SVGA3dTextureStateName name; + union { + uint32 value; + float floatValue; + }; +} +#include "vmware_pack_end.h" +SVGA3dTextureState; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + /* Followed by variable number of SVGA3dTextureState structures */ +} +#include "vmware_pack_end.h" +SVGA3dCmdSetTextureState; /* SVGA_3D_CMD_SETTEXTURESTATE */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + SVGA3dTransformType type; + float matrix[16]; +} +#include "vmware_pack_end.h" +SVGA3dCmdSetTransform; /* SVGA_3D_CMD_SETTRANSFORM */ + +typedef +#include "vmware_pack_begin.h" +struct { + float min; + float max; +} +#include "vmware_pack_end.h" +SVGA3dZRange; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + SVGA3dZRange zRange; +} +#include "vmware_pack_end.h" +SVGA3dCmdSetZRange; /* SVGA_3D_CMD_SETZRANGE */ + +typedef +#include "vmware_pack_begin.h" +struct { + float diffuse[4]; + float ambient[4]; + float specular[4]; + float emissive[4]; + float shininess; +} +#include "vmware_pack_end.h" +SVGA3dMaterial; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + SVGA3dFace face; + SVGA3dMaterial material; +} +#include "vmware_pack_end.h" +SVGA3dCmdSetMaterial; /* SVGA_3D_CMD_SETMATERIAL */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + uint32 index; + SVGA3dLightData data; +} +#include "vmware_pack_end.h" +SVGA3dCmdSetLightData; /* SVGA_3D_CMD_SETLIGHTDATA */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + uint32 index; + uint32 enabled; +} +#include "vmware_pack_end.h" +SVGA3dCmdSetLightEnabled; /* SVGA_3D_CMD_SETLIGHTENABLED */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + SVGA3dRect rect; +} +#include "vmware_pack_end.h" +SVGA3dCmdSetViewport; /* SVGA_3D_CMD_SETVIEWPORT */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + SVGA3dRect rect; +} +#include "vmware_pack_end.h" +SVGA3dCmdSetScissorRect; /* SVGA_3D_CMD_SETSCISSORRECT */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + uint32 index; + float plane[4]; +} +#include "vmware_pack_end.h" +SVGA3dCmdSetClipPlane; /* SVGA_3D_CMD_SETCLIPPLANE */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + uint32 shid; + SVGA3dShaderType type; + /* Followed by variable number of DWORDs for shader bytecode */ +} +#include "vmware_pack_end.h" +SVGA3dCmdDefineShader; /* SVGA_3D_CMD_SHADER_DEFINE */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + uint32 shid; + SVGA3dShaderType type; +} +#include "vmware_pack_end.h" +SVGA3dCmdDestroyShader; /* SVGA_3D_CMD_SHADER_DESTROY */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + uint32 reg; /* register number */ + SVGA3dShaderType type; + SVGA3dShaderConstType ctype; + uint32 values[4]; + + /* + * Followed by a variable number of additional values.
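(A sketch of loading one float4 constant into register 4 with SVGA_3D_CMD_SET_SHADER_CONST; appending further values extends the write to consecutive registers. The shader-type and const-type enumerants are assumed from svga3d_types.h, and the snippet belongs inside a driver function:)

    union { float f; uint32 u; } c[4] = {
            { .f = 1.0f }, { .f = 0.5f }, { .f = 0.25f }, { .f = 0.0f }
    };
    SVGA3dCmdSetShaderConst body = {
            .cid    = 0,                        /* illustrative context id */
            .reg    = 4,                        /* first destination register */
            .type   = SVGA3D_SHADERTYPE_PS,
            .ctype  = SVGA3D_CONST_TYPE_FLOAT,
            .values = { c[0].u, c[1].u, c[2].u, c[3].u },
    };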
+ */ +} +#include "vmware_pack_end.h" +SVGA3dCmdSetShaderConst; /* SVGA_3D_CMD_SET_SHADER_CONST */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + SVGA3dShaderType type; + uint32 shid; +} +#include "vmware_pack_end.h" +SVGA3dCmdSetShader; /* SVGA_3D_CMD_SET_SHADER */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + SVGA3dQueryType type; +} +#include "vmware_pack_end.h" +SVGA3dCmdBeginQuery; /* SVGA_3D_CMD_BEGIN_QUERY */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + SVGA3dQueryType type; + SVGAGuestPtr guestResult; /* Points to an SVGA3dQueryResult structure */ +} +#include "vmware_pack_end.h" +SVGA3dCmdEndQuery; /* SVGA_3D_CMD_END_QUERY */ + + +/* + * SVGA_3D_CMD_WAIT_FOR_QUERY -- + * + * Will read the SVGA3dQueryResult structure pointed to by guestResult, + * and if the state member is set to anything other than + * SVGA3D_QUERYSTATE_PENDING, this command will always be a no-op. + * + * Otherwise, in addition to the query explicitly waited for, + * all queries with the same type and issued with the same cid, for which + * an SVGA_3D_CMD_END_QUERY command has previously been sent, will + * be finished after execution of this command. + * + * A query will be identified by the gmrId and offset of the guestResult + * member. If the device can't find an SVGA_3D_CMD_END_QUERY that has + * been sent previously with an identical gmrId and offset, it will + * effectively end all queries with an identical type issued with the + * same cid, and the SVGA3dQueryResult structure pointed to by + * guestResult will not be written to. This property can be used to + * implement a query barrier for a given cid and query type. + */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; /* Same parameters passed to END_QUERY */ + SVGA3dQueryType type; + SVGAGuestPtr guestResult; +} +#include "vmware_pack_end.h" +SVGA3dCmdWaitForQuery; /* SVGA_3D_CMD_WAIT_FOR_QUERY */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 totalSize; /* Set by guest before query is ended. */ + SVGA3dQueryState state; /* Set by host or guest. See SVGA3dQueryState. */ + union { /* Set by host on exit from PENDING state */ + uint32 result32; + uint32 queryCookie; /* May be used to identify which QueryGetData this + result corresponds to. */ + }; +} +#include "vmware_pack_end.h" +SVGA3dQueryResult; + + +/* + * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN -- + * + * This is a blit from an SVGA3D surface to a Screen Object. + * This blit must be directed at a specific screen. + * + * The blit copies from a rectangular region of an SVGA3D surface + * image to a rectangular region of a screen. + * + * This command takes an optional variable-length list of clipping + * rectangles after the body of the command. If no rectangles are + * specified, there is no clipping region. The entire destRect is + * drawn to. If one or more rectangles are included, they describe + * a clipping region. The clip rectangle coordinates are measured + * relative to the top-left corner of destRect. + * + * The srcImage must be from mip=0 face=0. + * + * This supports scaling if the src and dest are of different sizes.
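(A sketch of the wire layout with two clip rectangles, per the rules above; SVGA3dCmdBlitSurfaceToScreen is defined just below, and the rectangle values are illustrative:)

    struct {
            SVGA3dCmdHeader hdr;
            SVGA3dCmdBlitSurfaceToScreen blit;
            SVGASignedRect clip[2];
    } cmd;

    cmd.hdr.id   = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
    cmd.hdr.size = sizeof(cmd.blit) + sizeof(cmd.clip);
    cmd.blit.srcImage.face   = 0;   /* srcImage must be mip=0 face=0 */
    cmd.blit.srcImage.mipmap = 0;
    /* clip[i] coordinates are relative to destRect's top-left corner */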
+ * + * Availability: + * SVGA_FIFO_CAP_SCREEN_OBJECT + */ + +typedef +#include "vmware_pack_begin.h" +struct { + SVGA3dSurfaceImageId srcImage; + SVGASignedRect srcRect; + uint32 destScreenId; /* Screen Object ID */ + SVGASignedRect destRect; + /* Clipping: zero or more SVGASignedRects follow */ +} +#include "vmware_pack_end.h" +SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 sid; + SVGA3dTextureFilter filter; +} +#include "vmware_pack_end.h" +SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */ + + + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 sid; +} +#include "vmware_pack_end.h" +SVGA3dCmdActivateSurface; /* SVGA_3D_CMD_ACTIVATE_SURFACE */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 sid; +} +#include "vmware_pack_end.h" +SVGA3dCmdDeactivateSurface; /* SVGA_3D_CMD_DEACTIVATE_SURFACE */ + +/* + * Screen DMA command + * + * Available with SVGA_FIFO_CAP_SCREEN_OBJECT_2. The SVGA_CAP_3D device + * cap bit is not required. + * + * - refBuffer and destBuffer are 32bit BGRX; refBuffer and destBuffer may + * be different, but the guest must make sure that refBuffer holds exactly + * the contents that had been written to it when the host received the + * previous screen DMA command. + * + * - changemap is generated by lib/blit, and it contains at least the + * changes since the last screen DMA received. + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdScreenDMA { + uint32 screenId; + SVGAGuestImage refBuffer; + SVGAGuestImage destBuffer; + SVGAGuestImage changeMap; +} +#include "vmware_pack_end.h" +SVGA3dCmdScreenDMA; /* SVGA_3D_CMD_SCREEN_DMA */ + +/* + * Set Unity Surface Cookie + * + * Associates the supplied cookie with the surface id for use with + * Unity. This cookie is a hint from guest to host, there is no way + * for the guest to read back the cookie and the host is free to drop + * the cookie association at will. The default value for the cookie + * on all surfaces is 0. + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdSetUnitySurfaceCookie { + uint32 sid; + uint64 cookie; +} +#include "vmware_pack_end.h" +SVGA3dCmdSetUnitySurfaceCookie; /* SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE */ + +/* + * Open a context-specific surface in a non-context-specific manner. + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdOpenContextSurface { + uint32 sid; +} +#include "vmware_pack_end.h" +SVGA3dCmdOpenContextSurface; /* SVGA_3D_CMD_OPEN_CONTEXT_SURFACE */ + + +/* + * Logic ops + */ + +#define SVGA3D_LOTRANSBLT_HONORALPHA (0x01) +#define SVGA3D_LOSTRETCHBLT_MIRRORX (0x01) +#define SVGA3D_LOSTRETCHBLT_MIRRORY (0x02) +#define SVGA3D_LOALPHABLEND_SRCHASALPHA (0x01) + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdLogicOpsBitBlt { + /* + * All LogicOps surfaces are one-level + * surfaces so mipmap & face should always + * be zero. + */ + SVGA3dSurfaceImageId src; + SVGA3dSurfaceImageId dst; + SVGA3dLogicOp logicOp; + /* Followed by variable number of SVGA3dCopyBox structures */ +} +#include "vmware_pack_end.h" +SVGA3dCmdLogicOpsBitBlt; /* SVGA_3D_CMD_LOGICOPS_BITBLT */ + + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdLogicOpsTransBlt { + /* + * All LogicOps surfaces are one-level + * surfaces so mipmap & face should always + * be zero.
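(A sketch for the BitBlt variant above; the single-level rule fixes face and mipmap to zero. src_sid and dst_sid are illustrative surface ids, and SVGA3D_LOGICOP_COPY is assumed from the SVGA3dLogicOp enum in svga3d_types.h:)

    SVGA3dCmdLogicOpsBitBlt blt = {
            .src     = { .sid = src_sid, .face = 0, .mipmap = 0 },
            .dst     = { .sid = dst_sid, .face = 0, .mipmap = 0 },
            .logicOp = SVGA3D_LOGICOP_COPY,
    };
    /* one SVGA3dCopyBox per blitted region follows in the FIFO */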
+ */ + SVGA3dSurfaceImageId src; + SVGA3dSurfaceImageId dst; + uint32 color; + uint32 flags; + SVGA3dBox srcBox; + SVGA3dBox dstBox; +} +#include "vmware_pack_end.h" +SVGA3dCmdLogicOpsTransBlt; /* SVGA_3D_CMD_LOGICOPS_TRANSBLT */ + + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdLogicOpsStretchBlt { + /* + * All LogicOps surfaces are one-level + * surfaces so mipmap & face should always + * be zero. + */ + SVGA3dSurfaceImageId src; + SVGA3dSurfaceImageId dst; + uint16 mode; + uint16 flags; + SVGA3dBox srcBox; + SVGA3dBox dstBox; +} +#include "vmware_pack_end.h" +SVGA3dCmdLogicOpsStretchBlt; /* SVGA_3D_CMD_LOGICOPS_STRETCHBLT */ + + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdLogicOpsColorFill { + /* + * All LogicOps surfaces are one-level + * surfaces so mipmap & face should always + * be zero. + */ + SVGA3dSurfaceImageId dst; + uint32 color; + SVGA3dLogicOp logicOp; + /* Followed by variable number of SVGA3dRect structures. */ +} +#include "vmware_pack_end.h" +SVGA3dCmdLogicOpsColorFill; /* SVGA_3D_CMD_LOGICOPS_COLORFILL */ + + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdLogicOpsAlphaBlend { + /* + * All LogicOps surfaces are one-level + * surfaces so mipmap & face should always + * be zero. + */ + SVGA3dSurfaceImageId src; + SVGA3dSurfaceImageId dst; + uint32 alphaVal; + uint32 flags; + SVGA3dBox srcBox; + SVGA3dBox dstBox; +} +#include "vmware_pack_end.h" +SVGA3dCmdLogicOpsAlphaBlend; /* SVGA_3D_CMD_LOGICOPS_ALPHABLEND */ + +#define SVGA3D_CLEARTYPE_INVALID_GAMMA_INDEX 0xFFFFFFFF + +#define SVGA3D_CLEARTYPE_GAMMA_WIDTH 512 +#define SVGA3D_CLEARTYPE_GAMMA_HEIGHT 16 + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdLogicOpsClearTypeBlend { + /* + * All LogicOps surfaces are one-level + * surfaces so mipmap & face should always + * be zero. + */ + SVGA3dSurfaceImageId tmp; + SVGA3dSurfaceImageId dst; + SVGA3dSurfaceImageId gammaSurf; + SVGA3dSurfaceImageId alphaSurf; + uint32 gamma; + uint32 color; + uint32 color2; + int32 alphaOffsetX; + int32 alphaOffsetY; + /* Followed by variable number of SVGA3dBox structures */ +} +#include "vmware_pack_end.h" +SVGA3dCmdLogicOpsClearTypeBlend; /* SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND */ + + +/* + * Guest-backed objects definitions. 
+ */ + +typedef +#include "vmware_pack_begin.h" +struct { + SVGAMobFormat ptDepth; + uint32 sizeInBytes; + PPN64 base; +} +#include "vmware_pack_end.h" +SVGAOTableMobEntry; +#define SVGA3D_OTABLE_MOB_ENTRY_SIZE (sizeof(SVGAOTableMobEntry)) + +typedef +#include "vmware_pack_begin.h" +struct { + SVGA3dSurfaceFormat format; + SVGA3dSurfaceFlags surfaceFlags; + uint32 numMipLevels; + uint32 multisampleCount; + SVGA3dTextureFilter autogenFilter; + SVGA3dSize size; + SVGAMobId mobid; + uint32 arraySize; + uint32 mobPitch; + uint32 pad[5]; +} +#include "vmware_pack_end.h" +SVGAOTableSurfaceEntry; +#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE (sizeof(SVGAOTableSurfaceEntry)) + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + SVGAMobId mobid; +} +#include "vmware_pack_end.h" +SVGAOTableContextEntry; +#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE (sizeof(SVGAOTableContextEntry)) + +typedef +#include "vmware_pack_begin.h" +struct { + SVGA3dShaderType type; + uint32 sizeInBytes; + uint32 offsetInBytes; + SVGAMobId mobid; +} +#include "vmware_pack_end.h" +SVGAOTableShaderEntry; +#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE (sizeof(SVGAOTableShaderEntry)) + +#define SVGA_STFLAG_PRIMARY (1 << 0) +typedef uint32 SVGAScreenTargetFlags; + +typedef +#include "vmware_pack_begin.h" +struct { + SVGA3dSurfaceImageId image; + uint32 width; + uint32 height; + int32 xRoot; + int32 yRoot; + SVGAScreenTargetFlags flags; + uint32 dpi; + uint32 pad[7]; +} +#include "vmware_pack_end.h" +SVGAOTableScreenTargetEntry; +#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE \ + (sizeof(SVGAOTableScreenTargetEntry)) + +typedef +#include "vmware_pack_begin.h" +struct { + float value[4]; +} +#include "vmware_pack_end.h" +SVGA3dShaderConstFloat; + +typedef +#include "vmware_pack_begin.h" +struct { + int32 value[4]; +} +#include "vmware_pack_end.h" +SVGA3dShaderConstInt; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 value; +} +#include "vmware_pack_end.h" +SVGA3dShaderConstBool; + +typedef +#include "vmware_pack_begin.h" +struct { + uint16 streamOffset; + uint8 stream; + uint8 type; + uint8 methodUsage; + uint8 usageIndex; +} +#include "vmware_pack_end.h" +SVGAGBVertexElement; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 sid; + uint16 stride; + uint32 offset; +} +#include "vmware_pack_end.h" +SVGAGBVertexStream; +typedef +#include "vmware_pack_begin.h" +struct { + SVGA3dRect viewport; + SVGA3dRect scissorRect; + SVGA3dZRange zRange; + + SVGA3dSurfaceImageId renderTargets[SVGA3D_RT_MAX]; + SVGAGBVertexElement decl1[4]; + + uint32 renderStates[SVGA3D_RS_MAX]; + SVGAGBVertexElement decl2[18]; + uint32 pad0[2]; + + struct { + SVGA3dFace face; + SVGA3dMaterial material; + } material; + + float clipPlanes[SVGA3D_NUM_CLIPPLANES][4]; + float matrices[SVGA3D_TRANSFORM_MAX][16]; + + SVGA3dBool lightEnabled[SVGA3D_NUM_LIGHTS]; + SVGA3dLightData lightData[SVGA3D_NUM_LIGHTS]; + + /* + * Shaders currently bound + */ + uint32 shaders[SVGA3D_NUM_SHADERTYPE_PREDX]; + SVGAGBVertexElement decl3[10]; + uint32 pad1[3]; + + uint32 occQueryActive; + uint32 occQueryValue; + + /* + * Int/Bool Shader constants + */ + SVGA3dShaderConstInt pShaderIValues[SVGA3D_CONSTINTREG_MAX]; + SVGA3dShaderConstInt vShaderIValues[SVGA3D_CONSTINTREG_MAX]; + uint16 pShaderBValues; + uint16 vShaderBValues; + + + SVGAGBVertexStream streams[SVGA3D_MAX_VERTEX_ARRAYS]; + SVGA3dVertexDivisor divisors[SVGA3D_MAX_VERTEX_ARRAYS]; + uint32 numVertexDecls; + uint32 numVertexStreams; + uint32 numVertexDivisors; + uint32 pad2[30]; + + /* + * Texture Stages 
+ * + * SVGA3D_TS_INVALID through SVGA3D_TS_CONSTANT are in the + * textureStages array. + * SVGA3D_TS_COLOR_KEY is in tsColorKey. + */ + uint32 tsColorKey[SVGA3D_NUM_TEXTURE_UNITS]; + uint32 textureStages[SVGA3D_NUM_TEXTURE_UNITS][SVGA3D_TS_CONSTANT + 1]; + uint32 tsColorKeyEnable[SVGA3D_NUM_TEXTURE_UNITS]; + + /* + * Float Shader constants. + */ + SVGA3dShaderConstFloat pShaderFValues[SVGA3D_CONSTREG_MAX]; + SVGA3dShaderConstFloat vShaderFValues[SVGA3D_CONSTREG_MAX]; +} +#include "vmware_pack_end.h" +SVGAGBContextData; +#define SVGA3D_CONTEXT_DATA_SIZE (sizeof(SVGAGBContextData)) + +/* + * SVGA3dCmdSetOTableBase -- + * + * This command allows the guest to specify the base PPN of the + * specified object table. + */ + +typedef +#include "vmware_pack_begin.h" +struct { + SVGAOTableType type; + PPN baseAddress; + uint32 sizeInBytes; + uint32 validSizeInBytes; + SVGAMobFormat ptDepth; +} +#include "vmware_pack_end.h" +SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */ + +typedef +#include "vmware_pack_begin.h" +struct { + SVGAOTableType type; + PPN64 baseAddress; + uint32 sizeInBytes; + uint32 validSizeInBytes; + SVGAMobFormat ptDepth; +} +#include "vmware_pack_end.h" +SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */ + +typedef +#include "vmware_pack_begin.h" +struct { + SVGAOTableType type; +} +#include "vmware_pack_end.h" +SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */ + +/* + * Define a memory object (Mob) in the OTable. + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDefineGBMob { + SVGAMobId mobid; + SVGAMobFormat ptDepth; + PPN base; + uint32 sizeInBytes; +} +#include "vmware_pack_end.h" +SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ + + +/* + * Destroys an object in the OTable. + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDestroyGBMob { + SVGAMobId mobid; +} +#include "vmware_pack_end.h" +SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */ + + +/* + * Define a memory object (Mob) in the OTable with a PPN64 base. + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDefineGBMob64 { + SVGAMobId mobid; + SVGAMobFormat ptDepth; + PPN64 base; + uint32 sizeInBytes; +} +#include "vmware_pack_end.h" +SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */ + +/* + * Redefine an object in the OTable with PPN64 base. + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdRedefineGBMob64 { + SVGAMobId mobid; + SVGAMobFormat ptDepth; + PPN64 base; + uint32 sizeInBytes; +} +#include "vmware_pack_end.h" +SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ + +/* + * Notification that the page tables have been modified. + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdUpdateGBMobMapping { + SVGAMobId mobid; +} +#include "vmware_pack_end.h" +SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */ + +/* + * Define a guest-backed surface. + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDefineGBSurface { + uint32 sid; + SVGA3dSurfaceFlags surfaceFlags; + SVGA3dSurfaceFormat format; + uint32 numMipLevels; + uint32 multisampleCount; + SVGA3dTextureFilter autogenFilter; + SVGA3dSize size; +} +#include "vmware_pack_end.h" +SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ + +/* + * Destroy a guest-backed surface. 
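(Taken together, the mob and surface commands above give the usual guest-backed setup order; a sketch, where mob_id, pt_root_ppn and surface_bytes are illustrative inputs, SVGA3D_MOBFMT_PTDEPTH_1 is assumed from the SVGAMobFormat enum in svga3d_types.h, and SVGA3dCmdBindGBSurface is defined just below:)

    /* 1. define the memory object backing the surface */
    SVGA3dCmdDefineGBMob mob = {
            .mobid       = mob_id,
            .ptDepth     = SVGA3D_MOBFMT_PTDEPTH_1, /* one page-table level */
            .base        = pt_root_ppn,
            .sizeInBytes = surface_bytes,
    };
    /* 2. define the surface (SVGA3dCmdDefineGBSurface), then bind it */
    SVGA3dCmdBindGBSurface bind = { .sid = surface_id, .mobid = mob_id };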
+ */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDestroyGBSurface { + uint32 sid; +} +#include "vmware_pack_end.h" +SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ + +/* + * Bind a guest-backed surface to a mob. + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdBindGBSurface { + uint32 sid; + SVGAMobId mobid; +} +#include "vmware_pack_end.h" +SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdBindGBSurfaceWithPitch { + uint32 sid; + SVGAMobId mobid; + uint32 baseLevelPitch; +} +#include "vmware_pack_end.h" +SVGA3dCmdBindGBSurfaceWithPitch; /* SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH */ + +/* + * Conditionally bind a mob to a guest-backed surface if testMobid + * matches the currently bound mob. Optionally issue a + * readback/update on the surface while it is still bound to the old + * mobid if the mobid is changed by this command. + */ + +#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0) +#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_UPDATE (1 << 1) + +typedef +#include "vmware_pack_begin.h" +struct{ + uint32 sid; + SVGAMobId testMobid; + SVGAMobId mobid; + uint32 flags; +} +#include "vmware_pack_end.h" +SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */ + +/* + * Update an image in a guest-backed surface. + * (Inform the device that the guest-contents have been updated.) + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdUpdateGBImage { + SVGA3dSurfaceImageId image; + SVGA3dBox box; +} +#include "vmware_pack_end.h" +SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ + +/* + * Update an entire guest-backed surface. + * (Inform the device that the guest-contents have been updated.) + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdUpdateGBSurface { + uint32 sid; +} +#include "vmware_pack_end.h" +SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ + +/* + * Readback an image in a guest-backed surface. + * (Request the device to flush the dirty contents into the guest.) + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdReadbackGBImage { + SVGA3dSurfaceImageId image; +} +#include "vmware_pack_end.h" +SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE */ + +/* + * Readback an entire guest-backed surface. + * (Request the device to flush the dirty contents into the guest.) + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdReadbackGBSurface { + uint32 sid; +} +#include "vmware_pack_end.h" +SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ + +/* + * Readback a sub rect of an image in a guest-backed surface. After + * issuing this command the driver is required to issue an update call + * of the same region before issuing any other commands that reference + * this surface or rendering is not guaranteed. + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdReadbackGBImagePartial { + SVGA3dSurfaceImageId image; + SVGA3dBox box; + uint32 invertBox; +} +#include "vmware_pack_end.h" +SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ + + +/* + * Invalidate an image in a guest-backed surface. + * (Notify the device that the contents can be lost.) + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdInvalidateGBImage { + SVGA3dSurfaceImageId image; +} +#include "vmware_pack_end.h" +SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ + +/* + * Invalidate an entire guest-backed surface. 
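(The partial-readback rule above implies a fixed command pairing; a sketch, with sid and box as illustrative inputs:)

    /* flush one region of the image back to the guest... */
    SVGA3dCmdReadbackGBImagePartial rb = {
            .image     = { .sid = sid, .face = 0, .mipmap = 0 },
            .box       = box,
            .invertBox = 0,
    };
    /* ...then update the same region before any other command touches sid */
    SVGA3dCmdUpdateGBImage upd = { .image = rb.image, .box = rb.box };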
+ * (Notify the device that the contents of all images can be lost.) + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdInvalidateGBSurface { + uint32 sid; +} +#include "vmware_pack_end.h" +SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ + +/* + * Invalidate a sub rect of an image in a guest-backed surface. After + * issuing this command the driver is required to issue an update call + * of the same region before issuing any other commands that reference + * this surface or rendering is not guaranteed. + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdInvalidateGBImagePartial { + SVGA3dSurfaceImageId image; + SVGA3dBox box; + uint32 invertBox; +} +#include "vmware_pack_end.h" +SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ + + +/* + * Define a guest-backed context. + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDefineGBContext { + uint32 cid; +} +#include "vmware_pack_end.h" +SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ + +/* + * Destroy a guest-backed context. + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDestroyGBContext { + uint32 cid; +} +#include "vmware_pack_end.h" +SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ + +/* + * Bind a guest-backed context. + * + * validContents should be set to 0 for new contexts, + * and 1 if this is an old context which is getting paged + * back on to the device. + * + * For new contexts, it is recommended that the driver + * issue commands to initialize all interesting state + * prior to rendering. + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdBindGBContext { + uint32 cid; + SVGAMobId mobid; + uint32 validContents; +} +#include "vmware_pack_end.h" +SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ + +/* + * Readback a guest-backed context. + * (Request that the device flush the contents back into guest memory.) + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdReadbackGBContext { + uint32 cid; +} +#include "vmware_pack_end.h" +SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ + +/* + * Invalidate a guest-backed context. + */ +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdInvalidateGBContext { + uint32 cid; +} +#include "vmware_pack_end.h" +SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ + +/* + * Define a guest-backed shader. + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDefineGBShader { + uint32 shid; + SVGA3dShaderType type; + uint32 sizeInBytes; +} +#include "vmware_pack_end.h" +SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ + +/* + * Bind a guest-backed shader. + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdBindGBShader { + uint32 shid; + SVGAMobId mobid; + uint32 offsetInBytes; +} +#include "vmware_pack_end.h" +SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ + +/* + * Destroy a guest-backed shader. + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDestroyGBShader { + uint32 shid; +} +#include "vmware_pack_end.h" +SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + uint32 regStart; + SVGA3dShaderType shaderType; + SVGA3dShaderConstType constType; + + /* + * Followed by a variable number of shader constants. + * + * Note that FLOAT and INT constants are 4-dwords in length, while + * BOOL constants are 1-dword in length.
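(The dword sizing above makes the body length of SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE a simple function of the constant type; a sketch, with numConsts and constType as the caller's inputs and SVGA3D_CONST_TYPE_BOOL assumed from svga3d_types.h:)

    uint32 per_const = (constType == SVGA3D_CONST_TYPE_BOOL) ? 1 : 4;
    uint32 body_size = sizeof(SVGA3dCmdSetGBShaderConstInline) +
                       numConsts * per_const * sizeof(uint32);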
+ */ +} +#include "vmware_pack_end.h" +SVGA3dCmdSetGBShaderConstInline; /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */ + + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + SVGA3dQueryType type; +} +#include "vmware_pack_end.h" +SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + SVGA3dQueryType type; + SVGAMobId mobid; + uint32 offset; +} +#include "vmware_pack_end.h" +SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ + + +/* + * SVGA_3D_CMD_WAIT_FOR_GB_QUERY -- + * + * The semantics of this command are identical to the + * SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written + * to a Mob instead of a GMR. + */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + SVGA3dQueryType type; + SVGAMobId mobid; + uint32 offset; +} +#include "vmware_pack_end.h" +SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ + + +typedef +#include "vmware_pack_begin.h" +struct { + SVGAMobId mobid; + uint32 mustBeZero; + uint32 initialized; +} +#include "vmware_pack_end.h" +SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */ + +typedef +#include "vmware_pack_begin.h" +struct { + SVGAMobId mobid; + uint32 gartOffset; +} +#include "vmware_pack_end.h" +SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */ + + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 gartOffset; + uint32 numPages; +} +#include "vmware_pack_end.h" +SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */ + + +/* + * Screen Targets + */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 stid; + uint32 width; + uint32 height; + int32 xRoot; + int32 yRoot; + SVGAScreenTargetFlags flags; + + /* + * The physical DPI that the guest expects this screen displayed at. + * + * Guests which are not DPI-aware should set this to zero. + */ + uint32 dpi; +} +#include "vmware_pack_end.h" +SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 stid; +} +#include "vmware_pack_end.h" +SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 stid; + SVGA3dSurfaceImageId image; +} +#include "vmware_pack_end.h" +SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 stid; + SVGA3dRect rect; +} +#include "vmware_pack_end.h" +SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdGBScreenDMA { + uint32 screenId; + uint32 dead; + SVGAMobId destMobID; + uint32 destPitch; + SVGAMobId changeMapMobID; +} +#include "vmware_pack_end.h" +SVGA3dCmdGBScreenDMA; /* SVGA_3D_CMD_GB_SCREEN_DMA */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 value; + uint32 mobId; + uint32 mobOffset; +} +#include "vmware_pack_end.h" +SVGA3dCmdGBMobFence; /* SVGA_3D_CMD_GB_MOB_FENCE*/ + +#endif /* _SVGA3D_CMD_H_ */ diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h new file mode 100644 index 000000000000..c18b663f360f --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h @@ -0,0 +1,457 @@ +/********************************************************** + * Copyright 1998-2015 VMware, Inc. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + **********************************************************/ + +/* + * svga3d_devcaps.h -- + * + * SVGA 3d caps definitions + */ + +#ifndef _SVGA3D_DEVCAPS_H_ +#define _SVGA3D_DEVCAPS_H_ + +#define INCLUDE_ALLOW_MODULE +#define INCLUDE_ALLOW_USERLEVEL +#define INCLUDE_ALLOW_VMCORE + +#include "includeCheck.h" + +/* + * 3D Hardware Version + * + * The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo + * register. It is set by the host and read by the guest. This lets + * us make new guest drivers which are backwards-compatible with old + * SVGA hardware revisions. It does not let us support old guest + * drivers. Good enough for now. + * + */ + +#define SVGA3D_MAKE_HWVERSION(major, minor) (((major) << 16) | ((minor) & 0xFF)) +#define SVGA3D_MAJOR_HWVERSION(version) ((version) >> 16) +#define SVGA3D_MINOR_HWVERSION(version) ((version) & 0xFF) + +typedef enum { + SVGA3D_HWVERSION_WS5_RC1 = SVGA3D_MAKE_HWVERSION(0, 1), + SVGA3D_HWVERSION_WS5_RC2 = SVGA3D_MAKE_HWVERSION(0, 2), + SVGA3D_HWVERSION_WS51_RC1 = SVGA3D_MAKE_HWVERSION(0, 3), + SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1), + SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4), + SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0), + SVGA3D_HWVERSION_WS8_B1 = SVGA3D_MAKE_HWVERSION(2, 1), + SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS8_B1, +} SVGA3dHardwareVersion; + +/* + * DevCap indexes. + */ + +typedef enum { + SVGA3D_DEVCAP_INVALID = ((uint32)-1), + SVGA3D_DEVCAP_3D = 0, + SVGA3D_DEVCAP_MAX_LIGHTS = 1, + + /* + * SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of + * fixed-function texture units available. Each of these units + * works in both FFP and Shader modes, and supports texture + * transforms and texture coordinates. The host may have additional + * texture image units that are only usable with shaders.
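(The version macros are plain bit packing; a worked example:)

    /* SVGA3D_MAKE_HWVERSION(2, 1) == (2 << 16) | 1 == 0x20001 */
    uint32 ver   = SVGA3D_HWVERSION_WS8_B1;
    uint32 major = SVGA3D_MAJOR_HWVERSION(ver);   /* 2 */
    uint32 minor = SVGA3D_MINOR_HWVERSION(ver);   /* 1 */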
+ */ + SVGA3D_DEVCAP_MAX_TEXTURES = 2, + SVGA3D_DEVCAP_MAX_CLIP_PLANES = 3, + SVGA3D_DEVCAP_VERTEX_SHADER_VERSION = 4, + SVGA3D_DEVCAP_VERTEX_SHADER = 5, + SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION = 6, + SVGA3D_DEVCAP_FRAGMENT_SHADER = 7, + SVGA3D_DEVCAP_MAX_RENDER_TARGETS = 8, + SVGA3D_DEVCAP_S23E8_TEXTURES = 9, + SVGA3D_DEVCAP_S10E5_TEXTURES = 10, + SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND = 11, + SVGA3D_DEVCAP_D16_BUFFER_FORMAT = 12, + SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT = 13, + SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT = 14, + SVGA3D_DEVCAP_QUERY_TYPES = 15, + SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING = 16, + SVGA3D_DEVCAP_MAX_POINT_SIZE = 17, + SVGA3D_DEVCAP_MAX_SHADER_TEXTURES = 18, + SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH = 19, + SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT = 20, + SVGA3D_DEVCAP_MAX_VOLUME_EXTENT = 21, + SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT = 22, + SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO = 23, + SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY = 24, + SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT = 25, + SVGA3D_DEVCAP_MAX_VERTEX_INDEX = 26, + SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS = 27, + SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28, + SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS = 29, + SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS = 30, + SVGA3D_DEVCAP_TEXTURE_OPS = 31, + SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 = 32, + SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 = 33, + SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 = 34, + SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 = 35, + SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 = 36, + SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 = 37, + SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 = 38, + SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 = 39, + SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 = 40, + SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 = 41, + SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 = 42, + SVGA3D_DEVCAP_SURFACEFMT_Z_D16 = 43, + SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 = 44, + SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 = 45, + SVGA3D_DEVCAP_SURFACEFMT_DXT1 = 46, + SVGA3D_DEVCAP_SURFACEFMT_DXT2 = 47, + SVGA3D_DEVCAP_SURFACEFMT_DXT3 = 48, + SVGA3D_DEVCAP_SURFACEFMT_DXT4 = 49, + SVGA3D_DEVCAP_SURFACEFMT_DXT5 = 50, + SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 = 51, + SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 = 52, + SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 = 53, + SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 = 54, + SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 = 55, + SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 = 56, + SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 = 57, + SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 = 58, + SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 = 59, + SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 = 60, + SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 = 61, + + /* + * There is a hole in our devcap definitions for + * historical reasons. + * + * Define a constant just for completeness. + */ + SVGA3D_DEVCAP_MISSING62 = 62, + + SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES = 63, + + /* + * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color + * render targets. This does not include the depth or stencil targets. + */ + SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS = 64, + + SVGA3D_DEVCAP_SURFACEFMT_V16U16 = 65, + SVGA3D_DEVCAP_SURFACEFMT_G16R16 = 66, + SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67, + SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68, + SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69, + SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES = 70, + SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES = 71, + SVGA3D_DEVCAP_ALPHATOCOVERAGE = 72, + SVGA3D_DEVCAP_SUPERSAMPLE = 73, + SVGA3D_DEVCAP_AUTOGENMIPMAPS = 74, + SVGA3D_DEVCAP_SURFACEFMT_NV12 = 75, + SVGA3D_DEVCAP_SURFACEFMT_AYUV = 76, + + /* + * This is the maximum number of SVGA context IDs that the guest + * can define using SVGA_3D_CMD_CONTEXT_DEFINE. 
+ */ + SVGA3D_DEVCAP_MAX_CONTEXT_IDS = 77, + + /* + * This is the maximum number of SVGA surface IDs that the guest + * can define using SVGA_3D_CMD_SURFACE_DEFINE*. + */ + SVGA3D_DEVCAP_MAX_SURFACE_IDS = 78, + + SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 = 79, + SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 = 80, + SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT = 81, + + SVGA3D_DEVCAP_SURFACEFMT_ATI1 = 82, + SVGA3D_DEVCAP_SURFACEFMT_ATI2 = 83, + + /* + * Deprecated. + */ + SVGA3D_DEVCAP_DEAD1 = 84, + + /* + * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements + * ored together, one for every type of video decoding supported. + */ + SVGA3D_DEVCAP_VIDEO_DECODE = 85, + + /* + * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements + * ored together, one for every type of video processing supported. + */ + SVGA3D_DEVCAP_VIDEO_PROCESS = 86, + + SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */ + SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */ + SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */ + SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */ + + SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91, + + /* + * Does the host support the SVGA logic ops commands? + */ + SVGA3D_DEVCAP_LOGICOPS = 92, + + /* + * Are TS_CONSTANT, TS_COLOR_KEY, and TS_COLOR_KEY_ENABLE supported? + */ + SVGA3D_DEVCAP_TS_COLOR_KEY = 93, /* boolean */ + + /* + * Deprecated. + */ + SVGA3D_DEVCAP_DEAD2 = 94, + + /* + * Does the device support the DX commands? + */ + SVGA3D_DEVCAP_DX = 95, + + /* + * What is the maximum size of a texture array? + * + * (Even if this cap is zero, cubemaps are still allowed.) + */ + SVGA3D_DEVCAP_MAX_TEXTURE_ARRAY_SIZE = 96, + + /* + * What is the maximum number of vertex buffers that can + * be used in the DXContext inputAssembly? + */ + SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS = 97, + + /* + * What is the maximum number of constant buffers + * that can be expected to work correctly with a + * DX context? + */ + SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS = 98, + + /* + * Does the device support provoking vertex control? + * If zero, the first vertex will always be the provoking vertex. 
+ */ + SVGA3D_DEVCAP_DX_PROVOKING_VERTEX = 99, + + SVGA3D_DEVCAP_DXFMT_X8R8G8B8 = 100, + SVGA3D_DEVCAP_DXFMT_A8R8G8B8 = 101, + SVGA3D_DEVCAP_DXFMT_R5G6B5 = 102, + SVGA3D_DEVCAP_DXFMT_X1R5G5B5 = 103, + SVGA3D_DEVCAP_DXFMT_A1R5G5B5 = 104, + SVGA3D_DEVCAP_DXFMT_A4R4G4B4 = 105, + SVGA3D_DEVCAP_DXFMT_Z_D32 = 106, + SVGA3D_DEVCAP_DXFMT_Z_D16 = 107, + SVGA3D_DEVCAP_DXFMT_Z_D24S8 = 108, + SVGA3D_DEVCAP_DXFMT_Z_D15S1 = 109, + SVGA3D_DEVCAP_DXFMT_LUMINANCE8 = 110, + SVGA3D_DEVCAP_DXFMT_LUMINANCE4_ALPHA4 = 111, + SVGA3D_DEVCAP_DXFMT_LUMINANCE16 = 112, + SVGA3D_DEVCAP_DXFMT_LUMINANCE8_ALPHA8 = 113, + SVGA3D_DEVCAP_DXFMT_DXT1 = 114, + SVGA3D_DEVCAP_DXFMT_DXT2 = 115, + SVGA3D_DEVCAP_DXFMT_DXT3 = 116, + SVGA3D_DEVCAP_DXFMT_DXT4 = 117, + SVGA3D_DEVCAP_DXFMT_DXT5 = 118, + SVGA3D_DEVCAP_DXFMT_BUMPU8V8 = 119, + SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5 = 120, + SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8 = 121, + SVGA3D_DEVCAP_DXFMT_BUMPL8V8U8 = 122, + SVGA3D_DEVCAP_DXFMT_ARGB_S10E5 = 123, + SVGA3D_DEVCAP_DXFMT_ARGB_S23E8 = 124, + SVGA3D_DEVCAP_DXFMT_A2R10G10B10 = 125, + SVGA3D_DEVCAP_DXFMT_V8U8 = 126, + SVGA3D_DEVCAP_DXFMT_Q8W8V8U8 = 127, + SVGA3D_DEVCAP_DXFMT_CxV8U8 = 128, + SVGA3D_DEVCAP_DXFMT_X8L8V8U8 = 129, + SVGA3D_DEVCAP_DXFMT_A2W10V10U10 = 130, + SVGA3D_DEVCAP_DXFMT_ALPHA8 = 131, + SVGA3D_DEVCAP_DXFMT_R_S10E5 = 132, + SVGA3D_DEVCAP_DXFMT_R_S23E8 = 133, + SVGA3D_DEVCAP_DXFMT_RG_S10E5 = 134, + SVGA3D_DEVCAP_DXFMT_RG_S23E8 = 135, + SVGA3D_DEVCAP_DXFMT_BUFFER = 136, + SVGA3D_DEVCAP_DXFMT_Z_D24X8 = 137, + SVGA3D_DEVCAP_DXFMT_V16U16 = 138, + SVGA3D_DEVCAP_DXFMT_G16R16 = 139, + SVGA3D_DEVCAP_DXFMT_A16B16G16R16 = 140, + SVGA3D_DEVCAP_DXFMT_UYVY = 141, + SVGA3D_DEVCAP_DXFMT_YUY2 = 142, + SVGA3D_DEVCAP_DXFMT_NV12 = 143, + SVGA3D_DEVCAP_DXFMT_AYUV = 144, + SVGA3D_DEVCAP_DXFMT_R32G32B32A32_TYPELESS = 145, + SVGA3D_DEVCAP_DXFMT_R32G32B32A32_UINT = 146, + SVGA3D_DEVCAP_DXFMT_R32G32B32A32_SINT = 147, + SVGA3D_DEVCAP_DXFMT_R32G32B32_TYPELESS = 148, + SVGA3D_DEVCAP_DXFMT_R32G32B32_FLOAT = 149, + SVGA3D_DEVCAP_DXFMT_R32G32B32_UINT = 150, + SVGA3D_DEVCAP_DXFMT_R32G32B32_SINT = 151, + SVGA3D_DEVCAP_DXFMT_R16G16B16A16_TYPELESS = 152, + SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UINT = 153, + SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SNORM = 154, + SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SINT = 155, + SVGA3D_DEVCAP_DXFMT_R32G32_TYPELESS = 156, + SVGA3D_DEVCAP_DXFMT_R32G32_UINT = 157, + SVGA3D_DEVCAP_DXFMT_R32G32_SINT = 158, + SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS = 159, + SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT = 160, + SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24_TYPELESS = 161, + SVGA3D_DEVCAP_DXFMT_X32_TYPELESS_G8X24_UINT = 162, + SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS = 163, + SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT = 164, + SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT = 165, + SVGA3D_DEVCAP_DXFMT_R8G8B8A8_TYPELESS = 166, + SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM = 167, + SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM_SRGB = 168, + SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UINT = 169, + SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SINT = 170, + SVGA3D_DEVCAP_DXFMT_R16G16_TYPELESS = 171, + SVGA3D_DEVCAP_DXFMT_R16G16_UINT = 172, + SVGA3D_DEVCAP_DXFMT_R16G16_SINT = 173, + SVGA3D_DEVCAP_DXFMT_R32_TYPELESS = 174, + SVGA3D_DEVCAP_DXFMT_D32_FLOAT = 175, + SVGA3D_DEVCAP_DXFMT_R32_UINT = 176, + SVGA3D_DEVCAP_DXFMT_R32_SINT = 177, + SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS = 178, + SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT = 179, + SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8_TYPELESS = 180, + SVGA3D_DEVCAP_DXFMT_X24_TYPELESS_G8_UINT = 181, + SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS = 182, + SVGA3D_DEVCAP_DXFMT_R8G8_UNORM = 183, + SVGA3D_DEVCAP_DXFMT_R8G8_UINT = 
184, + SVGA3D_DEVCAP_DXFMT_R8G8_SINT = 185, + SVGA3D_DEVCAP_DXFMT_R16_TYPELESS = 186, + SVGA3D_DEVCAP_DXFMT_R16_UNORM = 187, + SVGA3D_DEVCAP_DXFMT_R16_UINT = 188, + SVGA3D_DEVCAP_DXFMT_R16_SNORM = 189, + SVGA3D_DEVCAP_DXFMT_R16_SINT = 190, + SVGA3D_DEVCAP_DXFMT_R8_TYPELESS = 191, + SVGA3D_DEVCAP_DXFMT_R8_UNORM = 192, + SVGA3D_DEVCAP_DXFMT_R8_UINT = 193, + SVGA3D_DEVCAP_DXFMT_R8_SNORM = 194, + SVGA3D_DEVCAP_DXFMT_R8_SINT = 195, + SVGA3D_DEVCAP_DXFMT_P8 = 196, + SVGA3D_DEVCAP_DXFMT_R9G9B9E5_SHAREDEXP = 197, + SVGA3D_DEVCAP_DXFMT_R8G8_B8G8_UNORM = 198, + SVGA3D_DEVCAP_DXFMT_G8R8_G8B8_UNORM = 199, + SVGA3D_DEVCAP_DXFMT_BC1_TYPELESS = 200, + SVGA3D_DEVCAP_DXFMT_BC1_UNORM_SRGB = 201, + SVGA3D_DEVCAP_DXFMT_BC2_TYPELESS = 202, + SVGA3D_DEVCAP_DXFMT_BC2_UNORM_SRGB = 203, + SVGA3D_DEVCAP_DXFMT_BC3_TYPELESS = 204, + SVGA3D_DEVCAP_DXFMT_BC3_UNORM_SRGB = 205, + SVGA3D_DEVCAP_DXFMT_BC4_TYPELESS = 206, + SVGA3D_DEVCAP_DXFMT_ATI1 = 207, + SVGA3D_DEVCAP_DXFMT_BC4_SNORM = 208, + SVGA3D_DEVCAP_DXFMT_BC5_TYPELESS = 209, + SVGA3D_DEVCAP_DXFMT_ATI2 = 210, + SVGA3D_DEVCAP_DXFMT_BC5_SNORM = 211, + SVGA3D_DEVCAP_DXFMT_R10G10B10_XR_BIAS_A2_UNORM = 212, + SVGA3D_DEVCAP_DXFMT_B8G8R8A8_TYPELESS = 213, + SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM_SRGB = 214, + SVGA3D_DEVCAP_DXFMT_B8G8R8X8_TYPELESS = 215, + SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM_SRGB = 216, + SVGA3D_DEVCAP_DXFMT_Z_DF16 = 217, + SVGA3D_DEVCAP_DXFMT_Z_DF24 = 218, + SVGA3D_DEVCAP_DXFMT_Z_D24S8_INT = 219, + SVGA3D_DEVCAP_DXFMT_YV12 = 220, + SVGA3D_DEVCAP_DXFMT_R32G32B32A32_FLOAT = 221, + SVGA3D_DEVCAP_DXFMT_R16G16B16A16_FLOAT = 222, + SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UNORM = 223, + SVGA3D_DEVCAP_DXFMT_R32G32_FLOAT = 224, + SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UNORM = 225, + SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SNORM = 226, + SVGA3D_DEVCAP_DXFMT_R16G16_FLOAT = 227, + SVGA3D_DEVCAP_DXFMT_R16G16_UNORM = 228, + SVGA3D_DEVCAP_DXFMT_R16G16_SNORM = 229, + SVGA3D_DEVCAP_DXFMT_R32_FLOAT = 230, + SVGA3D_DEVCAP_DXFMT_R8G8_SNORM = 231, + SVGA3D_DEVCAP_DXFMT_R16_FLOAT = 232, + SVGA3D_DEVCAP_DXFMT_D16_UNORM = 233, + SVGA3D_DEVCAP_DXFMT_A8_UNORM = 234, + SVGA3D_DEVCAP_DXFMT_BC1_UNORM = 235, + SVGA3D_DEVCAP_DXFMT_BC2_UNORM = 236, + SVGA3D_DEVCAP_DXFMT_BC3_UNORM = 237, + SVGA3D_DEVCAP_DXFMT_B5G6R5_UNORM = 238, + SVGA3D_DEVCAP_DXFMT_B5G5R5A1_UNORM = 239, + SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM = 240, + SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM = 241, + SVGA3D_DEVCAP_DXFMT_BC4_UNORM = 242, + SVGA3D_DEVCAP_DXFMT_BC5_UNORM = 243, + + SVGA3D_DEVCAP_MAX /* This must be the last index. */ +} SVGA3dDevCapIndex; + +/* + * Bit definitions for DXFMT devcaps + * + * + * SUPPORTED: Can the format be defined? + * SHADER_SAMPLE: Can the format be sampled from a shader? + * COLOR_RENDERTARGET: Can the format be a color render target? + * DEPTH_RENDERTARGET: Can the format be a depth render target? + * BLENDABLE: Is the format blendable? + * MIPS: Does the format support mip levels? + * ARRAY: Does the format support texture arrays? + * VOLUME: Does the format support having volume? + * MULTISAMPLE_2: Does the format support 2x multisample? + * MULTISAMPLE_4: Does the format support 4x multisample? + * MULTISAMPLE_8: Does the format support 8x multisample? 
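+ *
+ * A DXFMT devcap result is a bitmask of the values defined below.
+ * As an illustrative sketch (not part of the device interface;
+ * "result" stands for the value read back for one
+ * SVGA3D_DEVCAP_DXFMT_* index), a driver might test:
+ *
+ * renderable = (result & SVGA3D_DXFMT_SUPPORTED) &&
+ * (result & SVGA3D_DXFMT_COLOR_RENDERTARGET);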
+ */ +#define SVGA3D_DXFMT_SUPPORTED (1 << 0) +#define SVGA3D_DXFMT_SHADER_SAMPLE (1 << 1) +#define SVGA3D_DXFMT_COLOR_RENDERTARGET (1 << 2) +#define SVGA3D_DXFMT_DEPTH_RENDERTARGET (1 << 3) +#define SVGA3D_DXFMT_BLENDABLE (1 << 4) +#define SVGA3D_DXFMT_MIPS (1 << 5) +#define SVGA3D_DXFMT_ARRAY (1 << 6) +#define SVGA3D_DXFMT_VOLUME (1 << 7) +#define SVGA3D_DXFMT_DX_VERTEX_BUFFER (1 << 8) +#define SVGADX_DXFMT_MULTISAMPLE_2 (1 << 9) +#define SVGADX_DXFMT_MULTISAMPLE_4 (1 << 10) +#define SVGADX_DXFMT_MULTISAMPLE_8 (1 << 11) +#define SVGADX_DXFMT_MAX (1 << 12) + +/* + * Convenience mask for any multisample capability. + * + * The multisample bits imply both load and render capability. + */ +#define SVGA3D_DXFMT_MULTISAMPLE ( \ + SVGADX_DXFMT_MULTISAMPLE_2 | \ + SVGADX_DXFMT_MULTISAMPLE_4 | \ + SVGADX_DXFMT_MULTISAMPLE_8 ) + +typedef union { + Bool b; + uint32 u; + int32 i; + float f; +} SVGA3dDevCapResult; + +#endif /* _SVGA3D_DEVCAPS_H_ */ diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h new file mode 100644 index 000000000000..8c5ae608cfb4 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h @@ -0,0 +1,1487 @@ +/********************************************************** + * Copyright 2012-2015 VMware, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + **********************************************************/ + +/* + * svga3d_dx.h -- + * + * SVGA 3d hardware definitions for DX10 support. 
+ */ + +#ifndef _SVGA3D_DX_H_ +#define _SVGA3D_DX_H_ + +#define INCLUDE_ALLOW_MODULE +#define INCLUDE_ALLOW_USERLEVEL +#define INCLUDE_ALLOW_VMCORE +#include "includeCheck.h" + +#include "svga3d_limits.h" + +#define SVGA3D_INPUT_MIN 0 +#define SVGA3D_INPUT_PER_VERTEX_DATA 0 +#define SVGA3D_INPUT_PER_INSTANCE_DATA 1 +#define SVGA3D_INPUT_MAX 2 +typedef uint32 SVGA3dInputClassification; + +#define SVGA3D_RESOURCE_TYPE_MIN 1 +#define SVGA3D_RESOURCE_BUFFER 1 +#define SVGA3D_RESOURCE_TEXTURE1D 2 +#define SVGA3D_RESOURCE_TEXTURE2D 3 +#define SVGA3D_RESOURCE_TEXTURE3D 4 +#define SVGA3D_RESOURCE_TEXTURECUBE 5 +#define SVGA3D_RESOURCE_TYPE_DX10_MAX 6 +#define SVGA3D_RESOURCE_BUFFEREX 6 +#define SVGA3D_RESOURCE_TYPE_MAX 7 +typedef uint32 SVGA3dResourceType; + +#define SVGA3D_DEPTH_WRITE_MASK_ZERO 0 +#define SVGA3D_DEPTH_WRITE_MASK_ALL 1 +typedef uint8 SVGA3dDepthWriteMask; + +#define SVGA3D_FILTER_MIP_LINEAR (1 << 0) +#define SVGA3D_FILTER_MAG_LINEAR (1 << 2) +#define SVGA3D_FILTER_MIN_LINEAR (1 << 4) +#define SVGA3D_FILTER_ANISOTROPIC (1 << 6) +#define SVGA3D_FILTER_COMPARE (1 << 7) +typedef uint32 SVGA3dFilter; + +#define SVGA3D_CULL_INVALID 0 +#define SVGA3D_CULL_MIN 1 +#define SVGA3D_CULL_NONE 1 +#define SVGA3D_CULL_FRONT 2 +#define SVGA3D_CULL_BACK 3 +#define SVGA3D_CULL_MAX 4 +typedef uint8 SVGA3dCullMode; + +#define SVGA3D_COMPARISON_INVALID 0 +#define SVGA3D_COMPARISON_MIN 1 +#define SVGA3D_COMPARISON_NEVER 1 +#define SVGA3D_COMPARISON_LESS 2 +#define SVGA3D_COMPARISON_EQUAL 3 +#define SVGA3D_COMPARISON_LESS_EQUAL 4 +#define SVGA3D_COMPARISON_GREATER 5 +#define SVGA3D_COMPARISON_NOT_EQUAL 6 +#define SVGA3D_COMPARISON_GREATER_EQUAL 7 +#define SVGA3D_COMPARISON_ALWAYS 8 +#define SVGA3D_COMPARISON_MAX 9 +typedef uint8 SVGA3dComparisonFunc; + +#define SVGA3D_DX_MAX_VERTEXBUFFERS 32 +#define SVGA3D_DX_MAX_SOTARGETS 4 +#define SVGA3D_DX_MAX_SRVIEWS 128 +#define SVGA3D_DX_MAX_CONSTBUFFERS 16 +#define SVGA3D_DX_MAX_SAMPLERS 16 + +/* Id limits */ +static const uint32 SVGA3dBlendObjectCountPerContext = 4096; +static const uint32 SVGA3dDepthStencilObjectCountPerContext = 4096; + +typedef uint32 SVGA3dSurfaceId; +typedef uint32 SVGA3dShaderResourceViewId; +typedef uint32 SVGA3dRenderTargetViewId; +typedef uint32 SVGA3dDepthStencilViewId; + +typedef uint32 SVGA3dShaderId; +typedef uint32 SVGA3dElementLayoutId; +typedef uint32 SVGA3dSamplerId; +typedef uint32 SVGA3dBlendStateId; +typedef uint32 SVGA3dDepthStencilStateId; +typedef uint32 SVGA3dRasterizerStateId; +typedef uint32 SVGA3dQueryId; +typedef uint32 SVGA3dStreamOutputId; + +typedef union { + struct { + float r; + float g; + float b; + float a; + }; + + float value[4]; +} SVGA3dRGBAFloat; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 cid; + SVGAMobId mobid; +} +#include "vmware_pack_end.h" +SVGAOTableDXContextEntry; + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDefineContext { + uint32 cid; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDefineContext; /* SVGA_3D_CMD_DX_DEFINE_CONTEXT */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDestroyContext { + uint32 cid; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDestroyContext; /* SVGA_3D_CMD_DX_DESTROY_CONTEXT */ + +/* + * Bind a DX context. + * + * validContents should be set to 0 for new contexts, + * and 1 if this is an old context which is getting paged + * back on to the device. + * + * For new contexts, it is recommended that the driver + * issue commands to initialize all interesting state + * prior to rendering. 
+ */ +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXBindContext { + uint32 cid; + SVGAMobId mobid; + uint32 validContents; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXBindContext; /* SVGA_3D_CMD_DX_BIND_CONTEXT */ + +/* + * Readback a DX context. + * (Request that the device flush the contents back into guest memory.) + */ +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXReadbackContext { + uint32 cid; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXReadbackContext; /* SVGA_3D_CMD_DX_READBACK_CONTEXT */ + +/* + * Invalidate a guest-backed context. + */ +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXInvalidateContext { + uint32 cid; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXInvalidateContext; /* SVGA_3D_CMD_DX_INVALIDATE_CONTEXT */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dReplyFormatData { + uint32 formatSupport; + uint32 msaa2xQualityLevels:5; + uint32 msaa4xQualityLevels:5; + uint32 msaa8xQualityLevels:5; + uint32 msaa16xQualityLevels:5; + uint32 msaa32xQualityLevels:5; + uint32 pad:7; +} +#include "vmware_pack_end.h" +SVGA3dReplyFormatData; + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXSetSingleConstantBuffer { + uint32 slot; + SVGA3dShaderType type; + SVGA3dSurfaceId sid; + uint32 offsetInBytes; + uint32 sizeInBytes; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXSetSingleConstantBuffer; +/* SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXSetShaderResources { + uint32 startView; + SVGA3dShaderType type; + + /* + * Followed by a variable number of SVGA3dShaderResourceViewId's. + */ +} +#include "vmware_pack_end.h" +SVGA3dCmdDXSetShaderResources; /* SVGA_3D_CMD_DX_SET_SHADER_RESOURCES */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXSetShader { + SVGA3dShaderId shaderId; + SVGA3dShaderType type; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXSetShader; /* SVGA_3D_CMD_DX_SET_SHADER */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXSetSamplers { + uint32 startSampler; + SVGA3dShaderType type; + + /* + * Followed by a variable number of SVGA3dSamplerId's. 
+ */ +} +#include "vmware_pack_end.h" +SVGA3dCmdDXSetSamplers; /* SVGA_3D_CMD_DX_SET_SAMPLERS */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDraw { + uint32 vertexCount; + uint32 startVertexLocation; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDraw; /* SVGA_3D_CMD_DX_DRAW */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDrawIndexed { + uint32 indexCount; + uint32 startIndexLocation; + int32 baseVertexLocation; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDrawIndexed; /* SVGA_3D_CMD_DX_DRAW_INDEXED */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDrawInstanced { + uint32 vertexCountPerInstance; + uint32 instanceCount; + uint32 startVertexLocation; + uint32 startInstanceLocation; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDrawInstanced; /* SVGA_3D_CMD_DX_DRAW_INSTANCED */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDrawIndexedInstanced { + uint32 indexCountPerInstance; + uint32 instanceCount; + uint32 startIndexLocation; + int32 baseVertexLocation; + uint32 startInstanceLocation; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDrawIndexedInstanced; /* SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDrawAuto { + uint32 pad0; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDrawAuto; /* SVGA_3D_CMD_DX_DRAW_AUTO */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXSetInputLayout { + SVGA3dElementLayoutId elementLayoutId; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXSetInputLayout; /* SVGA_3D_CMD_DX_SET_INPUT_LAYOUT */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dVertexBuffer { + SVGA3dSurfaceId sid; + uint32 stride; + uint32 offset; +} +#include "vmware_pack_end.h" +SVGA3dVertexBuffer; + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXSetVertexBuffers { + uint32 startBuffer; + /* Followed by a variable number of SVGA3dVertexBuffer's. */ +} +#include "vmware_pack_end.h" +SVGA3dCmdDXSetVertexBuffers; /* SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXSetIndexBuffer { + SVGA3dSurfaceId sid; + SVGA3dSurfaceFormat format; + uint32 offset; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXSetIndexBuffer; /* SVGA_3D_CMD_DX_SET_INDEX_BUFFER */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXSetTopology { + SVGA3dPrimitiveType topology; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXSetTopology; /* SVGA_3D_CMD_DX_SET_TOPOLOGY */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXSetRenderTargets { + SVGA3dDepthStencilViewId depthStencilViewId; + /* Followed by a variable number of SVGA3dRenderTargetViewId's. 
*/ +} +#include "vmware_pack_end.h" +SVGA3dCmdDXSetRenderTargets; /* SVGA_3D_CMD_DX_SET_RENDERTARGETS */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXSetBlendState { + SVGA3dBlendStateId blendId; + float blendFactor[4]; + uint32 sampleMask; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXSetBlendState; /* SVGA_3D_CMD_DX_SET_BLEND_STATE */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXSetDepthStencilState { + SVGA3dDepthStencilStateId depthStencilId; + uint32 stencilRef; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXSetDepthStencilState; /* SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXSetRasterizerState { + SVGA3dRasterizerStateId rasterizerId; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXSetRasterizerState; /* SVGA_3D_CMD_DX_SET_RASTERIZER_STATE */ + +#define SVGA3D_DXQUERY_FLAG_PREDICATEHINT (1 << 0) +typedef uint32 SVGA3dDXQueryFlags; + +/* + * The SVGADXQueryDeviceState and SVGADXQueryDeviceBits are used by the device + * to track query state transitions, but are not intended to be used by the + * driver. + */ +#define SVGADX_QDSTATE_INVALID ((uint8)-1) /* Query has no state */ +#define SVGADX_QDSTATE_MIN 0 +#define SVGADX_QDSTATE_IDLE 0 /* Query hasn't started yet */ +#define SVGADX_QDSTATE_ACTIVE 1 /* Query is actively gathering data */ +#define SVGADX_QDSTATE_PENDING 2 /* Query is waiting for results */ +#define SVGADX_QDSTATE_FINISHED 3 /* Query has completed */ +#define SVGADX_QDSTATE_MAX 4 +typedef uint8 SVGADXQueryDeviceState; + +typedef +#include "vmware_pack_begin.h" +struct { + SVGA3dQueryTypeUint8 type; + uint16 pad0; + SVGADXQueryDeviceState state; + SVGA3dDXQueryFlags flags; + SVGAMobId mobid; + uint32 offset; +} +#include "vmware_pack_end.h" +SVGACOTableDXQueryEntry; + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDefineQuery { + SVGA3dQueryId queryId; + SVGA3dQueryType type; + SVGA3dDXQueryFlags flags; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDefineQuery; /* SVGA_3D_CMD_DX_DEFINE_QUERY */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDestroyQuery { + SVGA3dQueryId queryId; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDestroyQuery; /* SVGA_3D_CMD_DX_DESTROY_QUERY */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXBindQuery { + SVGA3dQueryId queryId; + SVGAMobId mobid; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXBindQuery; /* SVGA_3D_CMD_DX_BIND_QUERY */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXSetQueryOffset { + SVGA3dQueryId queryId; + uint32 mobOffset; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXSetQueryOffset; /* SVGA_3D_CMD_DX_SET_QUERY_OFFSET */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXBeginQuery { + SVGA3dQueryId queryId; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXBeginQuery; /* SVGA_3D_CMD_DX_QUERY_BEGIN */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXEndQuery { + SVGA3dQueryId queryId; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXEndQuery; /* SVGA_3D_CMD_DX_QUERY_END */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXReadbackQuery { + SVGA3dQueryId queryId; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXReadbackQuery; /* SVGA_3D_CMD_DX_READBACK_QUERY */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXMoveQuery { + SVGA3dQueryId queryId; + SVGAMobId mobid; + uint32 mobOffset; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXMoveQuery; /* SVGA_3D_CMD_DX_MOVE_QUERY */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXBindAllQuery { + uint32 
cid; + SVGAMobId mobid; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXBindAllQuery; /* SVGA_3D_CMD_DX_BIND_ALL_QUERY */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXReadbackAllQuery { + uint32 cid; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXReadbackAllQuery; /* SVGA_3D_CMD_DX_READBACK_ALL_QUERY */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXSetPredication { + SVGA3dQueryId queryId; + uint32 predicateValue; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXSetPredication; /* SVGA_3D_CMD_DX_SET_PREDICATION */ + +typedef +#include "vmware_pack_begin.h" +struct MKS3dDXSOState { + uint32 offset; /* Starting offset */ + uint32 intOffset; /* Internal offset */ + uint32 vertexCount; /* vertices written */ + uint32 sizeInBytes; /* max bytes to write */ +} +#include "vmware_pack_end.h" +SVGA3dDXSOState; + +/* Set the offset field to this value to append SO values to the buffer */ +#define SVGA3D_DX_SO_OFFSET_APPEND ((uint32) ~0u) + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dSoTarget { + SVGA3dSurfaceId sid; + uint32 offset; + uint32 sizeInBytes; +} +#include "vmware_pack_end.h" +SVGA3dSoTarget; + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXSetSOTargets { + uint32 pad0; + /* Followed by a variable number of SVGA3dSOTarget's. */ +} +#include "vmware_pack_end.h" +SVGA3dCmdDXSetSOTargets; /* SVGA_3D_CMD_DX_SET_SOTARGETS */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dViewport +{ + float x; + float y; + float width; + float height; + float minDepth; + float maxDepth; +} +#include "vmware_pack_end.h" +SVGA3dViewport; + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXSetViewports { + uint32 pad0; + /* Followed by a variable number of SVGA3dViewport's. */ +} +#include "vmware_pack_end.h" +SVGA3dCmdDXSetViewports; /* SVGA_3D_CMD_DX_SET_VIEWPORTS */ + +#define SVGA3D_DX_MAX_VIEWPORTS 16 + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXSetScissorRects { + uint32 pad0; + /* Followed by a variable number of SVGASignedRect's. 
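+ * The count is implied by the command size; the device state tracks
+ * at most SVGA3D_DX_MAX_SCISSORRECTS of them (defined below).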
*/ +} +#include "vmware_pack_end.h" +SVGA3dCmdDXSetScissorRects; /* SVGA_3D_CMD_DX_SET_SCISSORRECTS */ + +#define SVGA3D_DX_MAX_SCISSORRECTS 16 + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXClearRenderTargetView { + SVGA3dRenderTargetViewId renderTargetViewId; + SVGA3dRGBAFloat rgba; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXClearRenderTargetView; /* SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXClearDepthStencilView { + uint16 flags; + uint16 stencil; + SVGA3dDepthStencilViewId depthStencilViewId; + float depth; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXClearDepthStencilView; /* SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXPredCopyRegion { + SVGA3dSurfaceId dstSid; + uint32 dstSubResource; + SVGA3dSurfaceId srcSid; + uint32 srcSubResource; + SVGA3dCopyBox box; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXPredCopyRegion; +/* SVGA_3D_CMD_DX_PRED_COPY_REGION */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXPredCopy { + SVGA3dSurfaceId dstSid; + SVGA3dSurfaceId srcSid; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXPredCopy; /* SVGA_3D_CMD_DX_PRED_COPY */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXBufferCopy { + SVGA3dSurfaceId dest; + SVGA3dSurfaceId src; + uint32 destX; + uint32 srcX; + uint32 width; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXBufferCopy; +/* SVGA_3D_CMD_DX_BUFFER_COPY */ + +typedef uint32 SVGA3dDXStretchBltMode; +#define SVGADX_STRETCHBLT_LINEAR (1 << 0) +#define SVGADX_STRETCHBLT_FORCE_SRC_SRGB (1 << 1) + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXStretchBlt { + SVGA3dSurfaceId srcSid; + uint32 srcSubResource; + SVGA3dSurfaceId dstSid; + uint32 destSubResource; + SVGA3dBox boxSrc; + SVGA3dBox boxDest; + SVGA3dDXStretchBltMode mode; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXStretchBlt; /* SVGA_3D_CMD_DX_STRETCHBLT */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXGenMips { + SVGA3dShaderResourceViewId shaderResourceViewId; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXGenMips; /* SVGA_3D_CMD_DX_GENMIPS */ + +/* + * Defines a resource/DX surface. Resources share the surfaceId namespace. + * + */ +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDefineGBSurface_v2 { + uint32 sid; + SVGA3dSurfaceFlags surfaceFlags; + SVGA3dSurfaceFormat format; + uint32 numMipLevels; + uint32 multisampleCount; + SVGA3dTextureFilter autogenFilter; + SVGA3dSize size; + uint32 arraySize; + uint32 pad; +} +#include "vmware_pack_end.h" +SVGA3dCmdDefineGBSurface_v2; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 */ + +/* + * Update a sub-resource in a guest-backed resource. + * (Inform the device that the guest-contents have been updated.) + */ +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXUpdateSubResource { + SVGA3dSurfaceId sid; + uint32 subResource; + SVGA3dBox box; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXUpdateSubResource; /* SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE */ + +/* + * Readback a subresource in a guest-backed resource. + * (Request the device to flush the dirty contents into the guest.) + */ +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXReadbackSubResource { + SVGA3dSurfaceId sid; + uint32 subResource; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXReadbackSubResource; /* SVGA_3D_CMD_DX_READBACK_SUBRESOURCE */ + +/* + * Invalidate an image in a guest-backed surface. + * (Notify the device that the contents can be lost.) 
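+ * This avoids needless readback traffic when the guest intends to
+ * overwrite the image anyway.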
+ */ +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXInvalidateSubResource { + SVGA3dSurfaceId sid; + uint32 subResource; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXInvalidateSubResource; /* SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE */ + + +/* + * Raw byte wise transfer from a buffer surface into another surface + * of the requested box. + */ +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXTransferFromBuffer { + SVGA3dSurfaceId srcSid; + uint32 srcOffset; + uint32 srcPitch; + uint32 srcSlicePitch; + SVGA3dSurfaceId destSid; + uint32 destSubResource; + SVGA3dBox destBox; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXTransferFromBuffer; /* SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER */ + + +/* + * Raw byte wise transfer from a buffer surface into another surface + * of the requested box. Supported if SVGA3D_DEVCAP_DXCONTEXT is set. + * The context is implied from the command buffer header. + */ +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXPredTransferFromBuffer { + SVGA3dSurfaceId srcSid; + uint32 srcOffset; + uint32 srcPitch; + uint32 srcSlicePitch; + SVGA3dSurfaceId destSid; + uint32 destSubResource; + SVGA3dBox destBox; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXPredTransferFromBuffer; +/* SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER */ + + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXSurfaceCopyAndReadback { + SVGA3dSurfaceId srcSid; + SVGA3dSurfaceId destSid; + SVGA3dCopyBox box; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXSurfaceCopyAndReadback; +/* SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK */ + + +typedef +#include "vmware_pack_begin.h" +struct { + union { + struct { + uint32 firstElement; + uint32 numElements; + uint32 pad0; + uint32 pad1; + } buffer; + struct { + uint32 mostDetailedMip; + uint32 firstArraySlice; + uint32 mipLevels; + uint32 arraySize; + } tex; + struct { + uint32 firstElement; + uint32 numElements; + uint32 flags; + uint32 pad0; + } bufferex; + }; +} +#include "vmware_pack_end.h" +SVGA3dShaderResourceViewDesc; + +typedef +#include "vmware_pack_begin.h" +struct { + SVGA3dSurfaceId sid; + SVGA3dSurfaceFormat format; + SVGA3dResourceType resourceDimension; + SVGA3dShaderResourceViewDesc desc; + uint32 pad; +} +#include "vmware_pack_end.h" +SVGACOTableDXSRViewEntry; + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDefineShaderResourceView { + SVGA3dShaderResourceViewId shaderResourceViewId; + + SVGA3dSurfaceId sid; + SVGA3dSurfaceFormat format; + SVGA3dResourceType resourceDimension; + + SVGA3dShaderResourceViewDesc desc; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDefineShaderResourceView; +/* SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDestroyShaderResourceView { + SVGA3dShaderResourceViewId shaderResourceViewId; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDestroyShaderResourceView; +/* SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dRenderTargetViewDesc { + union { + struct { + uint32 firstElement; + uint32 numElements; + } buffer; + struct { + uint32 mipSlice; + uint32 firstArraySlice; + uint32 arraySize; + } tex; /* 1d, 2d, cube */ + struct { + uint32 mipSlice; + uint32 firstW; + uint32 wSize; + } tex3D; + }; +} +#include "vmware_pack_end.h" +SVGA3dRenderTargetViewDesc; + +typedef +#include "vmware_pack_begin.h" +struct { + SVGA3dSurfaceId sid; + SVGA3dSurfaceFormat format; + SVGA3dResourceType resourceDimension; + SVGA3dRenderTargetViewDesc desc; + uint32 pad[2]; +} +#include 
"vmware_pack_end.h" +SVGACOTableDXRTViewEntry; + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDefineRenderTargetView { + SVGA3dRenderTargetViewId renderTargetViewId; + + SVGA3dSurfaceId sid; + SVGA3dSurfaceFormat format; + SVGA3dResourceType resourceDimension; + + SVGA3dRenderTargetViewDesc desc; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDefineRenderTargetView; +/* SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDestroyRenderTargetView { + SVGA3dRenderTargetViewId renderTargetViewId; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDestroyRenderTargetView; +/* SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW */ + +/* + */ +#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_DEPTH 0x01 +#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_STENCIL 0x02 +#define SVGA3D_DXDSVIEW_CREATE_FLAG_MASK 0x03 +typedef uint8 SVGA3DCreateDSViewFlags; + +typedef +#include "vmware_pack_begin.h" +struct { + SVGA3dSurfaceId sid; + SVGA3dSurfaceFormat format; + SVGA3dResourceType resourceDimension; + uint32 mipSlice; + uint32 firstArraySlice; + uint32 arraySize; + SVGA3DCreateDSViewFlags flags; + uint8 pad0; + uint16 pad1; + uint32 pad2; +} +#include "vmware_pack_end.h" +SVGACOTableDXDSViewEntry; + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDefineDepthStencilView { + SVGA3dDepthStencilViewId depthStencilViewId; + + SVGA3dSurfaceId sid; + SVGA3dSurfaceFormat format; + SVGA3dResourceType resourceDimension; + uint32 mipSlice; + uint32 firstArraySlice; + uint32 arraySize; + SVGA3DCreateDSViewFlags flags; + uint8 pad0; + uint16 pad1; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDefineDepthStencilView; +/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDestroyDepthStencilView { + SVGA3dDepthStencilViewId depthStencilViewId; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDestroyDepthStencilView; +/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dInputElementDesc { + uint32 inputSlot; + uint32 alignedByteOffset; + SVGA3dSurfaceFormat format; + SVGA3dInputClassification inputSlotClass; + uint32 instanceDataStepRate; + uint32 inputRegister; +} +#include "vmware_pack_end.h" +SVGA3dInputElementDesc; + +typedef +#include "vmware_pack_begin.h" +struct { + /* + * XXX: How many of these can there be? + */ + uint32 elid; + uint32 numDescs; + SVGA3dInputElementDesc desc[32]; + uint32 pad[62]; +} +#include "vmware_pack_end.h" +SVGACOTableDXElementLayoutEntry; + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDefineElementLayout { + SVGA3dElementLayoutId elementLayoutId; + /* Followed by a variable number of SVGA3dInputElementDesc's. 
*/ +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDefineElementLayout; +/* SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDestroyElementLayout { + SVGA3dElementLayoutId elementLayoutId; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDestroyElementLayout; +/* SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT */ + + +#define SVGA3D_DX_MAX_RENDER_TARGETS 8 + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dDXBlendStatePerRT { + uint8 blendEnable; + uint8 srcBlend; + uint8 destBlend; + uint8 blendOp; + uint8 srcBlendAlpha; + uint8 destBlendAlpha; + uint8 blendOpAlpha; + uint8 renderTargetWriteMask; + uint8 logicOpEnable; + uint8 logicOp; + uint16 pad0; +} +#include "vmware_pack_end.h" +SVGA3dDXBlendStatePerRT; + +typedef +#include "vmware_pack_begin.h" +struct { + uint8 alphaToCoverageEnable; + uint8 independentBlendEnable; + uint16 pad0; + SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS]; + uint32 pad1[7]; +} +#include "vmware_pack_end.h" +SVGACOTableDXBlendStateEntry; + +/* + */ +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDefineBlendState { + SVGA3dBlendStateId blendId; + uint8 alphaToCoverageEnable; + uint8 independentBlendEnable; + uint16 pad0; + SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS]; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDefineBlendState; /* SVGA_3D_CMD_DX_DEFINE_BLEND_STATE */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDestroyBlendState { + SVGA3dBlendStateId blendId; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDestroyBlendState; /* SVGA_3D_CMD_DX_DESTROY_BLEND_STATE */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint8 depthEnable; + SVGA3dDepthWriteMask depthWriteMask; + SVGA3dComparisonFunc depthFunc; + uint8 stencilEnable; + uint8 frontEnable; + uint8 backEnable; + uint8 stencilReadMask; + uint8 stencilWriteMask; + + uint8 frontStencilFailOp; + uint8 frontStencilDepthFailOp; + uint8 frontStencilPassOp; + SVGA3dComparisonFunc frontStencilFunc; + + uint8 backStencilFailOp; + uint8 backStencilDepthFailOp; + uint8 backStencilPassOp; + SVGA3dComparisonFunc backStencilFunc; +} +#include "vmware_pack_end.h" +SVGACOTableDXDepthStencilEntry; + +/* + */ +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDefineDepthStencilState { + SVGA3dDepthStencilStateId depthStencilId; + + uint8 depthEnable; + SVGA3dDepthWriteMask depthWriteMask; + SVGA3dComparisonFunc depthFunc; + uint8 stencilEnable; + uint8 frontEnable; + uint8 backEnable; + uint8 stencilReadMask; + uint8 stencilWriteMask; + + uint8 frontStencilFailOp; + uint8 frontStencilDepthFailOp; + uint8 frontStencilPassOp; + SVGA3dComparisonFunc frontStencilFunc; + + uint8 backStencilFailOp; + uint8 backStencilDepthFailOp; + uint8 backStencilPassOp; + SVGA3dComparisonFunc backStencilFunc; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDefineDepthStencilState; +/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDestroyDepthStencilState { + SVGA3dDepthStencilStateId depthStencilId; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDestroyDepthStencilState; +/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint8 fillMode; + SVGA3dCullMode cullMode; + uint8 frontCounterClockwise; + uint8 provokingVertexLast; + int32 depthBias; + float depthBiasClamp; + float slopeScaledDepthBias; + uint8 depthClipEnable; + uint8 scissorEnable; + uint8 multisampleEnable; + uint8 antialiasedLineEnable; + float lineWidth; + uint8 
lineStippleEnable; + uint8 lineStippleFactor; + uint16 lineStipplePattern; + uint32 forcedSampleCount; +} +#include "vmware_pack_end.h" +SVGACOTableDXRasterizerStateEntry; + +/* + */ +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDefineRasterizerState { + SVGA3dRasterizerStateId rasterizerId; + + uint8 fillMode; + SVGA3dCullMode cullMode; + uint8 frontCounterClockwise; + uint8 provokingVertexLast; + int32 depthBias; + float depthBiasClamp; + float slopeScaledDepthBias; + uint8 depthClipEnable; + uint8 scissorEnable; + uint8 multisampleEnable; + uint8 antialiasedLineEnable; + float lineWidth; + uint8 lineStippleEnable; + uint8 lineStippleFactor; + uint16 lineStipplePattern; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDefineRasterizerState; +/* SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDestroyRasterizerState { + SVGA3dRasterizerStateId rasterizerId; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDestroyRasterizerState; +/* SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE */ + +typedef +#include "vmware_pack_begin.h" +struct { + SVGA3dFilter filter; + uint8 addressU; + uint8 addressV; + uint8 addressW; + uint8 pad0; + float mipLODBias; + uint8 maxAnisotropy; + SVGA3dComparisonFunc comparisonFunc; + uint16 pad1; + SVGA3dRGBAFloat borderColor; + float minLOD; + float maxLOD; + uint32 pad2[6]; +} +#include "vmware_pack_end.h" +SVGACOTableDXSamplerEntry; + +/* + */ +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDefineSamplerState { + SVGA3dSamplerId samplerId; + SVGA3dFilter filter; + uint8 addressU; + uint8 addressV; + uint8 addressW; + uint8 pad0; + float mipLODBias; + uint8 maxAnisotropy; + SVGA3dComparisonFunc comparisonFunc; + uint16 pad1; + SVGA3dRGBAFloat borderColor; + float minLOD; + float maxLOD; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDefineSamplerState; /* SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDestroySamplerState { + SVGA3dSamplerId samplerId; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDestroySamplerState; /* SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE */ + +/* + */ +typedef +#include "vmware_pack_begin.h" +struct SVGA3dSignatureEntry { + uint8 systemValue; + uint8 reg; /* register is a reserved word */ + uint16 mask; + uint8 registerComponentType; + uint8 minPrecision; + uint16 pad0; +} +#include "vmware_pack_end.h" +SVGA3dSignatureEntry; + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDefineShader { + SVGA3dShaderId shaderId; + SVGA3dShaderType type; + uint32 sizeInBytes; /* Number of bytes of shader text. 
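The shader bytecode itself is supplied separately, in a mob bound with SVGA_3D_CMD_DX_BIND_SHADER.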
*/ +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDefineShader; /* SVGA_3D_CMD_DX_DEFINE_SHADER */ + +typedef +#include "vmware_pack_begin.h" +struct SVGACOTableDXShaderEntry { + SVGA3dShaderType type; + uint32 sizeInBytes; + uint32 offsetInBytes; + SVGAMobId mobid; + uint32 numInputSignatureEntries; + uint32 numOutputSignatureEntries; + + uint32 numPatchConstantSignatureEntries; + + uint32 pad; +} +#include "vmware_pack_end.h" +SVGACOTableDXShaderEntry; + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDestroyShader { + SVGA3dShaderId shaderId; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDestroyShader; /* SVGA_3D_CMD_DX_DESTROY_SHADER */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXBindShader { + uint32 cid; + uint32 shid; + SVGAMobId mobid; + uint32 offsetInBytes; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXBindShader; /* SVGA_3D_CMD_DX_BIND_SHADER */ + +/* + * The maximum number of streamout decl's in each streamout entry. + */ +#define SVGA3D_MAX_STREAMOUT_DECLS 64 + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dStreamOutputDeclarationEntry { + uint32 outputSlot; + uint32 registerIndex; + uint8 registerMask; + uint8 pad0; + uint16 pad1; + uint32 stream; +} +#include "vmware_pack_end.h" +SVGA3dStreamOutputDeclarationEntry; + +typedef +#include "vmware_pack_begin.h" +struct SVGAOTableStreamOutputEntry { + uint32 numOutputStreamEntries; + SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS]; + uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS]; + uint32 rasterizedStream; + uint32 pad[250]; +} +#include "vmware_pack_end.h" +SVGACOTableDXStreamOutputEntry; + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDefineStreamOutput { + SVGA3dStreamOutputId soid; + uint32 numOutputStreamEntries; + SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS]; + uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS]; + uint32 rasterizedStream; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDefineStreamOutput; /* SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXDestroyStreamOutput { + SVGA3dStreamOutputId soid; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXDestroyStreamOutput; /* SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXSetStreamOutput { + SVGA3dStreamOutputId soid; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXSetStreamOutput; /* SVGA_3D_CMD_DX_SET_STREAMOUTPUT */ + +typedef +#include "vmware_pack_begin.h" +struct { + uint64 value; + uint32 mobId; + uint32 mobOffset; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXMobFence64; /* SVGA_3D_CMD_DX_MOB_FENCE_64 */ + +/* + * SVGA3dCmdSetCOTable -- + * + * This command allows the guest to bind a mob to a context-object table. 
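+ *
+ * validSizeInBytes tells the device how much of the mob already
+ * contains valid entries; a newly created table would typically
+ * pass zero.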
+ */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXSetCOTable { + uint32 cid; + uint32 mobid; + SVGACOTableType type; + uint32 validSizeInBytes; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXSetCOTable; /* SVGA_3D_CMD_DX_SET_COTABLE */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXReadbackCOTable { + uint32 cid; + SVGACOTableType type; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXReadbackCOTable; /* SVGA_3D_CMD_DX_READBACK_COTABLE */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCOTableData { + uint32 mobid; +} +#include "vmware_pack_end.h" +SVGA3dCOTableData; + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dBufferBinding { + uint32 bufferId; + uint32 stride; + uint32 offset; +} +#include "vmware_pack_end.h" +SVGA3dBufferBinding; + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dConstantBufferBinding { + uint32 sid; + uint32 offsetInBytes; + uint32 sizeInBytes; +} +#include "vmware_pack_end.h" +SVGA3dConstantBufferBinding; + +typedef +#include "vmware_pack_begin.h" +struct SVGADXInputAssemblyMobFormat { + uint32 layoutId; + SVGA3dBufferBinding vertexBuffers[SVGA3D_DX_MAX_VERTEXBUFFERS]; + uint32 indexBufferSid; + uint32 pad; + uint32 indexBufferOffset; + uint32 indexBufferFormat; + uint32 topology; +} +#include "vmware_pack_end.h" +SVGADXInputAssemblyMobFormat; + +typedef +#include "vmware_pack_begin.h" +struct SVGADXContextMobFormat { + SVGADXInputAssemblyMobFormat inputAssembly; + + struct { + uint32 blendStateId; + uint32 blendFactor[4]; + uint32 sampleMask; + uint32 depthStencilStateId; + uint32 stencilRef; + uint32 rasterizerStateId; + uint32 depthStencilViewId; + uint32 renderTargetViewIds[SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS]; + uint32 unorderedAccessViewIds[SVGA3D_MAX_UAVIEWS]; + } renderState; + + struct { + uint32 targets[SVGA3D_DX_MAX_SOTARGETS]; + uint32 soid; + } streamOut; + uint32 pad0[11]; + + uint8 numViewports; + uint8 numScissorRects; + uint16 pad1[1]; + + uint32 pad2[3]; + + SVGA3dViewport viewports[SVGA3D_DX_MAX_VIEWPORTS]; + uint32 pad3[32]; + + SVGASignedRect scissorRects[SVGA3D_DX_MAX_SCISSORRECTS]; + uint32 pad4[64]; + + struct { + uint32 queryID; + uint32 value; + } predication; + uint32 pad5[2]; + + struct { + uint32 shaderId; + SVGA3dConstantBufferBinding constantBuffers[SVGA3D_DX_MAX_CONSTBUFFERS]; + uint32 shaderResources[SVGA3D_DX_MAX_SRVIEWS]; + uint32 samplers[SVGA3D_DX_MAX_SAMPLERS]; + } shaderState[SVGA3D_NUM_SHADERTYPE]; + uint32 pad6[26]; + + SVGA3dQueryId queryID[SVGA3D_MAX_QUERY]; + + SVGA3dCOTableData cotables[SVGA_COTABLE_MAX]; + uint32 pad7[381]; +} +#include "vmware_pack_end.h" +SVGADXContextMobFormat; + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCmdDXTempSetContext { + uint32 dxcid; +} +#include "vmware_pack_end.h" +SVGA3dCmdDXTempSetContext; /* SVGA_3D_CMD_DX_TEMP_SET_CONTEXT */ + +#endif /* _SVGA3D_DX_H_ */ diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h new file mode 100644 index 000000000000..a1c36877ad55 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h @@ -0,0 +1,99 @@ +/********************************************************** + * Copyright 2007-2015 VMware, Inc. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + **********************************************************/ + +/* + * svga3d_limits.h -- + * + * SVGA 3d hardware limits + */ + +#ifndef _SVGA3D_LIMITS_H_ +#define _SVGA3D_LIMITS_H_ + +#define INCLUDE_ALLOW_MODULE +#define INCLUDE_ALLOW_USERLEVEL +#define INCLUDE_ALLOW_VMCORE + +#include "includeCheck.h" + +#define SVGA3D_NUM_CLIPPLANES 6 +#define SVGA3D_MAX_RENDER_TARGETS 8 +#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS (SVGA3D_MAX_RENDER_TARGETS) +#define SVGA3D_MAX_UAVIEWS 8 +#define SVGA3D_MAX_CONTEXT_IDS 256 +#define SVGA3D_MAX_SURFACE_IDS (32 * 1024) + +/* + * Maximum ID a shader can be assigned on a given context. + */ +#define SVGA3D_MAX_SHADERIDS 5000 +/* + * Maximum number of shaders of a given type that can be defined + * (including all contexts). + */ +#define SVGA3D_MAX_SIMULTANEOUS_SHADERS 20000 + +#define SVGA3D_NUM_TEXTURE_UNITS 32 +#define SVGA3D_NUM_LIGHTS 8 + +/* + * Maximum size in dwords of shader text the SVGA device will allow. + * Currently 8 MB. + */ +#define SVGA3D_MAX_SHADER_MEMORY (8 * 1024 * 1024 / sizeof(uint32)) + +#define SVGA3D_MAX_CLIP_PLANES 6 + +/* + * This is the limit to the number of fixed-function texture + * transforms and texture coordinates we can support. It does *not* + * correspond to the number of texture image units (samplers) we + * support! + */ +#define SVGA3D_MAX_TEXTURE_COORDS 8 + +/* + * Number of faces in a cubemap. + */ +#define SVGA3D_MAX_SURFACE_FACES 6 + +/* + * Maximum number of array indexes in a GB surface (with DX enabled). + */ +#define SVGA3D_MAX_SURFACE_ARRAYSIZE 512 + +/* + * The maximum number of vertex arrays we're guaranteed to support in + * SVGA_3D_CMD_DRAWPRIMITIVES. + */ +#define SVGA3D_MAX_VERTEX_ARRAYS 32 + +/* + * The maximum number of primitive ranges we're guaranteed to support + * in SVGA_3D_CMD_DRAWPRIMITIVES. + */ +#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32 + +#endif /* _SVGA3D_LIMITS_H_ */ diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h new file mode 100644 index 000000000000..b44ce648f592 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h @@ -0,0 +1,50 @@ +/********************************************************** + * Copyright 1998-2015 VMware, Inc. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + **********************************************************/ + +/* + * svga3d_reg.h -- + * + * SVGA 3d hardware definitions + */ + +#ifndef _SVGA3D_REG_H_ +#define _SVGA3D_REG_H_ + +#define INCLUDE_ALLOW_MODULE +#define INCLUDE_ALLOW_USERLEVEL +#define INCLUDE_ALLOW_VMCORE + +#include "includeCheck.h" + +#include "svga_reg.h" + +#include "svga3d_types.h" +#include "svga3d_limits.h" +#include "svga3d_cmd.h" +#include "svga3d_dx.h" +#include "svga3d_devcaps.h" + + +#endif /* _SVGA3D_REG_H_ */ diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h new file mode 100644 index 000000000000..58704f0a4607 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h @@ -0,0 +1,1204 @@ +/************************************************************************** + * + * Copyright © 2008-2015 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ **************************************************************************/
+
+#ifdef __KERNEL__
+
+#include <drm/vmwgfx_drm.h>
+#define surf_size_struct struct drm_vmw_size
+
+#else /* __KERNEL__ */
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
+#endif /* ARRAY_SIZE */
+
+#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
+#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
+#define surf_size_struct SVGA3dSize
+#define u32 uint32
+
+#endif /* __KERNEL__ */
+
+#include "svga3d_reg.h"
+
+/*
+ * enum svga3d_block_desc describes the active data channels in a block.
+ *
+ * There can be at most four active channels in a block:
+ *    1. Red, bump W, luminance and depth are stored in the first channel.
+ *    2. Green, bump V and stencil are stored in the second channel.
+ *    3. Blue and bump U are stored in the third channel.
+ *    4. Alpha and bump Q are stored in the fourth channel.
+ *
+ * Block channels can be used to store compressed and buffer data:
+ *    1. For compressed formats, only the data channel is used and its size
+ *       is equal to that of a singular block in the compression scheme.
+ *    2. For buffer formats, only the data channel is used and its size is
+ *       exactly one byte in length.
+ *    3. In each case the bit depth represents the size of a singular block.
+ *
+ * Note: Compressed and IEEE formats do not use the bitMask structure.
+ */
+
+enum svga3d_block_desc {
+	SVGA3DBLOCKDESC_NONE        = 0,         /* No channels are active */
+	SVGA3DBLOCKDESC_BLUE        = 1 << 0,    /* Block with red channel
+						    data */
+	SVGA3DBLOCKDESC_U           = 1 << 0,    /* Block with bump U channel
+						    data */
+	SVGA3DBLOCKDESC_UV_VIDEO    = 1 << 7,    /* Block with alternating video
+						    U and V */
+	SVGA3DBLOCKDESC_GREEN       = 1 << 1,    /* Block with green channel
+						    data */
+	SVGA3DBLOCKDESC_V           = 1 << 1,    /* Block with bump V channel
+						    data */
+	SVGA3DBLOCKDESC_STENCIL     = 1 << 1,    /* Block with a stencil
+						    channel */
+	SVGA3DBLOCKDESC_RED         = 1 << 2,    /* Block with blue channel
+						    data */
+	SVGA3DBLOCKDESC_W           = 1 << 2,    /* Block with bump W channel
+						    data */
+	SVGA3DBLOCKDESC_LUMINANCE   = 1 << 2,    /* Block with luminance channel
+						    data */
+	SVGA3DBLOCKDESC_Y           = 1 << 2,    /* Block with video luminance
+						    data */
+	SVGA3DBLOCKDESC_DEPTH       = 1 << 2,    /* Block with depth channel */
+	SVGA3DBLOCKDESC_ALPHA       = 1 << 3,    /* Block with an alpha
+						    channel */
+	SVGA3DBLOCKDESC_Q           = 1 << 3,    /* Block with bump Q channel
+						    data */
+	SVGA3DBLOCKDESC_BUFFER      = 1 << 4,    /* Block stores 1 byte of
+						    data */
+	SVGA3DBLOCKDESC_COMPRESSED  = 1 << 5,    /* Block stores n bytes of
+						    data depending on the
+						    compression method used */
+	SVGA3DBLOCKDESC_IEEE_FP     = 1 << 6,    /* Block stores data in an IEEE
+						    floating point
+						    representation in
+						    all channels */
+	SVGA3DBLOCKDESC_PLANAR_YUV  = 1 << 8,    /* Three separate blocks store
+						    data. */
+	SVGA3DBLOCKDESC_U_VIDEO     = 1 << 9,    /* Block with U video data */
+	SVGA3DBLOCKDESC_V_VIDEO     = 1 << 10,   /* Block with V video data */
+	SVGA3DBLOCKDESC_EXP         = 1 << 11,   /* Shared exponent */
+	SVGA3DBLOCKDESC_SRGB        = 1 << 12,   /* Data is in sRGB format */
+	SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13,   /* 2 planes of Y, UV,
+						    e.g., NV12. */
+	SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14,   /* 3 planes of separate
+						    Y, U, V, e.g., YV12.
*/ + + SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED | + SVGA3DBLOCKDESC_GREEN, + SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG | + SVGA3DBLOCKDESC_BLUE, + SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB | + SVGA3DBLOCKDESC_SRGB, + SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB | + SVGA3DBLOCKDESC_ALPHA, + SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA | + SVGA3DBLOCKDESC_SRGB, + SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U | + SVGA3DBLOCKDESC_V, + SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV | + SVGA3DBLOCKDESC_LUMINANCE, + SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV | + SVGA3DBLOCKDESC_W, + SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW | + SVGA3DBLOCKDESC_ALPHA, + SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U | + SVGA3DBLOCKDESC_V | + SVGA3DBLOCKDESC_W | + SVGA3DBLOCKDESC_Q, + SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE | + SVGA3DBLOCKDESC_ALPHA, + SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED | + SVGA3DBLOCKDESC_IEEE_FP, + SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP | + SVGA3DBLOCKDESC_GREEN, + SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP | + SVGA3DBLOCKDESC_BLUE, + SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP | + SVGA3DBLOCKDESC_ALPHA, + SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH | + SVGA3DBLOCKDESC_STENCIL, + SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO | + SVGA3DBLOCKDESC_Y, + SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA | + SVGA3DBLOCKDESC_Y | + SVGA3DBLOCKDESC_U_VIDEO | + SVGA3DBLOCKDESC_V_VIDEO, + SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB | + SVGA3DBLOCKDESC_EXP, + SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED | + SVGA3DBLOCKDESC_SRGB, + SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV | + SVGA3DBLOCKDESC_2PLANAR_YUV, + SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV | + SVGA3DBLOCKDESC_3PLANAR_YUV, +}; + +/* + * SVGA3dSurfaceDesc describes the actual pixel data. + * + * This structure provides the following information: + * 1. Block description. + * 2. Dimensions of a block in the surface. + * 3. Size of block in bytes. + * 4. Bit depth of the pixel data. + * 5. Channel bit depths and masks (if applicable). 
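+ *
+ * Illustrative note (editorial, not part of the original header): to read
+ * an entry in the table below, the SVGA3D_R5G6B5 descriptor is a
+ * 1x1-pixel block of 2 bytes (16 bits total), with 5/6/5 bits of
+ * blue/green/red at bit offsets 0/5/11, matching the channel order of
+ * struct svga3d_channel_def.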
+ */ +struct svga3d_channel_def { + union { + u8 blue; + u8 u; + u8 uv_video; + u8 u_video; + }; + union { + u8 green; + u8 v; + u8 stencil; + u8 v_video; + }; + union { + u8 red; + u8 w; + u8 luminance; + u8 y; + u8 depth; + u8 data; + }; + union { + u8 alpha; + u8 q; + u8 exp; + }; +}; + +struct svga3d_surface_desc { + SVGA3dSurfaceFormat format; + enum svga3d_block_desc block_desc; + surf_size_struct block_size; + u32 bytes_per_block; + u32 pitch_bytes_per_block; + + u32 total_bit_depth; + struct svga3d_channel_def bit_depth; + struct svga3d_channel_def bit_offset; +}; + +static const struct svga3d_surface_desc svga3d_surface_descs[] = { + {SVGA3D_FORMAT_INVALID, SVGA3DBLOCKDESC_NONE, + {1, 1, 1}, 0, 0, + 0, {{0}, {0}, {0}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_X8R8G8B8, SVGA3DBLOCKDESC_RGB, + {1, 1, 1}, 4, 4, + 24, {{8}, {8}, {8}, {0}}, + {{0}, {8}, {16}, {24}}}, + + {SVGA3D_A8R8G8B8, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 4, 4, + 32, {{8}, {8}, {8}, {8}}, + {{0}, {8}, {16}, {24}}}, + + {SVGA3D_R5G6B5, SVGA3DBLOCKDESC_RGB, + {1, 1, 1}, 2, 2, + 16, {{5}, {6}, {5}, {0}}, + {{0}, {5}, {11}, {0}}}, + + {SVGA3D_X1R5G5B5, SVGA3DBLOCKDESC_RGB, + {1, 1, 1}, 2, 2, + 15, {{5}, {5}, {5}, {0}}, + {{0}, {5}, {10}, {0}}}, + + {SVGA3D_A1R5G5B5, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 2, 2, + 16, {{5}, {5}, {5}, {1}}, + {{0}, {5}, {10}, {15}}}, + + {SVGA3D_A4R4G4B4, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 2, 2, + 16, {{4}, {4}, {4}, {4}}, + {{0}, {4}, {8}, {12}}}, + + {SVGA3D_Z_D32, SVGA3DBLOCKDESC_DEPTH, + {1, 1, 1}, 4, 4, + 32, {{0}, {0}, {32}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_Z_D16, SVGA3DBLOCKDESC_DEPTH, + {1, 1, 1}, 2, 2, + 16, {{0}, {0}, {16}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_Z_D24S8, SVGA3DBLOCKDESC_DS, + {1, 1, 1}, 4, 4, + 32, {{0}, {8}, {24}, {0}}, + {{0}, {24}, {0}, {0}}}, + + {SVGA3D_Z_D15S1, SVGA3DBLOCKDESC_DS, + {1, 1, 1}, 2, 2, + 16, {{0}, {1}, {15}, {0}}, + {{0}, {15}, {0}, {0}}}, + + {SVGA3D_LUMINANCE8, SVGA3DBLOCKDESC_LUMINANCE, + {1, 1, 1}, 1, 1, + 8, {{0}, {0}, {8}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_LUMINANCE4_ALPHA4, SVGA3DBLOCKDESC_LA, + {1 , 1, 1}, 1, 1, + 8, {{0}, {0}, {4}, {4}}, + {{0}, {0}, {0}, {4}}}, + + {SVGA3D_LUMINANCE16, SVGA3DBLOCKDESC_LUMINANCE, + {1, 1, 1}, 2, 2, + 16, {{0}, {0}, {16}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_LUMINANCE8_ALPHA8, SVGA3DBLOCKDESC_LA, + {1, 1, 1}, 2, 2, + 16, {{0}, {0}, {8}, {8}}, + {{0}, {0}, {0}, {8}}}, + + {SVGA3D_DXT1, SVGA3DBLOCKDESC_COMPRESSED, + {4, 4, 1}, 8, 8, + 64, {{0}, {0}, {64}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_DXT2, SVGA3DBLOCKDESC_COMPRESSED, + {4, 4, 1}, 16, 16, + 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_DXT3, SVGA3DBLOCKDESC_COMPRESSED, + {4, 4, 1}, 16, 16, + 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_DXT4, SVGA3DBLOCKDESC_COMPRESSED, + {4, 4, 1}, 16, 16, + 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_DXT5, SVGA3DBLOCKDESC_COMPRESSED, + {4, 4, 1}, 16, 16, + 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_BUMPU8V8, SVGA3DBLOCKDESC_UV, + {1, 1, 1}, 2, 2, + 16, {{0}, {0}, {8}, {8}}, + {{0}, {0}, {0}, {8}}}, + + {SVGA3D_BUMPL6V5U5, SVGA3DBLOCKDESC_UVL, + {1, 1, 1}, 2, 2, + 16, {{5}, {5}, {6}, {0}}, + {{11}, {6}, {0}, {0}}}, + + {SVGA3D_BUMPX8L8V8U8, SVGA3DBLOCKDESC_UVL, + {1, 1, 1}, 4, 4, + 32, {{8}, {8}, {8}, {0}}, + {{16}, {8}, {0}, {0}}}, + + {SVGA3D_BUMPL8V8U8, SVGA3DBLOCKDESC_UVL, + {1, 1, 1}, 3, 3, + 24, {{8}, {8}, {8}, {0}}, + {{16}, {8}, {0}, {0}}}, + + {SVGA3D_ARGB_S10E5, SVGA3DBLOCKDESC_RGBA_FP, + 
{1, 1, 1}, 8, 8, + 64, {{16}, {16}, {16}, {16}}, + {{32}, {16}, {0}, {48}}}, + + {SVGA3D_ARGB_S23E8, SVGA3DBLOCKDESC_RGBA_FP, + {1, 1, 1}, 16, 16, + 128, {{32}, {32}, {32}, {32}}, + {{64}, {32}, {0}, {96}}}, + + {SVGA3D_A2R10G10B10, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 4, 4, + 32, {{10}, {10}, {10}, {2}}, + {{0}, {10}, {20}, {30}}}, + + {SVGA3D_V8U8, SVGA3DBLOCKDESC_UV, + {1, 1, 1}, 2, 2, + 16, {{8}, {8}, {0}, {0}}, + {{8}, {0}, {0}, {0}}}, + + {SVGA3D_Q8W8V8U8, SVGA3DBLOCKDESC_UVWQ, + {1, 1, 1}, 4, 4, + 32, {{8}, {8}, {8}, {8}}, + {{24}, {16}, {8}, {0}}}, + + {SVGA3D_CxV8U8, SVGA3DBLOCKDESC_UV, + {1, 1, 1}, 2, 2, + 16, {{8}, {8}, {0}, {0}}, + {{8}, {0}, {0}, {0}}}, + + {SVGA3D_X8L8V8U8, SVGA3DBLOCKDESC_UVL, + {1, 1, 1}, 4, 4, + 24, {{8}, {8}, {8}, {0}}, + {{16}, {8}, {0}, {0}}}, + + {SVGA3D_A2W10V10U10, SVGA3DBLOCKDESC_UVWA, + {1, 1, 1}, 4, 4, + 32, {{10}, {10}, {10}, {2}}, + {{0}, {10}, {20}, {30}}}, + + {SVGA3D_ALPHA8, SVGA3DBLOCKDESC_ALPHA, + {1, 1, 1}, 1, 1, + 8, {{0}, {0}, {0}, {8}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_R_S10E5, SVGA3DBLOCKDESC_R_FP, + {1, 1, 1}, 2, 2, + 16, {{0}, {0}, {16}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_R_S23E8, SVGA3DBLOCKDESC_R_FP, + {1, 1, 1}, 4, 4, + 32, {{0}, {0}, {32}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_RG_S10E5, SVGA3DBLOCKDESC_RG_FP, + {1, 1, 1}, 4, 4, + 32, {{0}, {16}, {16}, {0}}, + {{0}, {16}, {0}, {0}}}, + + {SVGA3D_RG_S23E8, SVGA3DBLOCKDESC_RG_FP, + {1, 1, 1}, 8, 8, + 64, {{0}, {32}, {32}, {0}}, + {{0}, {32}, {0}, {0}}}, + + {SVGA3D_BUFFER, SVGA3DBLOCKDESC_BUFFER, + {1, 1, 1}, 1, 1, + 8, {{0}, {0}, {8}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_Z_D24X8, SVGA3DBLOCKDESC_DEPTH, + {1, 1, 1}, 4, 4, + 32, {{0}, {0}, {24}, {0}}, + {{0}, {24}, {0}, {0}}}, + + {SVGA3D_V16U16, SVGA3DBLOCKDESC_UV, + {1, 1, 1}, 4, 4, + 32, {{16}, {16}, {0}, {0}}, + {{16}, {0}, {0}, {0}}}, + + {SVGA3D_G16R16, SVGA3DBLOCKDESC_RG, + {1, 1, 1}, 4, 4, + 32, {{0}, {16}, {16}, {0}}, + {{0}, {0}, {16}, {0}}}, + + {SVGA3D_A16B16G16R16, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 8, 8, + 64, {{16}, {16}, {16}, {16}}, + {{32}, {16}, {0}, {48}}}, + + {SVGA3D_UYVY, SVGA3DBLOCKDESC_YUV, + {1, 1, 1}, 2, 2, + 16, {{8}, {0}, {8}, {0}}, + {{0}, {0}, {8}, {0}}}, + + {SVGA3D_YUY2, SVGA3DBLOCKDESC_YUV, + {1, 1, 1}, 2, 2, + 16, {{8}, {0}, {8}, {0}}, + {{8}, {0}, {0}, {0}}}, + + {SVGA3D_NV12, SVGA3DBLOCKDESC_NV12, + {2, 2, 1}, 6, 2, + 48, {{0}, {0}, {48}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV, + {1, 1, 1}, 4, 4, + 32, {{8}, {8}, {8}, {8}}, + {{0}, {8}, {16}, {24}}}, + + {SVGA3D_R32G32B32A32_TYPELESS, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 16, 16, + 128, {{32}, {32}, {32}, {32}}, + {{64}, {32}, {0}, {96}}}, + + {SVGA3D_R32G32B32A32_UINT, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 16, 16, + 128, {{32}, {32}, {32}, {32}}, + {{64}, {32}, {0}, {96}}}, + + {SVGA3D_R32G32B32A32_SINT, SVGA3DBLOCKDESC_UVWQ, + {1, 1, 1}, 16, 16, + 128, {{32}, {32}, {32}, {32}}, + {{64}, {32}, {0}, {96}}}, + + {SVGA3D_R32G32B32_TYPELESS, SVGA3DBLOCKDESC_RGB, + {1, 1, 1}, 12, 12, + 96, {{32}, {32}, {32}, {0}}, + {{64}, {32}, {0}, {0}}}, + + {SVGA3D_R32G32B32_FLOAT, SVGA3DBLOCKDESC_RGB_FP, + {1, 1, 1}, 12, 12, + 96, {{32}, {32}, {32}, {0}}, + {{64}, {32}, {0}, {0}}}, + + {SVGA3D_R32G32B32_UINT, SVGA3DBLOCKDESC_RGB, + {1, 1, 1}, 12, 12, + 96, {{32}, {32}, {32}, {0}}, + {{64}, {32}, {0}, {0}}}, + + {SVGA3D_R32G32B32_SINT, SVGA3DBLOCKDESC_UVW, + {1, 1, 1}, 12, 12, + 96, {{32}, {32}, {32}, {0}}, + {{64}, {32}, {0}, {0}}}, + + {SVGA3D_R16G16B16A16_TYPELESS, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 8, 8, + 64, 
{{16}, {16}, {16}, {16}}, + {{32}, {16}, {0}, {48}}}, + + {SVGA3D_R16G16B16A16_UINT, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 8, 8, + 64, {{16}, {16}, {16}, {16}}, + {{32}, {16}, {0}, {48}}}, + + {SVGA3D_R16G16B16A16_SNORM, SVGA3DBLOCKDESC_UVWQ, + {1, 1, 1}, 8, 8, + 64, {{16}, {16}, {16}, {16}}, + {{32}, {16}, {0}, {48}}}, + + {SVGA3D_R16G16B16A16_SINT, SVGA3DBLOCKDESC_UVWQ, + {1, 1, 1}, 8, 8, + 64, {{16}, {16}, {16}, {16}}, + {{32}, {16}, {0}, {48}}}, + + {SVGA3D_R32G32_TYPELESS, SVGA3DBLOCKDESC_RG, + {1, 1, 1}, 8, 8, + 64, {{0}, {32}, {32}, {0}}, + {{0}, {32}, {0}, {0}}}, + + {SVGA3D_R32G32_UINT, SVGA3DBLOCKDESC_RG, + {1, 1, 1}, 8, 8, + 64, {{0}, {32}, {32}, {0}}, + {{0}, {32}, {0}, {0}}}, + + {SVGA3D_R32G32_SINT, SVGA3DBLOCKDESC_UV, + {1, 1, 1}, 8, 8, + 64, {{0}, {32}, {32}, {0}}, + {{0}, {32}, {0}, {0}}}, + + {SVGA3D_R32G8X24_TYPELESS, SVGA3DBLOCKDESC_RG, + {1, 1, 1}, 8, 8, + 64, {{0}, {8}, {32}, {0}}, + {{0}, {32}, {0}, {0}}}, + + {SVGA3D_D32_FLOAT_S8X24_UINT, SVGA3DBLOCKDESC_DS, + {1, 1, 1}, 8, 8, + 64, {{0}, {8}, {32}, {0}}, + {{0}, {32}, {0}, {0}}}, + + {SVGA3D_R32_FLOAT_X8X24_TYPELESS, SVGA3DBLOCKDESC_R_FP, + {1, 1, 1}, 8, 8, + 64, {{0}, {0}, {32}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_X32_TYPELESS_G8X24_UINT, SVGA3DBLOCKDESC_GREEN, + {1, 1, 1}, 8, 8, + 64, {{0}, {8}, {0}, {0}}, + {{0}, {32}, {0}, {0}}}, + + {SVGA3D_R10G10B10A2_TYPELESS, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 4, 4, + 32, {{10}, {10}, {10}, {2}}, + {{0}, {10}, {20}, {30}}}, + + {SVGA3D_R10G10B10A2_UINT, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 4, 4, + 32, {{10}, {10}, {10}, {2}}, + {{0}, {10}, {20}, {30}}}, + + {SVGA3D_R11G11B10_FLOAT, SVGA3DBLOCKDESC_RGB_FP, + {1, 1, 1}, 4, 4, + 32, {{10}, {11}, {11}, {0}}, + {{0}, {10}, {21}, {0}}}, + + {SVGA3D_R8G8B8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 4, 4, + 32, {{8}, {8}, {8}, {8}}, + {{16}, {8}, {0}, {24}}}, + + {SVGA3D_R8G8B8A8_UNORM, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 4, 4, + 32, {{8}, {8}, {8}, {8}}, + {{16}, {8}, {0}, {24}}}, + + {SVGA3D_R8G8B8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB, + {1, 1, 1}, 4, 4, + 32, {{8}, {8}, {8}, {8}}, + {{16}, {8}, {0}, {24}}}, + + {SVGA3D_R8G8B8A8_UINT, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 4, 4, + 32, {{8}, {8}, {8}, {8}}, + {{16}, {8}, {0}, {24}}}, + + {SVGA3D_R8G8B8A8_SINT, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 4, 4, + 32, {{8}, {8}, {8}, {8}}, + {{16}, {8}, {0}, {24}}}, + + {SVGA3D_R16G16_TYPELESS, SVGA3DBLOCKDESC_RG, + {1, 1, 1}, 4, 4, + 32, {{0}, {16}, {16}, {0}}, + {{0}, {16}, {0}, {0}}}, + + {SVGA3D_R16G16_UINT, SVGA3DBLOCKDESC_RG_FP, + {1, 1, 1}, 4, 4, + 32, {{0}, {16}, {16}, {0}}, + {{0}, {16}, {0}, {0}}}, + + {SVGA3D_R16G16_SINT, SVGA3DBLOCKDESC_UV, + {1, 1, 1}, 4, 4, + 32, {{0}, {16}, {16}, {0}}, + {{0}, {16}, {0}, {0}}}, + + {SVGA3D_R32_TYPELESS, SVGA3DBLOCKDESC_RED, + {1, 1, 1}, 4, 4, + 32, {{0}, {0}, {32}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_D32_FLOAT, SVGA3DBLOCKDESC_DEPTH, + {1, 1, 1}, 4, 4, + 32, {{0}, {0}, {32}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_R32_UINT, SVGA3DBLOCKDESC_RED, + {1, 1, 1}, 4, 4, + 32, {{0}, {0}, {32}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_R32_SINT, SVGA3DBLOCKDESC_RED, + {1, 1, 1}, 4, 4, + 32, {{0}, {0}, {32}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_R24G8_TYPELESS, SVGA3DBLOCKDESC_RG, + {1, 1, 1}, 4, 4, + 32, {{0}, {8}, {24}, {0}}, + {{0}, {24}, {0}, {0}}}, + + {SVGA3D_D24_UNORM_S8_UINT, SVGA3DBLOCKDESC_DS, + {1, 1, 1}, 4, 4, + 32, {{0}, {8}, {24}, {0}}, + {{0}, {24}, {0}, {0}}}, + + {SVGA3D_R24_UNORM_X8_TYPELESS, SVGA3DBLOCKDESC_RED, + {1, 1, 1}, 4, 4, + 32, {{0}, {0}, {24}, {0}}, + 
{{0}, {0}, {0}, {0}}}, + + {SVGA3D_X24_TYPELESS_G8_UINT, SVGA3DBLOCKDESC_GREEN, + {1, 1, 1}, 4, 4, + 32, {{0}, {8}, {0}, {0}}, + {{0}, {24}, {0}, {0}}}, + + {SVGA3D_R8G8_TYPELESS, SVGA3DBLOCKDESC_RG, + {1, 1, 1}, 2, 2, + 16, {{0}, {8}, {8}, {0}}, + {{0}, {8}, {0}, {0}}}, + + {SVGA3D_R8G8_UNORM, SVGA3DBLOCKDESC_RG, + {1, 1, 1}, 2, 2, + 16, {{0}, {8}, {8}, {0}}, + {{0}, {8}, {0}, {0}}}, + + {SVGA3D_R8G8_UINT, SVGA3DBLOCKDESC_RG, + {1, 1, 1}, 2, 2, + 16, {{0}, {8}, {8}, {0}}, + {{0}, {8}, {0}, {0}}}, + + {SVGA3D_R8G8_SINT, SVGA3DBLOCKDESC_UV, + {1, 1, 1}, 2, 2, + 16, {{0}, {8}, {8}, {0}}, + {{0}, {8}, {0}, {0}}}, + + {SVGA3D_R16_TYPELESS, SVGA3DBLOCKDESC_RED, + {1, 1, 1}, 2, 2, + 16, {{0}, {0}, {16}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_R16_UNORM, SVGA3DBLOCKDESC_RED, + {1, 1, 1}, 2, 2, + 16, {{0}, {0}, {16}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_R16_UINT, SVGA3DBLOCKDESC_RED, + {1, 1, 1}, 2, 2, + 16, {{0}, {0}, {16}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_R16_SNORM, SVGA3DBLOCKDESC_U, + {1, 1, 1}, 2, 2, + 16, {{0}, {0}, {16}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_R16_SINT, SVGA3DBLOCKDESC_U, + {1, 1, 1}, 2, 2, + 16, {{0}, {0}, {16}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_R8_TYPELESS, SVGA3DBLOCKDESC_RED, + {1, 1, 1}, 1, 1, + 8, {{0}, {0}, {8}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_R8_UNORM, SVGA3DBLOCKDESC_RED, + {1, 1, 1}, 1, 1, + 8, {{0}, {0}, {8}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_R8_UINT, SVGA3DBLOCKDESC_RED, + {1, 1, 1}, 1, 1, + 8, {{0}, {0}, {8}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_R8_SNORM, SVGA3DBLOCKDESC_U, + {1, 1, 1}, 1, 1, + 8, {{0}, {0}, {8}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_R8_SINT, SVGA3DBLOCKDESC_U, + {1, 1, 1}, 1, 1, + 8, {{0}, {0}, {8}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_P8, SVGA3DBLOCKDESC_RED, + {1, 1, 1}, 1, 1, + 8, {{0}, {0}, {8}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_R9G9B9E5_SHAREDEXP, SVGA3DBLOCKDESC_RGBE, + {1, 1, 1}, 4, 4, + 32, {{9}, {9}, {9}, {5}}, + {{18}, {9}, {0}, {27}}}, + + {SVGA3D_R8G8_B8G8_UNORM, SVGA3DBLOCKDESC_RG, + {1, 1, 1}, 2, 2, + 16, {{0}, {8}, {8}, {0}}, + {{0}, {8}, {0}, {0}}}, + + {SVGA3D_G8R8_G8B8_UNORM, SVGA3DBLOCKDESC_RG, + {1, 1, 1}, 2, 2, + 16, {{0}, {8}, {8}, {0}}, + {{0}, {8}, {0}, {0}}}, + + {SVGA3D_BC1_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED, + {4, 4, 1}, 8, 8, + 64, {{0}, {0}, {64}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_BC1_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB, + {4, 4, 1}, 8, 8, + 64, {{0}, {0}, {64}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_BC2_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED, + {4, 4, 1}, 16, 16, + 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_BC2_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB, + {4, 4, 1}, 16, 16, + 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_BC3_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED, + {4, 4, 1}, 16, 16, + 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_BC3_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB, + {4, 4, 1}, 16, 16, + 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_BC4_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED, + {4, 4, 1}, 8, 8, + 64, {{0}, {0}, {64}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_ATI1, SVGA3DBLOCKDESC_COMPRESSED, + {4, 4, 1}, 8, 8, + 64, {{0}, {0}, {64}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_BC4_SNORM, SVGA3DBLOCKDESC_COMPRESSED, + {4, 4, 1}, 8, 8, + 64, {{0}, {0}, {64}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_BC5_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED, + {4, 4, 1}, 16, 16, + 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {0}, {0}}}, + + 
{SVGA3D_ATI2, SVGA3DBLOCKDESC_COMPRESSED, + {4, 4, 1}, 16, 16, + 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_BC5_SNORM, SVGA3DBLOCKDESC_COMPRESSED, + {4, 4, 1}, 16, 16, + 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 4, 4, + 32, {{10}, {10}, {10}, {2}}, + {{0}, {10}, {20}, {30}}}, + + {SVGA3D_B8G8R8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 4, 4, + 32, {{8}, {8}, {8}, {8}}, + {{0}, {8}, {16}, {24}}}, + + {SVGA3D_B8G8R8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB, + {1, 1, 1}, 4, 4, + 32, {{8}, {8}, {8}, {8}}, + {{0}, {8}, {16}, {24}}}, + + {SVGA3D_B8G8R8X8_TYPELESS, SVGA3DBLOCKDESC_RGB, + {1, 1, 1}, 4, 4, + 24, {{8}, {8}, {8}, {0}}, + {{0}, {8}, {16}, {24}}}, + + {SVGA3D_B8G8R8X8_UNORM_SRGB, SVGA3DBLOCKDESC_RGB_SRGB, + {1, 1, 1}, 4, 4, + 24, {{8}, {8}, {8}, {0}}, + {{0}, {8}, {16}, {24}}}, + + {SVGA3D_Z_DF16, SVGA3DBLOCKDESC_DEPTH, + {1, 1, 1}, 2, 2, + 16, {{0}, {0}, {16}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_Z_DF24, SVGA3DBLOCKDESC_DEPTH, + {1, 1, 1}, 4, 4, + 32, {{0}, {8}, {24}, {0}}, + {{0}, {24}, {0}, {0}}}, + + {SVGA3D_Z_D24S8_INT, SVGA3DBLOCKDESC_DS, + {1, 1, 1}, 4, 4, + 32, {{0}, {8}, {24}, {0}}, + {{0}, {24}, {0}, {0}}}, + + {SVGA3D_YV12, SVGA3DBLOCKDESC_YV12, + {2, 2, 1}, 6, 2, + 48, {{0}, {0}, {48}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_R32G32B32A32_FLOAT, SVGA3DBLOCKDESC_RGBA_FP, + {1, 1, 1}, 16, 16, + 128, {{32}, {32}, {32}, {32}}, + {{64}, {32}, {0}, {96}}}, + + {SVGA3D_R16G16B16A16_FLOAT, SVGA3DBLOCKDESC_RGBA_FP, + {1, 1, 1}, 8, 8, + 64, {{16}, {16}, {16}, {16}}, + {{32}, {16}, {0}, {48}}}, + + {SVGA3D_R16G16B16A16_UNORM, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 8, 8, + 64, {{16}, {16}, {16}, {16}}, + {{32}, {16}, {0}, {48}}}, + + {SVGA3D_R32G32_FLOAT, SVGA3DBLOCKDESC_RG_FP, + {1, 1, 1}, 8, 8, + 64, {{0}, {32}, {32}, {0}}, + {{0}, {32}, {0}, {0}}}, + + {SVGA3D_R10G10B10A2_UNORM, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 4, 4, + 32, {{10}, {10}, {10}, {2}}, + {{0}, {10}, {20}, {30}}}, + + {SVGA3D_R8G8B8A8_SNORM, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 4, 4, + 32, {{8}, {8}, {8}, {8}}, + {{24}, {16}, {8}, {0}}}, + + {SVGA3D_R16G16_FLOAT, SVGA3DBLOCKDESC_RG_FP, + {1, 1, 1}, 4, 4, + 32, {{0}, {16}, {16}, {0}}, + {{0}, {16}, {0}, {0}}}, + + {SVGA3D_R16G16_UNORM, SVGA3DBLOCKDESC_RG, + {1, 1, 1}, 4, 4, + 32, {{0}, {16}, {16}, {0}}, + {{0}, {0}, {16}, {0}}}, + + {SVGA3D_R16G16_SNORM, SVGA3DBLOCKDESC_RG, + {1, 1, 1}, 4, 4, + 32, {{16}, {16}, {0}, {0}}, + {{16}, {0}, {0}, {0}}}, + + {SVGA3D_R32_FLOAT, SVGA3DBLOCKDESC_R_FP, + {1, 1, 1}, 4, 4, + 32, {{0}, {0}, {32}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_R8G8_SNORM, SVGA3DBLOCKDESC_RG, + {1, 1, 1}, 2, 2, + 16, {{8}, {8}, {0}, {0}}, + {{8}, {0}, {0}, {0}}}, + + {SVGA3D_R16_FLOAT, SVGA3DBLOCKDESC_R_FP, + {1, 1, 1}, 2, 2, + 16, {{0}, {0}, {16}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_D16_UNORM, SVGA3DBLOCKDESC_DEPTH, + {1, 1, 1}, 2, 2, + 16, {{0}, {0}, {16}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_A8_UNORM, SVGA3DBLOCKDESC_ALPHA, + {1, 1, 1}, 1, 1, + 8, {{0}, {0}, {0}, {8}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_BC1_UNORM, SVGA3DBLOCKDESC_COMPRESSED, + {4, 4, 1}, 8, 8, + 64, {{0}, {0}, {64}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_BC2_UNORM, SVGA3DBLOCKDESC_COMPRESSED, + {4, 4, 1}, 16, 16, + 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_BC3_UNORM, SVGA3DBLOCKDESC_COMPRESSED, + {4, 4, 1}, 16, 16, + 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_B5G6R5_UNORM, SVGA3DBLOCKDESC_RGB, + {1, 1, 1}, 2, 
2, + 16, {{5}, {6}, {5}, {0}}, + {{0}, {5}, {11}, {0}}}, + + {SVGA3D_B5G5R5A1_UNORM, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 2, 2, + 16, {{5}, {5}, {5}, {1}}, + {{0}, {5}, {10}, {15}}}, + + {SVGA3D_B8G8R8A8_UNORM, SVGA3DBLOCKDESC_RGBA, + {1, 1, 1}, 4, 4, + 32, {{8}, {8}, {8}, {8}}, + {{0}, {8}, {16}, {24}}}, + + {SVGA3D_B8G8R8X8_UNORM, SVGA3DBLOCKDESC_RGB, + {1, 1, 1}, 4, 4, + 24, {{8}, {8}, {8}, {0}}, + {{0}, {8}, {16}, {24}}}, + + {SVGA3D_BC4_UNORM, SVGA3DBLOCKDESC_COMPRESSED, + {4, 4, 1}, 8, 8, + 64, {{0}, {0}, {64}, {0}}, + {{0}, {0}, {0}, {0}}}, + + {SVGA3D_BC5_UNORM, SVGA3DBLOCKDESC_COMPRESSED, + {4, 4, 1}, 16, 16, + 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {0}, {0}}}, + +}; + +static inline u32 clamped_umul32(u32 a, u32 b) +{ + uint64_t tmp = (uint64_t) a*b; + return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp; +} + +static inline const struct svga3d_surface_desc * +svga3dsurface_get_desc(SVGA3dSurfaceFormat format) +{ + if (format < ARRAY_SIZE(svga3d_surface_descs)) + return &svga3d_surface_descs[format]; + + return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID]; +} + +/* + *---------------------------------------------------------------------- + * + * svga3dsurface_get_mip_size -- + * + * Given a base level size and the mip level, compute the size of + * the mip level. + * + * Results: + * See above. + * + * Side effects: + * None. + * + *---------------------------------------------------------------------- + */ + +static inline surf_size_struct +svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level) +{ + surf_size_struct size; + + size.width = max_t(u32, base_level.width >> mip_level, 1); + size.height = max_t(u32, base_level.height >> mip_level, 1); + size.depth = max_t(u32, base_level.depth >> mip_level, 1); + return size; +} + +static inline void +svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc, + const surf_size_struct *pixel_size, + surf_size_struct *block_size) +{ + block_size->width = DIV_ROUND_UP(pixel_size->width, + desc->block_size.width); + block_size->height = DIV_ROUND_UP(pixel_size->height, + desc->block_size.height); + block_size->depth = DIV_ROUND_UP(pixel_size->depth, + desc->block_size.depth); +} + +static inline bool +svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc) +{ + return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0; +} + +static inline u32 +svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc, + const surf_size_struct *size) +{ + u32 pitch; + surf_size_struct blocks; + + svga3dsurface_get_size_in_blocks(desc, size, &blocks); + + pitch = blocks.width * desc->pitch_bytes_per_block; + + return pitch; +} + +/* + *----------------------------------------------------------------------------- + * + * svga3dsurface_get_image_buffer_size -- + * + * Return the number of bytes of buffer space required to store + * one image of a surface, optionally using the specified pitch. + * + * If pitch is zero, it is assumed that rows are tightly packed. + * + * This function is overflow-safe. If the result would have + * overflowed, instead we return MAX_UINT32. + * + * Results: + * Byte count. + * + * Side effects: + * None. 
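+ *
+ *      Worked example (editorial note, not part of the original header):
+ *      a 64x64 SVGA3D_DXT1 image uses 4x4-pixel blocks of 8 bytes, i.e.
+ *      16x16 blocks; with pitch == 0 the pitch defaults to 16 * 8 = 128
+ *      bytes, so the image buffer size is 16 * 128 = 2048 bytes.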
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+static inline u32
+svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
+				    const surf_size_struct *size,
+				    u32 pitch)
+{
+	surf_size_struct image_blocks;
+	u32 slice_size, total_size;
+
+	svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
+
+	if (svga3dsurface_is_planar_surface(desc)) {
+		total_size = clamped_umul32(image_blocks.width,
+					    image_blocks.height);
+		total_size = clamped_umul32(total_size, image_blocks.depth);
+		total_size = clamped_umul32(total_size, desc->bytes_per_block);
+		return total_size;
+	}
+
+	if (pitch == 0)
+		pitch = svga3dsurface_calculate_pitch(desc, size);
+
+	slice_size = clamped_umul32(image_blocks.height, pitch);
+	total_size = clamped_umul32(slice_size, image_blocks.depth);
+
+	return total_size;
+}
+
+static inline u32
+svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
+				  surf_size_struct base_level_size,
+				  u32 num_mip_levels,
+				  u32 num_layers)
+{
+	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+	u32 total_size = 0;
+	u32 mip;
+
+	for (mip = 0; mip < num_mip_levels; mip++) {
+		surf_size_struct size =
+			svga3dsurface_get_mip_size(base_level_size, mip);
+		total_size += svga3dsurface_get_image_buffer_size(desc,
+								  &size, 0);
+	}
+
+	return total_size * num_layers;
+}
+
+
+/**
+ * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
+ * in an image (or volume).
+ *
+ * @format: The surface format.
+ * @width: The image width in pixels.
+ * @height: The image height in pixels.
+ * @x: X coordinate of the pixel.
+ * @y: Y coordinate of the pixel.
+ * @z: Z coordinate of the pixel.
+ */
+static inline u32
+svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
+			       u32 width, u32 height,
+			       u32 x, u32 y, u32 z)
+{
+	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+	const u32 bw = desc->block_size.width, bh = desc->block_size.height;
+	const u32 bd = desc->block_size.depth;
+	const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
+	const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
+	const u32 offset = (z / bd * imgstride +
+			    y / bh * rowstride +
+			    x / bw * desc->bytes_per_block);
+	return offset;
+}
+
+
+static inline u32
+svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
+			       surf_size_struct baseLevelSize,
+			       u32 numMipLevels,
+			       u32 face,
+			       u32 mip)
+
+{
+	u32 offset;
+	u32 mipChainBytes;
+	u32 mipChainBytesToLevel;
+	u32 i;
+	const struct svga3d_surface_desc *desc;
+	surf_size_struct mipSize;
+	u32 bytes;
+
+	desc = svga3dsurface_get_desc(format);
+
+	mipChainBytes = 0;
+	mipChainBytesToLevel = 0;
+	for (i = 0; i < numMipLevels; i++) {
+		mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
+		bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
+		mipChainBytes += bytes;
+		if (i < mip)
+			mipChainBytesToLevel += bytes;
+	}
+
+	offset = mipChainBytes * face + mipChainBytesToLevel;
+
+	return offset;
+}
+
+
+/**
+ * svga3dsurface_is_gb_screen_target_format - Is the specified format usable as
+ *                                            a ScreenTarget?
+ *                                            (with just the GBObjects cap-bit
+ *                                             set)
+ * @format: format to be queried
+ *
+ * RETURNS:
+ * true if the queried format is valid for screen targets
+ */
+static inline bool
+svga3dsurface_is_gb_screen_target_format(SVGA3dSurfaceFormat format)
+{
+	return (format == SVGA3D_X8R8G8B8 ||
+		format == SVGA3D_A8R8G8B8 ||
+		format == SVGA3D_R5G6B5 ||
+		format == SVGA3D_X1R5G5B5 ||
+		format == SVGA3D_A1R5G5B5 ||
+		format == SVGA3D_P8);
+}
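An illustrative usage sketch (editorial addition, not part of this patch):
how a caller might size the backing store for a cubemap mip chain and
locate a texel with the helpers above. The demo_* names are invented, and
only functions defined in this header are used.

	/* Bytes needed for 6 faces x 9 mip levels of a 256x256 A8R8G8B8 map. */
	static u32 demo_cubemap_bytes(void)
	{
		surf_size_struct base = { .width = 256, .height = 256,
					  .depth = 1 };

		return svga3dsurface_get_serialized_size(SVGA3D_A8R8G8B8,
							 base, 9, 6);
	}

	/* Byte offset of texel (10, 20) in a 256x256 A8R8G8B8 image. */
	static u32 demo_texel_offset(void)
	{
		return svga3dsurface_get_pixel_offset(SVGA3D_A8R8G8B8,
						      256, 256, 10, 20, 0);
	}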
+/**
+ * svga3dsurface_is_dx_screen_target_format - Is the specified format usable as
+ *                                            a ScreenTarget?
+ *                                            (with DX10 enabled)
+ *
+ * @format: format to be queried
+ *
+ * Results:
+ * true if the queried format is valid for screen targets
+ */
+static inline bool
+svga3dsurface_is_dx_screen_target_format(SVGA3dSurfaceFormat format)
+{
+	return (format == SVGA3D_R8G8B8A8_UNORM ||
+		format == SVGA3D_B8G8R8A8_UNORM ||
+		format == SVGA3D_B8G8R8X8_UNORM);
+}
+
+
+/**
+ * svga3dsurface_is_screen_target_format - Is the specified format usable as a
+ *                                         ScreenTarget?
+ *                                         (for some combination of caps)
+ *
+ * @format: format to be queried
+ *
+ * Results:
+ * true if the queried format is valid for screen targets
+ */
+static inline bool
+svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format)
+{
+	if (svga3dsurface_is_gb_screen_target_format(format)) {
+		return true;
+	}
+	return svga3dsurface_is_dx_screen_target_format(format);
+}
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
new file mode 100644
index 000000000000..27b33ba88430
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
@@ -0,0 +1,1633 @@
+/**********************************************************
+ * Copyright 2012-2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ * + **********************************************************/ + +/* + * svga3d_types.h -- + * + * SVGA 3d hardware definitions for basic types + */ + +#ifndef _SVGA3D_TYPES_H_ +#define _SVGA3D_TYPES_H_ + +#define INCLUDE_ALLOW_MODULE +#define INCLUDE_ALLOW_USERLEVEL +#define INCLUDE_ALLOW_VMCORE + +#include "includeCheck.h" + +/* + * Generic Types + */ + +#define SVGA3D_INVALID_ID ((uint32)-1) + +typedef uint32 SVGA3dBool; /* 32-bit Bool definition */ +typedef uint32 SVGA3dColor; /* a, r, g, b */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCopyRect { + uint32 x; + uint32 y; + uint32 w; + uint32 h; + uint32 srcx; + uint32 srcy; +} +#include "vmware_pack_end.h" +SVGA3dCopyRect; + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dCopyBox { + uint32 x; + uint32 y; + uint32 z; + uint32 w; + uint32 h; + uint32 d; + uint32 srcx; + uint32 srcy; + uint32 srcz; +} +#include "vmware_pack_end.h" +SVGA3dCopyBox; + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dRect { + uint32 x; + uint32 y; + uint32 w; + uint32 h; +} +#include "vmware_pack_end.h" +SVGA3dRect; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 x; + uint32 y; + uint32 z; + uint32 w; + uint32 h; + uint32 d; +} +#include "vmware_pack_end.h" +SVGA3dBox; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 x; + uint32 y; + uint32 z; +} +#include "vmware_pack_end.h" +SVGA3dPoint; + +/* + * Surface formats. + */ +typedef enum SVGA3dSurfaceFormat { + SVGA3D_FORMAT_INVALID = 0, + + SVGA3D_X8R8G8B8 = 1, + SVGA3D_FORMAT_MIN = 1, + + SVGA3D_A8R8G8B8 = 2, + + SVGA3D_R5G6B5 = 3, + SVGA3D_X1R5G5B5 = 4, + SVGA3D_A1R5G5B5 = 5, + SVGA3D_A4R4G4B4 = 6, + + SVGA3D_Z_D32 = 7, + SVGA3D_Z_D16 = 8, + SVGA3D_Z_D24S8 = 9, + SVGA3D_Z_D15S1 = 10, + + SVGA3D_LUMINANCE8 = 11, + SVGA3D_LUMINANCE4_ALPHA4 = 12, + SVGA3D_LUMINANCE16 = 13, + SVGA3D_LUMINANCE8_ALPHA8 = 14, + + SVGA3D_DXT1 = 15, + SVGA3D_DXT2 = 16, + SVGA3D_DXT3 = 17, + SVGA3D_DXT4 = 18, + SVGA3D_DXT5 = 19, + + SVGA3D_BUMPU8V8 = 20, + SVGA3D_BUMPL6V5U5 = 21, + SVGA3D_BUMPX8L8V8U8 = 22, + SVGA3D_BUMPL8V8U8 = 23, + + SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */ + SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */ + + SVGA3D_A2R10G10B10 = 26, + + /* signed formats */ + SVGA3D_V8U8 = 27, + SVGA3D_Q8W8V8U8 = 28, + SVGA3D_CxV8U8 = 29, + + /* mixed formats */ + SVGA3D_X8L8V8U8 = 30, + SVGA3D_A2W10V10U10 = 31, + + SVGA3D_ALPHA8 = 32, + + /* Single- and dual-component floating point formats */ + SVGA3D_R_S10E5 = 33, + SVGA3D_R_S23E8 = 34, + SVGA3D_RG_S10E5 = 35, + SVGA3D_RG_S23E8 = 36, + + SVGA3D_BUFFER = 37, + + SVGA3D_Z_D24X8 = 38, + + SVGA3D_V16U16 = 39, + + SVGA3D_G16R16 = 40, + SVGA3D_A16B16G16R16 = 41, + + /* Packed Video formats */ + SVGA3D_UYVY = 42, + SVGA3D_YUY2 = 43, + + /* Planar video formats */ + SVGA3D_NV12 = 44, + + /* Video format with alpha */ + SVGA3D_AYUV = 45, + + SVGA3D_R32G32B32A32_TYPELESS = 46, + SVGA3D_R32G32B32A32_UINT = 47, + SVGA3D_R32G32B32A32_SINT = 48, + SVGA3D_R32G32B32_TYPELESS = 49, + SVGA3D_R32G32B32_FLOAT = 50, + SVGA3D_R32G32B32_UINT = 51, + SVGA3D_R32G32B32_SINT = 52, + SVGA3D_R16G16B16A16_TYPELESS = 53, + SVGA3D_R16G16B16A16_UINT = 54, + SVGA3D_R16G16B16A16_SNORM = 55, + SVGA3D_R16G16B16A16_SINT = 56, + SVGA3D_R32G32_TYPELESS = 57, + SVGA3D_R32G32_UINT = 58, + SVGA3D_R32G32_SINT = 59, + SVGA3D_R32G8X24_TYPELESS = 60, + SVGA3D_D32_FLOAT_S8X24_UINT = 61, + SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62, + SVGA3D_X32_TYPELESS_G8X24_UINT = 63, + SVGA3D_R10G10B10A2_TYPELESS = 64, + SVGA3D_R10G10B10A2_UINT = 65, + 
SVGA3D_R11G11B10_FLOAT              = 66,
+	SVGA3D_R8G8B8A8_TYPELESS            = 67,
+	SVGA3D_R8G8B8A8_UNORM               = 68,
+	SVGA3D_R8G8B8A8_UNORM_SRGB          = 69,
+	SVGA3D_R8G8B8A8_UINT                = 70,
+	SVGA3D_R8G8B8A8_SINT                = 71,
+	SVGA3D_R16G16_TYPELESS              = 72,
+	SVGA3D_R16G16_UINT                  = 73,
+	SVGA3D_R16G16_SINT                  = 74,
+	SVGA3D_R32_TYPELESS                 = 75,
+	SVGA3D_D32_FLOAT                    = 76,
+	SVGA3D_R32_UINT                     = 77,
+	SVGA3D_R32_SINT                     = 78,
+	SVGA3D_R24G8_TYPELESS               = 79,
+	SVGA3D_D24_UNORM_S8_UINT            = 80,
+	SVGA3D_R24_UNORM_X8_TYPELESS        = 81,
+	SVGA3D_X24_TYPELESS_G8_UINT         = 82,
+	SVGA3D_R8G8_TYPELESS                = 83,
+	SVGA3D_R8G8_UNORM                   = 84,
+	SVGA3D_R8G8_UINT                    = 85,
+	SVGA3D_R8G8_SINT                    = 86,
+	SVGA3D_R16_TYPELESS                 = 87,
+	SVGA3D_R16_UNORM                    = 88,
+	SVGA3D_R16_UINT                     = 89,
+	SVGA3D_R16_SNORM                    = 90,
+	SVGA3D_R16_SINT                     = 91,
+	SVGA3D_R8_TYPELESS                  = 92,
+	SVGA3D_R8_UNORM                     = 93,
+	SVGA3D_R8_UINT                      = 94,
+	SVGA3D_R8_SNORM                     = 95,
+	SVGA3D_R8_SINT                      = 96,
+	SVGA3D_P8                           = 97,
+	SVGA3D_R9G9B9E5_SHAREDEXP           = 98,
+	SVGA3D_R8G8_B8G8_UNORM              = 99,
+	SVGA3D_G8R8_G8B8_UNORM              = 100,
+	SVGA3D_BC1_TYPELESS                 = 101,
+	SVGA3D_BC1_UNORM_SRGB               = 102,
+	SVGA3D_BC2_TYPELESS                 = 103,
+	SVGA3D_BC2_UNORM_SRGB               = 104,
+	SVGA3D_BC3_TYPELESS                 = 105,
+	SVGA3D_BC3_UNORM_SRGB               = 106,
+	SVGA3D_BC4_TYPELESS                 = 107,
+	SVGA3D_ATI1                         = 108,   /* DX9-specific BC4_UNORM */
+	SVGA3D_BC4_SNORM                    = 109,
+	SVGA3D_BC5_TYPELESS                 = 110,
+	SVGA3D_ATI2                         = 111,   /* DX9-specific BC5_UNORM */
+	SVGA3D_BC5_SNORM                    = 112,
+	SVGA3D_R10G10B10_XR_BIAS_A2_UNORM   = 113,
+	SVGA3D_B8G8R8A8_TYPELESS            = 114,
+	SVGA3D_B8G8R8A8_UNORM_SRGB          = 115,
+	SVGA3D_B8G8R8X8_TYPELESS            = 116,
+	SVGA3D_B8G8R8X8_UNORM_SRGB          = 117,
+
+	/* Advanced depth formats. */
+	SVGA3D_Z_DF16                       = 118,
+	SVGA3D_Z_DF24                       = 119,
+	SVGA3D_Z_D24S8_INT                  = 120,
+
+	/* Planar video formats. */
+	SVGA3D_YV12                         = 121,
+
+	SVGA3D_R32G32B32A32_FLOAT           = 122,
+	SVGA3D_R16G16B16A16_FLOAT           = 123,
+	SVGA3D_R16G16B16A16_UNORM           = 124,
+	SVGA3D_R32G32_FLOAT                 = 125,
+	SVGA3D_R10G10B10A2_UNORM            = 126,
+	SVGA3D_R8G8B8A8_SNORM               = 127,
+	SVGA3D_R16G16_FLOAT                 = 128,
+	SVGA3D_R16G16_UNORM                 = 129,
+	SVGA3D_R16G16_SNORM                 = 130,
+	SVGA3D_R32_FLOAT                    = 131,
+	SVGA3D_R8G8_SNORM                   = 132,
+	SVGA3D_R16_FLOAT                    = 133,
+	SVGA3D_D16_UNORM                    = 134,
+	SVGA3D_A8_UNORM                     = 135,
+	SVGA3D_BC1_UNORM                    = 136,
+	SVGA3D_BC2_UNORM                    = 137,
+	SVGA3D_BC3_UNORM                    = 138,
+	SVGA3D_B5G6R5_UNORM                 = 139,
+	SVGA3D_B5G5R5A1_UNORM               = 140,
+	SVGA3D_B8G8R8A8_UNORM               = 141,
+	SVGA3D_B8G8R8X8_UNORM               = 142,
+	SVGA3D_BC4_UNORM                    = 143,
+	SVGA3D_BC5_UNORM                    = 144,
+
+	SVGA3D_FORMAT_MAX
+} SVGA3dSurfaceFormat;
+
+typedef enum SVGA3dSurfaceFlags {
+	SVGA3D_SURFACE_CUBEMAP               = (1 << 0),
+
+	/*
+	 * HINT flags are not enforced by the device but are useful for
+	 * performance.
+	 */
+	SVGA3D_SURFACE_HINT_STATIC           = (1 << 1),
+	SVGA3D_SURFACE_HINT_DYNAMIC          = (1 << 2),
+	SVGA3D_SURFACE_HINT_INDEXBUFFER      = (1 << 3),
+	SVGA3D_SURFACE_HINT_VERTEXBUFFER     = (1 << 4),
+	SVGA3D_SURFACE_HINT_TEXTURE          = (1 << 5),
+	SVGA3D_SURFACE_HINT_RENDERTARGET     = (1 << 6),
+	SVGA3D_SURFACE_HINT_DEPTHSTENCIL     = (1 << 7),
+	SVGA3D_SURFACE_HINT_WRITEONLY        = (1 << 8),
+	SVGA3D_SURFACE_MASKABLE_ANTIALIAS    = (1 << 9),
+	SVGA3D_SURFACE_AUTOGENMIPMAPS        = (1 << 10),
+	SVGA3D_SURFACE_DECODE_RENDERTARGET   = (1 << 11),
+
+	/*
+	 * Is this surface using a base-level pitch for its mob backing?
+	 *
+	 * This flag is not intended to be set by guest-drivers, but is instead
+	 * set by the device when the surface is bound to a mob with a specified
+	 * pitch.
+	 */
+	SVGA3D_SURFACE_MOB_PITCH             = (1 << 12),
+
+	SVGA3D_SURFACE_INACTIVE              = (1 << 13),
+	SVGA3D_SURFACE_HINT_RT_LOCKABLE      = (1 << 14),
+	SVGA3D_SURFACE_VOLUME                = (1 << 15),
+
+	/*
+	 * Required to be set on a surface to bind it to a screen target.
+	 */
+	SVGA3D_SURFACE_SCREENTARGET          = (1 << 16),
+
+	/*
+	 * Align images in the guest-backing mob to 16-bytes.
+	 */
+	SVGA3D_SURFACE_ALIGN16               = (1 << 17),
+
+	SVGA3D_SURFACE_1D                    = (1 << 18),
+	SVGA3D_SURFACE_ARRAY                 = (1 << 19),
+
+	/*
+	 * Bind flags.
+	 * These are enforced for any surface defined with DefineGBSurface_v2.
+	 */
+	SVGA3D_SURFACE_BIND_VERTEX_BUFFER    = (1 << 20),
+	SVGA3D_SURFACE_BIND_INDEX_BUFFER     = (1 << 21),
+	SVGA3D_SURFACE_BIND_CONSTANT_BUFFER  = (1 << 22),
+	SVGA3D_SURFACE_BIND_SHADER_RESOURCE  = (1 << 23),
+	SVGA3D_SURFACE_BIND_RENDER_TARGET    = (1 << 24),
+	SVGA3D_SURFACE_BIND_DEPTH_STENCIL    = (1 << 25),
+	SVGA3D_SURFACE_BIND_STREAM_OUTPUT    = (1 << 26),
+
+	/*
+	 * A note on staging flags:
+	 *
+	 * The STAGING flags note that the surface will not be used directly by
+	 * the drawing pipeline, i.e. that it will not be bound to any bind
+	 * point.  Staging surfaces may be used by copy operations to move data
+	 * in and out of other surfaces.
+	 *
+	 * The HINT_INDIRECT_UPDATE flag suggests that the surface will receive
+	 * updates indirectly, i.e. the surface will not be updated directly,
+	 * but will receive copies from staging surfaces.
+	 */
+	SVGA3D_SURFACE_STAGING_UPLOAD        = (1 << 27),
+	SVGA3D_SURFACE_STAGING_DOWNLOAD      = (1 << 28),
+	SVGA3D_SURFACE_HINT_INDIRECT_UPDATE  = (1 << 29),
+
+	/*
+	 * Setting this flag allows this surface to be used with the
+	 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command.  It is only valid for
+	 * buffer surfaces, and no bind flags are allowed to be set on surfaces
+	 * with this flag.
+	 */
+	SVGA3D_SURFACE_TRANSFER_FROM_BUFFER  = (1 << 30),
+
+	/*
+	 * Marker for the last defined bit.
+	 */
+	SVGA3D_SURFACE_FLAG_MAX              = (1 << 31),
+} SVGA3dSurfaceFlags;
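An illustrative sketch (editorial addition, not part of this patch):
composing flags for a DX vertex-buffer surface and validating them against
the masks defined just below. The demo_* name is invented, and a C99/kernel
bool is assumed.

	static inline bool demo_vertex_buffer_flags_valid(void)
	{
		SVGA3dSurfaceFlags flags = (SVGA3dSurfaceFlags)
			(SVGA3D_SURFACE_HINT_VERTEXBUFFER |
			 SVGA3D_SURFACE_BIND_VERTEX_BUFFER);

		/* Exactly one bind bit and no staging bits may be set. */
		return (flags & SVGA3D_SURFACE_BIND_MASK) ==
			SVGA3D_SURFACE_BIND_VERTEX_BUFFER &&
		       (flags & SVGA3D_SURFACE_STAGING_MASK) == 0;
	}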
+#define SVGA3D_SURFACE_HB_DISALLOWED_MASK         \
+	( SVGA3D_SURFACE_MOB_PITCH |              \
+	  SVGA3D_SURFACE_SCREENTARGET |           \
+	  SVGA3D_SURFACE_ALIGN16 |                \
+	  SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |   \
+	  SVGA3D_SURFACE_BIND_STREAM_OUTPUT |     \
+	  SVGA3D_SURFACE_STAGING_UPLOAD |         \
+	  SVGA3D_SURFACE_STAGING_DOWNLOAD |       \
+	  SVGA3D_SURFACE_HINT_INDIRECT_UPDATE |   \
+	  SVGA3D_SURFACE_TRANSFER_FROM_BUFFER     \
+	)
+
+#define SVGA3D_SURFACE_2D_DISALLOWED_MASK         \
+	( SVGA3D_SURFACE_CUBEMAP |                \
+	  SVGA3D_SURFACE_MASKABLE_ANTIALIAS |     \
+	  SVGA3D_SURFACE_AUTOGENMIPMAPS |         \
+	  SVGA3D_SURFACE_DECODE_RENDERTARGET |    \
+	  SVGA3D_SURFACE_VOLUME |                 \
+	  SVGA3D_SURFACE_1D |                     \
+	  SVGA3D_SURFACE_ARRAY |                  \
+	  SVGA3D_SURFACE_BIND_VERTEX_BUFFER |     \
+	  SVGA3D_SURFACE_BIND_INDEX_BUFFER |      \
+	  SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |   \
+	  SVGA3D_SURFACE_BIND_DEPTH_STENCIL |     \
+	  SVGA3D_SURFACE_BIND_STREAM_OUTPUT |     \
+	  SVGA3D_SURFACE_TRANSFER_FROM_BUFFER     \
+	)
+
+#define SVGA3D_SURFACE_SCREENTARGET_DISALLOWED_MASK \
+	( SVGA3D_SURFACE_CUBEMAP |                \
+	  SVGA3D_SURFACE_AUTOGENMIPMAPS |         \
+	  SVGA3D_SURFACE_DECODE_RENDERTARGET |    \
+	  SVGA3D_SURFACE_VOLUME |                 \
+	  SVGA3D_SURFACE_1D |                     \
+	  SVGA3D_SURFACE_BIND_VERTEX_BUFFER |     \
+	  SVGA3D_SURFACE_BIND_INDEX_BUFFER |      \
+	  SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |   \
+	  SVGA3D_SURFACE_BIND_DEPTH_STENCIL |     \
+	  SVGA3D_SURFACE_BIND_STREAM_OUTPUT |     \
+	  SVGA3D_SURFACE_INACTIVE |               \
+	  SVGA3D_SURFACE_STAGING_UPLOAD |         \
+	  SVGA3D_SURFACE_STAGING_DOWNLOAD |       \
+	  SVGA3D_SURFACE_HINT_INDIRECT_UPDATE |   \
+	  SVGA3D_SURFACE_TRANSFER_FROM_BUFFER     \
+	)
+
+#define SVGA3D_SURFACE_DX_ONLY_MASK               \
+	( SVGA3D_SURFACE_BIND_STREAM_OUTPUT |     \
+	  SVGA3D_SURFACE_TRANSFER_FROM_BUFFER     \
+	)
+
+#define SVGA3D_SURFACE_STAGING_MASK               \
+	( SVGA3D_SURFACE_STAGING_UPLOAD |         \
+	  SVGA3D_SURFACE_STAGING_DOWNLOAD         \
+	)
+
+#define SVGA3D_SURFACE_BIND_MASK                  \
+	( SVGA3D_SURFACE_BIND_VERTEX_BUFFER |     \
+	  SVGA3D_SURFACE_BIND_INDEX_BUFFER |      \
+	  SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |   \
+	  SVGA3D_SURFACE_BIND_SHADER_RESOURCE |   \
+	  SVGA3D_SURFACE_BIND_RENDER_TARGET |     \
+	  SVGA3D_SURFACE_BIND_DEPTH_STENCIL |     \
+	  SVGA3D_SURFACE_BIND_STREAM_OUTPUT       \
+	)
+
+typedef enum {
+	SVGA3DFORMAT_OP_TEXTURE                               = 0x00000001,
+	SVGA3DFORMAT_OP_VOLUMETEXTURE                         = 0x00000002,
+	SVGA3DFORMAT_OP_CUBETEXTURE                           = 0x00000004,
+	SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET                = 0x00000008,
+	SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET              = 0x00000010,
+	SVGA3DFORMAT_OP_ZSTENCIL                              = 0x00000040,
+	SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH   = 0x00000080,
+
+/*
+ * This format can be used as a render target if the current display mode
+ * is the same depth if the alpha channel is ignored. e.g. if the device
+ * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
+ * format op list entry for A8R8G8B8 should have this cap.
+ */
+	SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET  = 0x00000100,
+
+/*
+ * This format contains DirectDraw support (including Flip).  This flag
+ * should not be set on alpha formats.
+ */
+	SVGA3DFORMAT_OP_DISPLAYMODE                           = 0x00000400,
+
+/*
+ * The rasterizer can support some level of Direct3D support in this format
+ * and implies that the driver can create a Context in this mode (for some
+ * render target format).  When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
+ * flag must also be set.
+ */
+	SVGA3DFORMAT_OP_3DACCELERATION                        = 0x00000800,
+
+/*
+ * This is set for a private format when the driver has put the bpp in
+ * the structure.
+ */
+	SVGA3DFORMAT_OP_PIXELSIZE                             = 0x00001000,
+
+/*
+ * Indicates that this format can be converted to any RGB format for which
+ * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
+ */
+	SVGA3DFORMAT_OP_CONVERT_TO_ARGB                       = 0x00002000,
+
+/*
+ * Indicates that this format can be used to create offscreen plain surfaces.
+ */
+	SVGA3DFORMAT_OP_OFFSCREENPLAIN                        = 0x00004000,
+
+/*
+ * Indicates that this format can be read as an SRGB texture (meaning that the
+ * sampler will linearize the looked up data)
+ */
+	SVGA3DFORMAT_OP_SRGBREAD                              = 0x00008000,
+
+/*
+ * Indicates that this format can be used in the bumpmap instructions
+ */
+	SVGA3DFORMAT_OP_BUMPMAP                               = 0x00010000,
+
+/*
+ * Indicates that this format can be sampled by the displacement map sampler
+ */
+	SVGA3DFORMAT_OP_DMAP                                  = 0x00020000,
+
+/*
+ * Indicates that this format cannot be used with texture filtering
+ */
+	SVGA3DFORMAT_OP_NOFILTER                              = 0x00040000,
+
+/*
+ * Indicates that format conversions are supported to this RGB format if
+ * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
+ */
+	SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB                    = 0x00080000,
+
+/*
+ * Indicates that this format can be written as an SRGB target
+ * (meaning that the pixel pipe will DE-linearize data on output to format)
+ */
+	SVGA3DFORMAT_OP_SRGBWRITE                             = 0x00100000,
+
+/*
+ * Indicates that this format cannot be used with alpha blending
+ */
+	SVGA3DFORMAT_OP_NOALPHABLEND                          = 0x00200000,
+
+/*
+ * Indicates that the device can auto-generate sublevels for resources
+ * of this format
+ */
+	SVGA3DFORMAT_OP_AUTOGENMIPMAP                         = 0x00400000,
+
+/*
+ * Indicates that this format can be used by the vertex texture sampler
+ */
+	SVGA3DFORMAT_OP_VERTEXTEXTURE                         = 0x00800000,
+
+/*
+ * Indicates that this format supports neither texture coordinate
+ * wrap modes, nor mipmapping.
+ */
+	SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP                  = 0x01000000
+} SVGA3dFormatOp;
+
+#define SVGA3D_FORMAT_POSITIVE                                   \
+	(SVGA3DFORMAT_OP_TEXTURE                 |               \
+	 SVGA3DFORMAT_OP_VOLUMETEXTURE           |               \
+	 SVGA3DFORMAT_OP_CUBETEXTURE             |               \
+	 SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET  |               \
+	 SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET |              \
+	 SVGA3DFORMAT_OP_ZSTENCIL                |               \
+	 SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH |   \
+	 SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET |  \
+	 SVGA3DFORMAT_OP_DISPLAYMODE             |               \
+	 SVGA3DFORMAT_OP_3DACCELERATION          |               \
+	 SVGA3DFORMAT_OP_PIXELSIZE               |               \
+	 SVGA3DFORMAT_OP_CONVERT_TO_ARGB         |               \
+	 SVGA3DFORMAT_OP_OFFSCREENPLAIN          |               \
+	 SVGA3DFORMAT_OP_SRGBREAD                |               \
+	 SVGA3DFORMAT_OP_BUMPMAP                 |               \
+	 SVGA3DFORMAT_OP_DMAP                    |               \
+	 SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB      |               \
+	 SVGA3DFORMAT_OP_SRGBWRITE               |               \
+	 SVGA3DFORMAT_OP_AUTOGENMIPMAP           |               \
+	 SVGA3DFORMAT_OP_VERTEXTEXTURE)
+
+#define SVGA3D_FORMAT_NEGATIVE                                   \
+	(SVGA3DFORMAT_OP_NOFILTER                |               \
+	 SVGA3DFORMAT_OP_NOALPHABLEND            |               \
+	 SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP)
+
+/*
+ * This structure is a conversion of SVGA3DFORMAT_OP_*.
+ * Entries must be located at the same position.
+ */ +typedef union { + uint32 value; + struct { + uint32 texture : 1; + uint32 volumeTexture : 1; + uint32 cubeTexture : 1; + uint32 offscreenRenderTarget : 1; + uint32 sameFormatRenderTarget : 1; + uint32 unknown1 : 1; + uint32 zStencil : 1; + uint32 zStencilArbitraryDepth : 1; + uint32 sameFormatUpToAlpha : 1; + uint32 unknown2 : 1; + uint32 displayMode : 1; + uint32 acceleration3d : 1; + uint32 pixelSize : 1; + uint32 convertToARGB : 1; + uint32 offscreenPlain : 1; + uint32 sRGBRead : 1; + uint32 bumpMap : 1; + uint32 dmap : 1; + uint32 noFilter : 1; + uint32 memberOfGroupARGB : 1; + uint32 sRGBWrite : 1; + uint32 noAlphaBlend : 1; + uint32 autoGenMipMap : 1; + uint32 vertexTexture : 1; + uint32 noTexCoordWrapNorMip : 1; + }; +} SVGA3dSurfaceFormatCaps; + +/* + * SVGA_3D_CMD_SETRENDERSTATE Types. All value types + * must fit in a uint32. + */ + +typedef enum { + SVGA3D_RS_INVALID = 0, + SVGA3D_RS_MIN = 1, + SVGA3D_RS_ZENABLE = 1, /* SVGA3dBool */ + SVGA3D_RS_ZWRITEENABLE = 2, /* SVGA3dBool */ + SVGA3D_RS_ALPHATESTENABLE = 3, /* SVGA3dBool */ + SVGA3D_RS_DITHERENABLE = 4, /* SVGA3dBool */ + SVGA3D_RS_BLENDENABLE = 5, /* SVGA3dBool */ + SVGA3D_RS_FOGENABLE = 6, /* SVGA3dBool */ + SVGA3D_RS_SPECULARENABLE = 7, /* SVGA3dBool */ + SVGA3D_RS_STENCILENABLE = 8, /* SVGA3dBool */ + SVGA3D_RS_LIGHTINGENABLE = 9, /* SVGA3dBool */ + SVGA3D_RS_NORMALIZENORMALS = 10, /* SVGA3dBool */ + SVGA3D_RS_POINTSPRITEENABLE = 11, /* SVGA3dBool */ + SVGA3D_RS_POINTSCALEENABLE = 12, /* SVGA3dBool */ + SVGA3D_RS_STENCILREF = 13, /* uint32 */ + SVGA3D_RS_STENCILMASK = 14, /* uint32 */ + SVGA3D_RS_STENCILWRITEMASK = 15, /* uint32 */ + SVGA3D_RS_FOGSTART = 16, /* float */ + SVGA3D_RS_FOGEND = 17, /* float */ + SVGA3D_RS_FOGDENSITY = 18, /* float */ + SVGA3D_RS_POINTSIZE = 19, /* float */ + SVGA3D_RS_POINTSIZEMIN = 20, /* float */ + SVGA3D_RS_POINTSIZEMAX = 21, /* float */ + SVGA3D_RS_POINTSCALE_A = 22, /* float */ + SVGA3D_RS_POINTSCALE_B = 23, /* float */ + SVGA3D_RS_POINTSCALE_C = 24, /* float */ + SVGA3D_RS_FOGCOLOR = 25, /* SVGA3dColor */ + SVGA3D_RS_AMBIENT = 26, /* SVGA3dColor */ + SVGA3D_RS_CLIPPLANEENABLE = 27, /* SVGA3dClipPlanes */ + SVGA3D_RS_FOGMODE = 28, /* SVGA3dFogMode */ + SVGA3D_RS_FILLMODE = 29, /* SVGA3dFillMode */ + SVGA3D_RS_SHADEMODE = 30, /* SVGA3dShadeMode */ + SVGA3D_RS_LINEPATTERN = 31, /* SVGA3dLinePattern */ + SVGA3D_RS_SRCBLEND = 32, /* SVGA3dBlendOp */ + SVGA3D_RS_DSTBLEND = 33, /* SVGA3dBlendOp */ + SVGA3D_RS_BLENDEQUATION = 34, /* SVGA3dBlendEquation */ + SVGA3D_RS_CULLMODE = 35, /* SVGA3dFace */ + SVGA3D_RS_ZFUNC = 36, /* SVGA3dCmpFunc */ + SVGA3D_RS_ALPHAFUNC = 37, /* SVGA3dCmpFunc */ + SVGA3D_RS_STENCILFUNC = 38, /* SVGA3dCmpFunc */ + SVGA3D_RS_STENCILFAIL = 39, /* SVGA3dStencilOp */ + SVGA3D_RS_STENCILZFAIL = 40, /* SVGA3dStencilOp */ + SVGA3D_RS_STENCILPASS = 41, /* SVGA3dStencilOp */ + SVGA3D_RS_ALPHAREF = 42, /* float (0.0 .. 
1.0) */
+	SVGA3D_RS_FRONTWINDING              = 43,    /* SVGA3dFrontWinding */
+	SVGA3D_RS_COORDINATETYPE            = 44,    /* SVGA3dCoordinateType */
+	SVGA3D_RS_ZBIAS                     = 45,    /* float */
+	SVGA3D_RS_RANGEFOGENABLE            = 46,    /* SVGA3dBool */
+	SVGA3D_RS_COLORWRITEENABLE          = 47,    /* SVGA3dColorMask */
+	SVGA3D_RS_VERTEXMATERIALENABLE      = 48,    /* SVGA3dBool */
+	SVGA3D_RS_DIFFUSEMATERIALSOURCE     = 49,    /* SVGA3dVertexMaterial */
+	SVGA3D_RS_SPECULARMATERIALSOURCE    = 50,    /* SVGA3dVertexMaterial */
+	SVGA3D_RS_AMBIENTMATERIALSOURCE     = 51,    /* SVGA3dVertexMaterial */
+	SVGA3D_RS_EMISSIVEMATERIALSOURCE    = 52,    /* SVGA3dVertexMaterial */
+	SVGA3D_RS_TEXTUREFACTOR             = 53,    /* SVGA3dColor */
+	SVGA3D_RS_LOCALVIEWER               = 54,    /* SVGA3dBool */
+	SVGA3D_RS_SCISSORTESTENABLE         = 55,    /* SVGA3dBool */
+	SVGA3D_RS_BLENDCOLOR                = 56,    /* SVGA3dColor */
+	SVGA3D_RS_STENCILENABLE2SIDED       = 57,    /* SVGA3dBool */
+	SVGA3D_RS_CCWSTENCILFUNC            = 58,    /* SVGA3dCmpFunc */
+	SVGA3D_RS_CCWSTENCILFAIL            = 59,    /* SVGA3dStencilOp */
+	SVGA3D_RS_CCWSTENCILZFAIL           = 60,    /* SVGA3dStencilOp */
+	SVGA3D_RS_CCWSTENCILPASS            = 61,    /* SVGA3dStencilOp */
+	SVGA3D_RS_VERTEXBLEND               = 62,    /* SVGA3dVertexBlendFlags */
+	SVGA3D_RS_SLOPESCALEDEPTHBIAS       = 63,    /* float */
+	SVGA3D_RS_DEPTHBIAS                 = 64,    /* float */
+
+
+	/*
+	 * Output Gamma Level
+	 *
+	 * Output gamma affects the gamma curve of colors that are output from
+	 * the rendering pipeline.  A value of 1.0 specifies a linear color
+	 * space.  If the value is <= 0.0, gamma correction is ignored and
+	 * linear color space is used.
+	 */
+
+	SVGA3D_RS_OUTPUTGAMMA               = 65,    /* float */
+	SVGA3D_RS_ZVISIBLE                  = 66,    /* SVGA3dBool */
+	SVGA3D_RS_LASTPIXEL                 = 67,    /* SVGA3dBool */
+	SVGA3D_RS_CLIPPING                  = 68,    /* SVGA3dBool */
+	SVGA3D_RS_WRAP0                     = 69,    /* SVGA3dWrapFlags */
+	SVGA3D_RS_WRAP1                     = 70,    /* SVGA3dWrapFlags */
+	SVGA3D_RS_WRAP2                     = 71,    /* SVGA3dWrapFlags */
+	SVGA3D_RS_WRAP3                     = 72,    /* SVGA3dWrapFlags */
+	SVGA3D_RS_WRAP4                     = 73,    /* SVGA3dWrapFlags */
+	SVGA3D_RS_WRAP5                     = 74,    /* SVGA3dWrapFlags */
+	SVGA3D_RS_WRAP6                     = 75,    /* SVGA3dWrapFlags */
+	SVGA3D_RS_WRAP7                     = 76,    /* SVGA3dWrapFlags */
+	SVGA3D_RS_WRAP8                     = 77,    /* SVGA3dWrapFlags */
+	SVGA3D_RS_WRAP9                     = 78,    /* SVGA3dWrapFlags */
+	SVGA3D_RS_WRAP10                    = 79,    /* SVGA3dWrapFlags */
+	SVGA3D_RS_WRAP11                    = 80,    /* SVGA3dWrapFlags */
+	SVGA3D_RS_WRAP12                    = 81,    /* SVGA3dWrapFlags */
+	SVGA3D_RS_WRAP13                    = 82,    /* SVGA3dWrapFlags */
+	SVGA3D_RS_WRAP14                    = 83,    /* SVGA3dWrapFlags */
+	SVGA3D_RS_WRAP15                    = 84,    /* SVGA3dWrapFlags */
+	SVGA3D_RS_MULTISAMPLEANTIALIAS      = 85,    /* SVGA3dBool */
+	SVGA3D_RS_MULTISAMPLEMASK           = 86,    /* uint32 */
+	SVGA3D_RS_INDEXEDVERTEXBLENDENABLE  = 87,    /* SVGA3dBool */
+	SVGA3D_RS_TWEENFACTOR               = 88,    /* float */
+	SVGA3D_RS_ANTIALIASEDLINEENABLE     = 89,    /* SVGA3dBool */
+	SVGA3D_RS_COLORWRITEENABLE1         = 90,    /* SVGA3dColorMask */
+	SVGA3D_RS_COLORWRITEENABLE2         = 91,    /* SVGA3dColorMask */
+	SVGA3D_RS_COLORWRITEENABLE3         = 92,    /* SVGA3dColorMask */
+	SVGA3D_RS_SEPARATEALPHABLENDENABLE  = 93,    /* SVGA3dBool */
+	SVGA3D_RS_SRCBLENDALPHA             = 94,    /* SVGA3dBlendOp */
+	SVGA3D_RS_DSTBLENDALPHA             = 95,    /* SVGA3dBlendOp */
+	SVGA3D_RS_BLENDEQUATIONALPHA        = 96,    /* SVGA3dBlendEquation */
+	SVGA3D_RS_TRANSPARENCYANTIALIAS     = 97,    /* SVGA3dTransparencyAntialiasType */
+	SVGA3D_RS_LINEWIDTH                 = 98,    /* float */
+	SVGA3D_RS_MAX
+} SVGA3dRenderStateName;
+
+typedef enum {
+	SVGA3D_TRANSPARENCYANTIALIAS_NORMAL          = 0,
+	SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE = 1,
+	SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE     = 2,
+	SVGA3D_TRANSPARENCYANTIALIAS_MAX
+} SVGA3dTransparencyAntialiasType;
+
+typedef enum {
+	SVGA3D_VERTEXMATERIAL_NONE     = 0,    /* Use the value in the current
material */ + SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */ + SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */ + SVGA3D_VERTEXMATERIAL_MAX = 3, +} SVGA3dVertexMaterial; + +typedef enum { + SVGA3D_FILLMODE_INVALID = 0, + SVGA3D_FILLMODE_MIN = 1, + SVGA3D_FILLMODE_POINT = 1, + SVGA3D_FILLMODE_LINE = 2, + SVGA3D_FILLMODE_FILL = 3, + SVGA3D_FILLMODE_MAX +} SVGA3dFillModeType; + + +typedef +#include "vmware_pack_begin.h" +union { + struct { + uint16 mode; /* SVGA3dFillModeType */ + uint16 face; /* SVGA3dFace */ + }; + uint32 uintValue; +} +#include "vmware_pack_end.h" +SVGA3dFillMode; + +typedef enum { + SVGA3D_SHADEMODE_INVALID = 0, + SVGA3D_SHADEMODE_FLAT = 1, + SVGA3D_SHADEMODE_SMOOTH = 2, + SVGA3D_SHADEMODE_PHONG = 3, /* Not supported */ + SVGA3D_SHADEMODE_MAX +} SVGA3dShadeMode; + +typedef +#include "vmware_pack_begin.h" +union { + struct { + uint16 repeat; + uint16 pattern; + }; + uint32 uintValue; +} +#include "vmware_pack_end.h" +SVGA3dLinePattern; + +typedef enum { + SVGA3D_BLENDOP_INVALID = 0, + SVGA3D_BLENDOP_MIN = 1, + SVGA3D_BLENDOP_ZERO = 1, + SVGA3D_BLENDOP_ONE = 2, + SVGA3D_BLENDOP_SRCCOLOR = 3, + SVGA3D_BLENDOP_INVSRCCOLOR = 4, + SVGA3D_BLENDOP_SRCALPHA = 5, + SVGA3D_BLENDOP_INVSRCALPHA = 6, + SVGA3D_BLENDOP_DESTALPHA = 7, + SVGA3D_BLENDOP_INVDESTALPHA = 8, + SVGA3D_BLENDOP_DESTCOLOR = 9, + SVGA3D_BLENDOP_INVDESTCOLOR = 10, + SVGA3D_BLENDOP_SRCALPHASAT = 11, + SVGA3D_BLENDOP_BLENDFACTOR = 12, + SVGA3D_BLENDOP_INVBLENDFACTOR = 13, + SVGA3D_BLENDOP_SRC1COLOR = 14, + SVGA3D_BLENDOP_INVSRC1COLOR = 15, + SVGA3D_BLENDOP_SRC1ALPHA = 16, + SVGA3D_BLENDOP_INVSRC1ALPHA = 17, + SVGA3D_BLENDOP_BLENDFACTORALPHA = 18, + SVGA3D_BLENDOP_INVBLENDFACTORALPHA = 19, + SVGA3D_BLENDOP_MAX +} SVGA3dBlendOp; + +typedef enum { + SVGA3D_BLENDEQ_INVALID = 0, + SVGA3D_BLENDEQ_MIN = 1, + SVGA3D_BLENDEQ_ADD = 1, + SVGA3D_BLENDEQ_SUBTRACT = 2, + SVGA3D_BLENDEQ_REVSUBTRACT = 3, + SVGA3D_BLENDEQ_MINIMUM = 4, + SVGA3D_BLENDEQ_MAXIMUM = 5, + SVGA3D_BLENDEQ_MAX +} SVGA3dBlendEquation; + +typedef enum { + SVGA3D_DX11_LOGICOP_MIN = 0, + SVGA3D_DX11_LOGICOP_CLEAR = 0, + SVGA3D_DX11_LOGICOP_SET = 1, + SVGA3D_DX11_LOGICOP_COPY = 2, + SVGA3D_DX11_LOGICOP_COPY_INVERTED = 3, + SVGA3D_DX11_LOGICOP_NOOP = 4, + SVGA3D_DX11_LOGICOP_INVERT = 5, + SVGA3D_DX11_LOGICOP_AND = 6, + SVGA3D_DX11_LOGICOP_NAND = 7, + SVGA3D_DX11_LOGICOP_OR = 8, + SVGA3D_DX11_LOGICOP_NOR = 9, + SVGA3D_DX11_LOGICOP_XOR = 10, + SVGA3D_DX11_LOGICOP_EQUIV = 11, + SVGA3D_DX11_LOGICOP_AND_REVERSE = 12, + SVGA3D_DX11_LOGICOP_AND_INVERTED = 13, + SVGA3D_DX11_LOGICOP_OR_REVERSE = 14, + SVGA3D_DX11_LOGICOP_OR_INVERTED = 15, + SVGA3D_DX11_LOGICOP_MAX +} SVGA3dDX11LogicOp; + +typedef enum { + SVGA3D_FRONTWINDING_INVALID = 0, + SVGA3D_FRONTWINDING_CW = 1, + SVGA3D_FRONTWINDING_CCW = 2, + SVGA3D_FRONTWINDING_MAX +} SVGA3dFrontWinding; + +typedef enum { + SVGA3D_FACE_INVALID = 0, + SVGA3D_FACE_NONE = 1, + SVGA3D_FACE_MIN = 1, + SVGA3D_FACE_FRONT = 2, + SVGA3D_FACE_BACK = 3, + SVGA3D_FACE_FRONT_BACK = 4, + SVGA3D_FACE_MAX +} SVGA3dFace; + +/* + * The order and the values should not be changed + */ + +typedef enum { + SVGA3D_CMP_INVALID = 0, + SVGA3D_CMP_NEVER = 1, + SVGA3D_CMP_LESS = 2, + SVGA3D_CMP_EQUAL = 3, + SVGA3D_CMP_LESSEQUAL = 4, + SVGA3D_CMP_GREATER = 5, + SVGA3D_CMP_NOTEQUAL = 6, + SVGA3D_CMP_GREATEREQUAL = 7, + SVGA3D_CMP_ALWAYS = 8, + SVGA3D_CMP_MAX +} SVGA3dCmpFunc; + +/* + * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows + * the fog factor to be specified in the 
alpha component of the specular + * (a.k.a. secondary) vertex color. + */ +typedef enum { + SVGA3D_FOGFUNC_INVALID = 0, + SVGA3D_FOGFUNC_EXP = 1, + SVGA3D_FOGFUNC_EXP2 = 2, + SVGA3D_FOGFUNC_LINEAR = 3, + SVGA3D_FOGFUNC_PER_VERTEX = 4 +} SVGA3dFogFunction; + +/* + * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex + * or per-pixel basis. + */ +typedef enum { + SVGA3D_FOGTYPE_INVALID = 0, + SVGA3D_FOGTYPE_VERTEX = 1, + SVGA3D_FOGTYPE_PIXEL = 2, + SVGA3D_FOGTYPE_MAX = 3 +} SVGA3dFogType; + +/* + * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is + * computed using the eye Z value of each pixel (or vertex), whereas range- + * based fog is computed using the actual distance (range) to the eye. + */ +typedef enum { + SVGA3D_FOGBASE_INVALID = 0, + SVGA3D_FOGBASE_DEPTHBASED = 1, + SVGA3D_FOGBASE_RANGEBASED = 2, + SVGA3D_FOGBASE_MAX = 3 +} SVGA3dFogBase; + +typedef enum { + SVGA3D_STENCILOP_INVALID = 0, + SVGA3D_STENCILOP_MIN = 1, + SVGA3D_STENCILOP_KEEP = 1, + SVGA3D_STENCILOP_ZERO = 2, + SVGA3D_STENCILOP_REPLACE = 3, + SVGA3D_STENCILOP_INCRSAT = 4, + SVGA3D_STENCILOP_DECRSAT = 5, + SVGA3D_STENCILOP_INVERT = 6, + SVGA3D_STENCILOP_INCR = 7, + SVGA3D_STENCILOP_DECR = 8, + SVGA3D_STENCILOP_MAX +} SVGA3dStencilOp; + +typedef enum { + SVGA3D_CLIPPLANE_0 = (1 << 0), + SVGA3D_CLIPPLANE_1 = (1 << 1), + SVGA3D_CLIPPLANE_2 = (1 << 2), + SVGA3D_CLIPPLANE_3 = (1 << 3), + SVGA3D_CLIPPLANE_4 = (1 << 4), + SVGA3D_CLIPPLANE_5 = (1 << 5), +} SVGA3dClipPlanes; + +typedef enum { + SVGA3D_CLEAR_COLOR = 0x1, + SVGA3D_CLEAR_DEPTH = 0x2, + SVGA3D_CLEAR_STENCIL = 0x4, + + /* + * Hint only, must be used together with SVGA3D_CLEAR_COLOR. If + * SVGA3D_CLEAR_DEPTH or SVGA3D_CLEAR_STENCIL bit is set, this + * bit will be ignored. + */ + SVGA3D_CLEAR_COLORFILL = 0x8 +} SVGA3dClearFlag; + +typedef enum { + SVGA3D_RT_DEPTH = 0, + SVGA3D_RT_MIN = 0, + SVGA3D_RT_STENCIL = 1, + SVGA3D_RT_COLOR0 = 2, + SVGA3D_RT_COLOR1 = 3, + SVGA3D_RT_COLOR2 = 4, + SVGA3D_RT_COLOR3 = 5, + SVGA3D_RT_COLOR4 = 6, + SVGA3D_RT_COLOR5 = 7, + SVGA3D_RT_COLOR6 = 8, + SVGA3D_RT_COLOR7 = 9, + SVGA3D_RT_MAX, + SVGA3D_RT_INVALID = ((uint32)-1), +} SVGA3dRenderTargetType; + +#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1) + +typedef +#include "vmware_pack_begin.h" +union { + struct { + uint32 red : 1; + uint32 green : 1; + uint32 blue : 1; + uint32 alpha : 1; + }; + uint32 uintValue; +} +#include "vmware_pack_end.h" +SVGA3dColorMask; + +typedef enum { + SVGA3D_VBLEND_DISABLE = 0, + SVGA3D_VBLEND_1WEIGHT = 1, + SVGA3D_VBLEND_2WEIGHT = 2, + SVGA3D_VBLEND_3WEIGHT = 3, + SVGA3D_VBLEND_MAX = 4, +} SVGA3dVertexBlendFlags; + +typedef enum { + SVGA3D_WRAPCOORD_0 = 1 << 0, + SVGA3D_WRAPCOORD_1 = 1 << 1, + SVGA3D_WRAPCOORD_2 = 1 << 2, + SVGA3D_WRAPCOORD_3 = 1 << 3, + SVGA3D_WRAPCOORD_ALL = 0xF, +} SVGA3dWrapFlags; + +/* + * SVGA_3D_CMD_TEXTURESTATE Types. All value types + * must fit in a uint32. 
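A note on the packed unions used throughout these headers (SVGA3dFillMode, SVGA3dLinePattern, SVGA3dColorMask and friends): they let a driver build a bitfield and hand the overlaid uintValue straight to a register-sized state value. A minimal stand-alone sketch, with the union redeclared locally, plain stdint types standing in for the header's uint32, and the printed value assuming the usual little-endian bitfield layout:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t uint32;

/* Local mirror of the SVGA3dColorMask union defined above. */
typedef union {
	struct {
		uint32 red   : 1;
		uint32 green : 1;
		uint32 blue  : 1;
		uint32 alpha : 1;
	};
	uint32 uintValue;
} ColorMask;

int main(void)
{
	ColorMask mask = { .uintValue = 0 };

	/* Write RGB but leave the destination alpha channel untouched. */
	mask.red = mask.green = mask.blue = 1;

	/* The raw value is what a color-write render state would carry. */
	printf("0x%x\n", mask.uintValue); /* 0x7 on common ABIs */
	return 0;
}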
+ */ + +typedef enum { + SVGA3D_TS_INVALID = 0, + SVGA3D_TS_MIN = 1, + SVGA3D_TS_BIND_TEXTURE = 1, /* SVGA3dSurfaceId */ + SVGA3D_TS_COLOROP = 2, /* SVGA3dTextureCombiner */ + SVGA3D_TS_COLORARG1 = 3, /* SVGA3dTextureArgData */ + SVGA3D_TS_COLORARG2 = 4, /* SVGA3dTextureArgData */ + SVGA3D_TS_ALPHAOP = 5, /* SVGA3dTextureCombiner */ + SVGA3D_TS_ALPHAARG1 = 6, /* SVGA3dTextureArgData */ + SVGA3D_TS_ALPHAARG2 = 7, /* SVGA3dTextureArgData */ + SVGA3D_TS_ADDRESSU = 8, /* SVGA3dTextureAddress */ + SVGA3D_TS_ADDRESSV = 9, /* SVGA3dTextureAddress */ + SVGA3D_TS_MIPFILTER = 10, /* SVGA3dTextureFilter */ + SVGA3D_TS_MAGFILTER = 11, /* SVGA3dTextureFilter */ + SVGA3D_TS_MINFILTER = 12, /* SVGA3dTextureFilter */ + SVGA3D_TS_BORDERCOLOR = 13, /* SVGA3dColor */ + SVGA3D_TS_TEXCOORDINDEX = 14, /* uint32 */ + SVGA3D_TS_TEXTURETRANSFORMFLAGS = 15, /* SVGA3dTexTransformFlags */ + SVGA3D_TS_TEXCOORDGEN = 16, /* SVGA3dTextureCoordGen */ + SVGA3D_TS_BUMPENVMAT00 = 17, /* float */ + SVGA3D_TS_BUMPENVMAT01 = 18, /* float */ + SVGA3D_TS_BUMPENVMAT10 = 19, /* float */ + SVGA3D_TS_BUMPENVMAT11 = 20, /* float */ + SVGA3D_TS_TEXTURE_MIPMAP_LEVEL = 21, /* uint32 */ + SVGA3D_TS_TEXTURE_LOD_BIAS = 22, /* float */ + SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL = 23, /* uint32 */ + SVGA3D_TS_ADDRESSW = 24, /* SVGA3dTextureAddress */ + + + /* + * Sampler Gamma Level + * + * Sampler gamma effects the color of samples taken from the sampler. A + * value of 1.0 will produce linear samples. If the value is <= 0.0 the + * gamma value is ignored and a linear space is used. + */ + + SVGA3D_TS_GAMMA = 25, /* float */ + SVGA3D_TS_BUMPENVLSCALE = 26, /* float */ + SVGA3D_TS_BUMPENVLOFFSET = 27, /* float */ + SVGA3D_TS_COLORARG0 = 28, /* SVGA3dTextureArgData */ + SVGA3D_TS_ALPHAARG0 = 29, /* SVGA3dTextureArgData */ + SVGA3D_TS_PREGB_MAX = 30, /* Max value before GBObjects */ + SVGA3D_TS_CONSTANT = 30, /* SVGA3dColor */ + SVGA3D_TS_COLOR_KEY_ENABLE = 31, /* SVGA3dBool */ + SVGA3D_TS_COLOR_KEY = 32, /* SVGA3dColor */ + SVGA3D_TS_MAX +} SVGA3dTextureStateName; + +typedef enum { + SVGA3D_TC_INVALID = 0, + SVGA3D_TC_DISABLE = 1, + SVGA3D_TC_SELECTARG1 = 2, + SVGA3D_TC_SELECTARG2 = 3, + SVGA3D_TC_MODULATE = 4, + SVGA3D_TC_ADD = 5, + SVGA3D_TC_ADDSIGNED = 6, + SVGA3D_TC_SUBTRACT = 7, + SVGA3D_TC_BLENDTEXTUREALPHA = 8, + SVGA3D_TC_BLENDDIFFUSEALPHA = 9, + SVGA3D_TC_BLENDCURRENTALPHA = 10, + SVGA3D_TC_BLENDFACTORALPHA = 11, + SVGA3D_TC_MODULATE2X = 12, + SVGA3D_TC_MODULATE4X = 13, + SVGA3D_TC_DSDT = 14, + SVGA3D_TC_DOTPRODUCT3 = 15, + SVGA3D_TC_BLENDTEXTUREALPHAPM = 16, + SVGA3D_TC_ADDSIGNED2X = 17, + SVGA3D_TC_ADDSMOOTH = 18, + SVGA3D_TC_PREMODULATE = 19, + SVGA3D_TC_MODULATEALPHA_ADDCOLOR = 20, + SVGA3D_TC_MODULATECOLOR_ADDALPHA = 21, + SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22, + SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23, + SVGA3D_TC_BUMPENVMAPLUMINANCE = 24, + SVGA3D_TC_MULTIPLYADD = 25, + SVGA3D_TC_LERP = 26, + SVGA3D_TC_MAX +} SVGA3dTextureCombiner; + +#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0) + +typedef enum { + SVGA3D_TEX_ADDRESS_INVALID = 0, + SVGA3D_TEX_ADDRESS_MIN = 1, + SVGA3D_TEX_ADDRESS_WRAP = 1, + SVGA3D_TEX_ADDRESS_MIRROR = 2, + SVGA3D_TEX_ADDRESS_CLAMP = 3, + SVGA3D_TEX_ADDRESS_BORDER = 4, + SVGA3D_TEX_ADDRESS_MIRRORONCE = 5, + SVGA3D_TEX_ADDRESS_EDGE = 6, + SVGA3D_TEX_ADDRESS_MAX +} SVGA3dTextureAddress; + +/* + * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is + * disabled, and the rasterizer should use the magnification filter instead. 
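A tiny helper illustrating the rule in the comment above: SVGA3D_TEX_FILTER_NONE as the minification filter turns mipmapping off, and the magnification filter is used in its place. This is a sketch of guest-side logic, not part of the device interface; the enum values mirror the SVGA3dTextureFilter definition just below:

enum {
	TEX_FILTER_NONE    = 0,
	TEX_FILTER_NEAREST = 1,
	TEX_FILTER_LINEAR  = 2,
};

/* Resolve the filter the rasterizer should actually minify with. */
static int effective_min_filter(int min_filter, int mag_filter)
{
	return (min_filter == TEX_FILTER_NONE) ? mag_filter : min_filter;
}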
+ */ +typedef enum { + SVGA3D_TEX_FILTER_NONE = 0, + SVGA3D_TEX_FILTER_MIN = 0, + SVGA3D_TEX_FILTER_NEAREST = 1, + SVGA3D_TEX_FILTER_LINEAR = 2, + SVGA3D_TEX_FILTER_ANISOTROPIC = 3, + SVGA3D_TEX_FILTER_FLATCUBIC = 4, /* Deprecated, not implemented */ + SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, /* Deprecated, not implemented */ + SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, /* Not currently implemented */ + SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, /* Not currently implemented */ + SVGA3D_TEX_FILTER_MAX +} SVGA3dTextureFilter; + +typedef enum { + SVGA3D_TEX_TRANSFORM_OFF = 0, + SVGA3D_TEX_TRANSFORM_S = (1 << 0), + SVGA3D_TEX_TRANSFORM_T = (1 << 1), + SVGA3D_TEX_TRANSFORM_R = (1 << 2), + SVGA3D_TEX_TRANSFORM_Q = (1 << 3), + SVGA3D_TEX_PROJECTED = (1 << 15), +} SVGA3dTexTransformFlags; + +typedef enum { + SVGA3D_TEXCOORD_GEN_OFF = 0, + SVGA3D_TEXCOORD_GEN_EYE_POSITION = 1, + SVGA3D_TEXCOORD_GEN_EYE_NORMAL = 2, + SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3, + SVGA3D_TEXCOORD_GEN_SPHERE = 4, + SVGA3D_TEXCOORD_GEN_MAX +} SVGA3dTextureCoordGen; + +/* + * Texture argument constants for texture combiner + */ +typedef enum { + SVGA3D_TA_INVALID = 0, + SVGA3D_TA_TFACTOR = 1, + SVGA3D_TA_PREVIOUS = 2, + SVGA3D_TA_DIFFUSE = 3, + SVGA3D_TA_TEXTURE = 4, + SVGA3D_TA_SPECULAR = 5, + SVGA3D_TA_CONSTANT = 6, + SVGA3D_TA_MAX +} SVGA3dTextureArgData; + +#define SVGA3D_TM_MASK_LEN 4 + +/* Modifiers for texture argument constants defined above. */ +typedef enum { + SVGA3D_TM_NONE = 0, + SVGA3D_TM_ALPHA = (1 << SVGA3D_TM_MASK_LEN), + SVGA3D_TM_ONE_MINUS = (2 << SVGA3D_TM_MASK_LEN), +} SVGA3dTextureArgModifier; + +/* + * Vertex declarations + * + * Notes: + * + * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you + * draw with any POSITIONT vertex arrays, the programmable vertex + * pipeline will be implicitly disabled. Drawing will take place as if + * no vertex shader was bound. + */ + +typedef enum { + SVGA3D_DECLUSAGE_POSITION = 0, + SVGA3D_DECLUSAGE_BLENDWEIGHT, + SVGA3D_DECLUSAGE_BLENDINDICES, + SVGA3D_DECLUSAGE_NORMAL, + SVGA3D_DECLUSAGE_PSIZE, + SVGA3D_DECLUSAGE_TEXCOORD, + SVGA3D_DECLUSAGE_TANGENT, + SVGA3D_DECLUSAGE_BINORMAL, + SVGA3D_DECLUSAGE_TESSFACTOR, + SVGA3D_DECLUSAGE_POSITIONT, + SVGA3D_DECLUSAGE_COLOR, + SVGA3D_DECLUSAGE_FOG, + SVGA3D_DECLUSAGE_DEPTH, + SVGA3D_DECLUSAGE_SAMPLE, + SVGA3D_DECLUSAGE_MAX +} SVGA3dDeclUsage; + +typedef enum { + SVGA3D_DECLMETHOD_DEFAULT = 0, + SVGA3D_DECLMETHOD_PARTIALU, + SVGA3D_DECLMETHOD_PARTIALV, + SVGA3D_DECLMETHOD_CROSSUV, /* Normal */ + SVGA3D_DECLMETHOD_UV, + SVGA3D_DECLMETHOD_LOOKUP, /* Lookup a displacement map */ + SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement */ + /* map */ +} SVGA3dDeclMethod; + +typedef enum { + SVGA3D_DECLTYPE_FLOAT1 = 0, + SVGA3D_DECLTYPE_FLOAT2 = 1, + SVGA3D_DECLTYPE_FLOAT3 = 2, + SVGA3D_DECLTYPE_FLOAT4 = 3, + SVGA3D_DECLTYPE_D3DCOLOR = 4, + SVGA3D_DECLTYPE_UBYTE4 = 5, + SVGA3D_DECLTYPE_SHORT2 = 6, + SVGA3D_DECLTYPE_SHORT4 = 7, + SVGA3D_DECLTYPE_UBYTE4N = 8, + SVGA3D_DECLTYPE_SHORT2N = 9, + SVGA3D_DECLTYPE_SHORT4N = 10, + SVGA3D_DECLTYPE_USHORT2N = 11, + SVGA3D_DECLTYPE_USHORT4N = 12, + SVGA3D_DECLTYPE_UDEC3 = 13, + SVGA3D_DECLTYPE_DEC3N = 14, + SVGA3D_DECLTYPE_FLOAT16_2 = 15, + SVGA3D_DECLTYPE_FLOAT16_4 = 16, + SVGA3D_DECLTYPE_MAX, +} SVGA3dDeclType; + +/* + * This structure is used for the divisor for geometry instancing; + * it's a direct translation of the Direct3D equivalent. + */ +typedef union { + struct { + /* + * For index data, this number represents the number of instances to draw. 
+ * For instance data, this number represents the number of + * instances/vertex in this stream + */ + uint32 count : 30; + + /* + * This is 1 if this is supposed to be the data that is repeated for + * every instance. + */ + uint32 indexedData : 1; + + /* + * This is 1 if this is supposed to be the per-instance data. + */ + uint32 instanceData : 1; + }; + + uint32 value; +} SVGA3dVertexDivisor; + +typedef enum { + /* + * SVGA3D_PRIMITIVE_INVALID is a valid primitive type. + * + * List MIN second so debuggers will think INVALID is + * the correct name. + */ + SVGA3D_PRIMITIVE_INVALID = 0, + SVGA3D_PRIMITIVE_MIN = 0, + SVGA3D_PRIMITIVE_TRIANGLELIST = 1, + SVGA3D_PRIMITIVE_POINTLIST = 2, + SVGA3D_PRIMITIVE_LINELIST = 3, + SVGA3D_PRIMITIVE_LINESTRIP = 4, + SVGA3D_PRIMITIVE_TRIANGLESTRIP = 5, + SVGA3D_PRIMITIVE_TRIANGLEFAN = 6, + SVGA3D_PRIMITIVE_LINELIST_ADJ = 7, + SVGA3D_PRIMITIVE_PREDX_MAX = 7, + SVGA3D_PRIMITIVE_LINESTRIP_ADJ = 8, + SVGA3D_PRIMITIVE_TRIANGLELIST_ADJ = 9, + SVGA3D_PRIMITIVE_TRIANGLESTRIP_ADJ = 10, + SVGA3D_PRIMITIVE_MAX +} SVGA3dPrimitiveType; + +typedef enum { + SVGA3D_COORDINATE_INVALID = 0, + SVGA3D_COORDINATE_LEFTHANDED = 1, + SVGA3D_COORDINATE_RIGHTHANDED = 2, + SVGA3D_COORDINATE_MAX +} SVGA3dCoordinateType; + +typedef enum { + SVGA3D_TRANSFORM_INVALID = 0, + SVGA3D_TRANSFORM_WORLD = 1, + SVGA3D_TRANSFORM_MIN = 1, + SVGA3D_TRANSFORM_VIEW = 2, + SVGA3D_TRANSFORM_PROJECTION = 3, + SVGA3D_TRANSFORM_TEXTURE0 = 4, + SVGA3D_TRANSFORM_TEXTURE1 = 5, + SVGA3D_TRANSFORM_TEXTURE2 = 6, + SVGA3D_TRANSFORM_TEXTURE3 = 7, + SVGA3D_TRANSFORM_TEXTURE4 = 8, + SVGA3D_TRANSFORM_TEXTURE5 = 9, + SVGA3D_TRANSFORM_TEXTURE6 = 10, + SVGA3D_TRANSFORM_TEXTURE7 = 11, + SVGA3D_TRANSFORM_WORLD1 = 12, + SVGA3D_TRANSFORM_WORLD2 = 13, + SVGA3D_TRANSFORM_WORLD3 = 14, + SVGA3D_TRANSFORM_MAX +} SVGA3dTransformType; + +typedef enum { + SVGA3D_LIGHTTYPE_INVALID = 0, + SVGA3D_LIGHTTYPE_MIN = 1, + SVGA3D_LIGHTTYPE_POINT = 1, + SVGA3D_LIGHTTYPE_SPOT1 = 2, /* 1-cone, in degrees */ + SVGA3D_LIGHTTYPE_SPOT2 = 3, /* 2-cone, in radians */ + SVGA3D_LIGHTTYPE_DIRECTIONAL = 4, + SVGA3D_LIGHTTYPE_MAX +} SVGA3dLightType; + +typedef enum { + SVGA3D_CUBEFACE_POSX = 0, + SVGA3D_CUBEFACE_NEGX = 1, + SVGA3D_CUBEFACE_POSY = 2, + SVGA3D_CUBEFACE_NEGY = 3, + SVGA3D_CUBEFACE_POSZ = 4, + SVGA3D_CUBEFACE_NEGZ = 5, +} SVGA3dCubeFace; + +typedef enum { + SVGA3D_SHADERTYPE_INVALID = 0, + SVGA3D_SHADERTYPE_MIN = 1, + SVGA3D_SHADERTYPE_VS = 1, + SVGA3D_SHADERTYPE_PS = 2, + SVGA3D_SHADERTYPE_PREDX_MAX = 3, + SVGA3D_SHADERTYPE_GS = 3, + SVGA3D_SHADERTYPE_DX10_MAX = 4, + SVGA3D_SHADERTYPE_HS = 4, + SVGA3D_SHADERTYPE_DS = 5, + SVGA3D_SHADERTYPE_CS = 6, + SVGA3D_SHADERTYPE_MAX = 7 +} SVGA3dShaderType; + +#define SVGA3D_NUM_SHADERTYPE_PREDX \ + (SVGA3D_SHADERTYPE_PREDX_MAX - SVGA3D_SHADERTYPE_MIN) + +#define SVGA3D_NUM_SHADERTYPE_DX10 \ + (SVGA3D_SHADERTYPE_DX10_MAX - SVGA3D_SHADERTYPE_MIN) + +#define SVGA3D_NUM_SHADERTYPE \ + (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN) + +typedef enum { + SVGA3D_CONST_TYPE_MIN = 0, + SVGA3D_CONST_TYPE_FLOAT = 0, + SVGA3D_CONST_TYPE_INT = 1, + SVGA3D_CONST_TYPE_BOOL = 2, + SVGA3D_CONST_TYPE_MAX = 3, +} SVGA3dShaderConstType; + +/* + * Register limits for shader consts. 
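Looking back at the SVGA3dVertexDivisor union above: per its field comments, an indexed stream carries the number of instances to draw in count, while a per-instance stream sets instanceData and steps once per instance. A plausible setup mirroring D3D9 stream-frequency usage (the helper and its names are illustrative, and the union is redeclared locally):

#include <stdint.h>

typedef uint32_t uint32;

/* Local mirror of SVGA3dVertexDivisor above. */
typedef union {
	struct {
		uint32 count        : 30;
		uint32 indexedData  : 1;
		uint32 instanceData : 1;
	};
	uint32 value;
} VertexDivisor;

static void setup_instancing(VertexDivisor *geom, VertexDivisor *inst,
			     uint32 num_instances)
{
	/* Indexed geometry stream: replayed num_instances times. */
	geom->value = 0;
	geom->count = num_instances;
	geom->indexedData = 1;

	/* Per-instance attribute stream: one element per instance. */
	inst->value = 0;
	inst->count = 1;
	inst->instanceData = 1;
}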
+ */ +#define SVGA3D_CONSTREG_MAX 256 +#define SVGA3D_CONSTINTREG_MAX 16 +#define SVGA3D_CONSTBOOLREG_MAX 16 + +typedef enum { + SVGA3D_STRETCH_BLT_POINT = 0, + SVGA3D_STRETCH_BLT_LINEAR = 1, + SVGA3D_STRETCH_BLT_MAX +} SVGA3dStretchBltMode; + +typedef enum { + SVGA3D_QUERYTYPE_INVALID = ((uint8)-1), + SVGA3D_QUERYTYPE_MIN = 0, + SVGA3D_QUERYTYPE_OCCLUSION = 0, + SVGA3D_QUERYTYPE_TIMESTAMP = 1, + SVGA3D_QUERYTYPE_TIMESTAMPDISJOINT = 2, + SVGA3D_QUERYTYPE_PIPELINESTATS = 3, + SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE = 4, + SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS = 5, + SVGA3D_QUERYTYPE_STREAMOVERFLOWPREDICATE = 6, + SVGA3D_QUERYTYPE_OCCLUSION64 = 7, + SVGA3D_QUERYTYPE_EVENT = 8, + SVGA3D_QUERYTYPE_DX10_MAX = 9, + SVGA3D_QUERYTYPE_SOSTATS_STREAM0 = 9, + SVGA3D_QUERYTYPE_SOSTATS_STREAM1 = 10, + SVGA3D_QUERYTYPE_SOSTATS_STREAM2 = 11, + SVGA3D_QUERYTYPE_SOSTATS_STREAM3 = 12, + SVGA3D_QUERYTYPE_SOP_STREAM0 = 13, + SVGA3D_QUERYTYPE_SOP_STREAM1 = 14, + SVGA3D_QUERYTYPE_SOP_STREAM2 = 15, + SVGA3D_QUERYTYPE_SOP_STREAM3 = 16, + SVGA3D_QUERYTYPE_MAX +} SVGA3dQueryType; + +typedef uint8 SVGA3dQueryTypeUint8; + +#define SVGA3D_NUM_QUERYTYPE (SVGA3D_QUERYTYPE_MAX - SVGA3D_QUERYTYPE_MIN) + +/* + * This is the maximum number of queries per context that can be active + * simultaneously between a beginQuery and endQuery. + */ +#define SVGA3D_MAX_QUERY 64 + +/* + * Query result buffer formats + */ +typedef +#include "vmware_pack_begin.h" +struct { + uint32 samplesRendered; +} +#include "vmware_pack_end.h" +SVGADXOcclusionQueryResult; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 passed; +} +#include "vmware_pack_end.h" +SVGADXEventQueryResult; + +typedef +#include "vmware_pack_begin.h" +struct { + uint64 timestamp; +} +#include "vmware_pack_end.h" +SVGADXTimestampQueryResult; + +typedef +#include "vmware_pack_begin.h" +struct { + uint64 realFrequency; + uint32 disjoint; +} +#include "vmware_pack_end.h" +SVGADXTimestampDisjointQueryResult; + +typedef +#include "vmware_pack_begin.h" +struct { + uint64 inputAssemblyVertices; + uint64 inputAssemblyPrimitives; + uint64 vertexShaderInvocations; + uint64 geometryShaderInvocations; + uint64 geometryShaderPrimitives; + uint64 clipperInvocations; + uint64 clipperPrimitives; + uint64 pixelShaderInvocations; + uint64 hullShaderInvocations; + uint64 domainShaderInvocations; + uint64 computeShaderInvocations; +} +#include "vmware_pack_end.h" +SVGADXPipelineStatisticsQueryResult; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 anySamplesRendered; +} +#include "vmware_pack_end.h" +SVGADXOcclusionPredicateQueryResult; + +typedef +#include "vmware_pack_begin.h" +struct { + uint64 numPrimitivesWritten; + uint64 numPrimitivesRequired; +} +#include "vmware_pack_end.h" +SVGADXStreamOutStatisticsQueryResult; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 overflowed; +} +#include "vmware_pack_end.h" +SVGADXStreamOutPredicateQueryResult; + +typedef +#include "vmware_pack_begin.h" +struct { + uint64 samplesRendered; +} +#include "vmware_pack_end.h" +SVGADXOcclusion64QueryResult; + +/* + * SVGADXQueryResultUnion is not intended for use in the protocol, but is + * very helpful when working with queries generically. 
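In that spirit, a sketch of the kind of generic readback code the result union enables. The local declarations are trimmed stand-ins for the packed definitions in this file, and only the two occlusion flavors of SVGA3dQueryType are handled:

#include <stdint.h>

typedef uint32_t uint32;
typedef uint64_t uint64;

/* Trimmed stand-ins for the packed result structs above. */
struct OcclusionResult   { uint32 samplesRendered; };
struct Occlusion64Result { uint64 samplesRendered; };

union QueryResult {
	struct OcclusionResult   occ;
	struct Occlusion64Result occ64;
};

enum { QUERYTYPE_OCCLUSION = 0, QUERYTYPE_OCCLUSION64 = 7 };

/* Return the sample count regardless of which occlusion flavor ran. */
static uint64 occlusion_samples(int type, const union QueryResult *r)
{
	return (type == QUERYTYPE_OCCLUSION64) ?
		r->occ64.samplesRendered : (uint64)r->occ.samplesRendered;
}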
+ */ +typedef +#include "vmware_pack_begin.h" +union SVGADXQueryResultUnion { + SVGADXOcclusionQueryResult occ; + SVGADXEventQueryResult event; + SVGADXTimestampQueryResult ts; + SVGADXTimestampDisjointQueryResult tsDisjoint; + SVGADXPipelineStatisticsQueryResult pipelineStats; + SVGADXOcclusionPredicateQueryResult occPred; + SVGADXStreamOutStatisticsQueryResult soStats; + SVGADXStreamOutPredicateQueryResult soPred; + SVGADXOcclusion64QueryResult occ64; +} +#include "vmware_pack_end.h" +SVGADXQueryResultUnion; + + +typedef enum { + SVGA3D_QUERYSTATE_PENDING = 0, /* Query is not finished yet */ + SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully */ + SVGA3D_QUERYSTATE_FAILED = 2, /* Completed unsuccessfully */ + SVGA3D_QUERYSTATE_NEW = 3, /* Never submitted (guest only) */ +} SVGA3dQueryState; + +typedef enum { + SVGA3D_WRITE_HOST_VRAM = 1, + SVGA3D_READ_HOST_VRAM = 2, +} SVGA3dTransferType; + +typedef enum { + SVGA3D_LOGICOP_INVALID = 0, + SVGA3D_LOGICOP_MIN = 1, + SVGA3D_LOGICOP_COPY = 1, + SVGA3D_LOGICOP_NOT = 2, + SVGA3D_LOGICOP_AND = 3, + SVGA3D_LOGICOP_OR = 4, + SVGA3D_LOGICOP_XOR = 5, + SVGA3D_LOGICOP_NXOR = 6, + SVGA3D_LOGICOP_ROP3MIN = 30, /* 7-29 are reserved for future logic ops. */ + SVGA3D_LOGICOP_ROP3MAX = (SVGA3D_LOGICOP_ROP3MIN + 255), + SVGA3D_LOGICOP_MAX = (SVGA3D_LOGICOP_ROP3MAX + 1), +} SVGA3dLogicOp; + +typedef +#include "vmware_pack_begin.h" +struct { + union { + struct { + uint16 function; /* SVGA3dFogFunction */ + uint8 type; /* SVGA3dFogType */ + uint8 base; /* SVGA3dFogBase */ + }; + uint32 uintValue; + }; +} +#include "vmware_pack_end.h" +SVGA3dFogMode; + +/* + * Uniquely identify one image (a 1D/2D/3D array) from a surface. This + * is a surface ID as well as face/mipmap indices. + */ + +typedef +#include "vmware_pack_begin.h" +struct SVGA3dSurfaceImageId { + uint32 sid; + uint32 face; + uint32 mipmap; +} +#include "vmware_pack_end.h" +SVGA3dSurfaceImageId; + +typedef +#include "vmware_pack_begin.h" +struct { + uint32 width; + uint32 height; + uint32 depth; +} +#include "vmware_pack_end.h" +SVGA3dSize; + +/* + * Guest-backed objects definitions. + */ +typedef enum { + SVGA_OTABLE_MOB = 0, + SVGA_OTABLE_MIN = 0, + SVGA_OTABLE_SURFACE = 1, + SVGA_OTABLE_CONTEXT = 2, + SVGA_OTABLE_SHADER = 3, + SVGA_OTABLE_SCREENTARGET = 4, + + SVGA_OTABLE_DX9_MAX = 5, + + SVGA_OTABLE_DXCONTEXT = 5, + SVGA_OTABLE_MAX = 6 +} SVGAOTableType; + +/* + * Deprecated. + */ +#define SVGA_OTABLE_COUNT 4 + +typedef enum { + SVGA_COTABLE_MIN = 0, + SVGA_COTABLE_RTVIEW = 0, + SVGA_COTABLE_DSVIEW = 1, + SVGA_COTABLE_SRVIEW = 2, + SVGA_COTABLE_ELEMENTLAYOUT = 3, + SVGA_COTABLE_BLENDSTATE = 4, + SVGA_COTABLE_DEPTHSTENCIL = 5, + SVGA_COTABLE_RASTERIZERSTATE = 6, + SVGA_COTABLE_SAMPLER = 7, + SVGA_COTABLE_STREAMOUTPUT = 8, + SVGA_COTABLE_DXQUERY = 9, + SVGA_COTABLE_DXSHADER = 10, + SVGA_COTABLE_DX10_MAX = 11, + SVGA_COTABLE_UAVIEW = 11, + SVGA_COTABLE_MAX +} SVGACOTableType; + +/* + * The largest size (number of entries) allowed in a COTable. 
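Returning to SVGA3dFogMode above: it packs the fog function, type and base selectors (the SVGA3D_FOGFUNC_*, SVGA3D_FOGTYPE_* and SVGA3D_FOGBASE_* enums earlier in this file) into one 32-bit value for the fog-mode render state. A sketch of building it, with the union redeclared locally and without the device packing headers, so the exact layout here is compiler-dependent:

#include <stdint.h>

typedef uint8_t  uint8;
typedef uint16_t uint16;
typedef uint32_t uint32;

/* Local mirror of SVGA3dFogMode above. */
union FogMode {
	struct {
		uint16 function; /* SVGA3dFogFunction */
		uint8  type;     /* SVGA3dFogType */
		uint8  base;     /* SVGA3dFogBase */
	};
	uint32 uintValue;
};

enum { FOGFUNC_LINEAR = 3, FOGTYPE_PIXEL = 2, FOGBASE_RANGEBASED = 2 };

static uint32 linear_pixel_range_fog(void)
{
	union FogMode m = { .uintValue = 0 };

	m.function = FOGFUNC_LINEAR;
	m.type     = FOGTYPE_PIXEL;
	m.base     = FOGBASE_RANGEBASED;
	return m.uintValue; /* handed to the fog-mode render state */
}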
+ */ +#define SVGA_COTABLE_MAX_IDS (MAX_UINT16 - 2) + +typedef enum SVGAMobFormat { + SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID, + SVGA3D_MOBFMT_PTDEPTH_0 = 0, + SVGA3D_MOBFMT_MIN = 0, + SVGA3D_MOBFMT_PTDEPTH_1 = 1, + SVGA3D_MOBFMT_PTDEPTH_2 = 2, + SVGA3D_MOBFMT_RANGE = 3, + SVGA3D_MOBFMT_PTDEPTH64_0 = 4, + SVGA3D_MOBFMT_PTDEPTH64_1 = 5, + SVGA3D_MOBFMT_PTDEPTH64_2 = 6, + SVGA3D_MOBFMT_PREDX_MAX = 7, + SVGA3D_MOBFMT_EMPTY = 7, + SVGA3D_MOBFMT_MAX, +} SVGAMobFormat; + +#define SVGA3D_MOB_EMPTY_BASE 1 + +#endif /* _SVGA3D_TYPES_H_ */ diff --git a/drivers/gpu/drm/vmwgfx/svga_escape.h b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h index 8e8d9682e018..884b1d1fb85f 100644 --- a/drivers/gpu/drm/vmwgfx/svga_escape.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h @@ -1,5 +1,5 @@ /********************************************************** - * Copyright 2007-2009 VMware, Inc. All rights reserved. + * Copyright 2007-2015 VMware, Inc. All rights reserved. * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation diff --git a/drivers/gpu/drm/vmwgfx/svga_overlay.h b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h index f38416fcb046..faf6d9b2b891 100644 --- a/drivers/gpu/drm/vmwgfx/svga_overlay.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h @@ -1,5 +1,5 @@ /********************************************************** - * Copyright 2007-2009 VMware, Inc. All rights reserved. + * Copyright 2007-2015 VMware, Inc. All rights reserved. * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -152,19 +152,17 @@ VMwareVideoGetAttributes(const SVGAOverlayFormat format, /* IN */ switch (format) { case VMWARE_FOURCC_YV12: *height = (*height + 1) & ~1; - *size = (*width + 3) & ~3; + *size = (*width) * (*height); if (pitches) { - pitches[0] = *size; + pitches[0] = *width; } - *size *= *height; - if (offsets) { offsets[1] = *size; } - tmp = ((*width >> 1) + 3) & ~3; + tmp = *width >> 1; if (pitches) { pitches[1] = pitches[2] = tmp; diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h index e4259c2c1acc..6e0ccb70a700 100644 --- a/drivers/gpu/drm/vmwgfx/svga_reg.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h @@ -1,5 +1,5 @@ /********************************************************** - * Copyright 1998-2009 VMware, Inc. All rights reserved. + * Copyright 1998-2015 VMware, Inc. All rights reserved. * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -31,20 +31,38 @@ #ifndef _SVGA_REG_H_ #define _SVGA_REG_H_ +#include <linux/pci_ids.h> + +#define INCLUDE_ALLOW_MODULE +#define INCLUDE_ALLOW_USERLEVEL + +#define INCLUDE_ALLOW_VMCORE +#include "includeCheck.h" + +#include "svga_types.h" /* - * PCI device IDs. + * SVGA_REG_ENABLE bit definitions. */ -#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405 +typedef enum { + SVGA_REG_ENABLE_DISABLE = 0, + SVGA_REG_ENABLE_ENABLE = (1 << 0), + SVGA_REG_ENABLE_HIDE = (1 << 1), +} SvgaRegEnable; + +typedef uint32 SVGAMobId; /* - * SVGA_REG_ENABLE bit definitions. + * Arbitrary and meaningless limits. Please ignore these when writing + * new drivers. 
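One usage note on the SvgaRegEnable values above: the removed defines just below show that the old SVGA_REG_ENABLE_ENABLE_HIDE was simply ENABLE | HIDE, and the new enum composes the same way. A sketch, with the register write stubbed out rather than taken from the driver:

enum {
	REG_ENABLE_DISABLE = 0,
	REG_ENABLE_ENABLE  = (1 << 0),
	REG_ENABLE_HIDE    = (1 << 1),
};

/* Stub; a real driver goes through its own register I/O layer. */
static void write_enable_reg(unsigned value) { (void)value; }

static void enable_svga(int hidden)
{
	/* Matches the old SVGA_REG_ENABLE_ENABLE_HIDE combination. */
	write_enable_reg(hidden ? (REG_ENABLE_ENABLE | REG_ENABLE_HIDE)
				: REG_ENABLE_ENABLE);
}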
*/ -#define SVGA_REG_ENABLE_DISABLE 0 -#define SVGA_REG_ENABLE_ENABLE 1 -#define SVGA_REG_ENABLE_HIDE 2 -#define SVGA_REG_ENABLE_ENABLE_HIDE (SVGA_REG_ENABLE_ENABLE |\ - SVGA_REG_ENABLE_HIDE) +#define SVGA_MAX_WIDTH 2560 +#define SVGA_MAX_HEIGHT 1600 + + +#define SVGA_MAX_BITS_PER_PIXEL 32 +#define SVGA_MAX_DEPTH 24 +#define SVGA_MAX_DISPLAYS 10 /* * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned @@ -57,14 +75,9 @@ #define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3 /* Put the cursor back in the framebuffer so the user can see it */ /* - * The maximum framebuffer size that can traced for e.g. guests in VESA mode. - * The changeMap in the monitor is proportional to this number. Therefore, we'd - * like to keep it as small as possible to reduce monitor overhead (using - * SVGA_VRAM_MAX_SIZE for this increases the size of the shared area by over - * 4k!). - * - * NB: For compatibility reasons, this value must be greater than 0xff0000. - * See bug 335072. + * The maximum framebuffer size that can traced for guests unless the + * SVGA_CAP_GBOBJECTS is set in SVGA_REG_CAPABILITIES. In that case + * the full framebuffer can be traced independent of this limit. */ #define SVGA_FB_MAX_TRACEABLE_SIZE 0x1000000 @@ -106,6 +119,8 @@ #define SVGA_IRQFLAG_ANY_FENCE 0x1 /* Any fence was passed */ #define SVGA_IRQFLAG_FIFO_PROGRESS 0x2 /* Made forward progress in the FIFO */ #define SVGA_IRQFLAG_FENCE_GOAL 0x4 /* SVGA_FIFO_FENCE_GOAL reached */ +#define SVGA_IRQFLAG_COMMAND_BUFFER 0x8 /* Command buffer completed */ +#define SVGA_IRQFLAG_ERROR 0x10 /* Error while processing commands */ /* * Registers @@ -131,6 +146,7 @@ enum { SVGA_REG_FB_SIZE = 16, /* ID 0 implementation only had the above registers, then the palette */ + SVGA_REG_ID_0_TOP = 17, SVGA_REG_CAPABILITIES = 17, SVGA_REG_MEM_START = 18, /* (Deprecated) */ @@ -171,7 +187,7 @@ enum { SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */ SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */ SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */ - SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */ + SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Sugested limit on mob mem */ SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */ SVGA_REG_CMD_PREPEND_LOW = 53, SVGA_REG_CMD_PREPEND_HIGH = 54, @@ -182,7 +198,6 @@ enum { SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ /* Next 768 (== 256*3) registers exist for colormap */ - SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS /* Base of scratch registers */ /* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage: @@ -190,7 +205,6 @@ enum { the use of the current SVGA driver. */ }; - /* * Guest memory regions (GMRs): * @@ -288,17 +302,205 @@ enum { #define SVGA_GMR_FRAMEBUFFER ((uint32) -2) /* Guest Framebuffer (GFB) */ typedef +#include "vmware_pack_begin.h" struct SVGAGuestMemDescriptor { uint32 ppn; uint32 numPages; -} SVGAGuestMemDescriptor; +} +#include "vmware_pack_end.h" +SVGAGuestMemDescriptor; typedef +#include "vmware_pack_begin.h" struct SVGAGuestPtr { uint32 gmrId; uint32 offset; -} SVGAGuestPtr; +} +#include "vmware_pack_end.h" +SVGAGuestPtr; + +/* + * Register based command buffers -- + * + * Provide an SVGA device interface that allows the guest to submit + * command buffers to the SVGA device through an SVGA device register. + * The metadata for each command buffer is contained in the + * SVGACBHeader structure along with the return status codes. 
+ * + * The SVGA device supports command buffers if + * SVGA_CAP_COMMAND_BUFFERS is set in the device caps register. The + * fifo must be enabled for command buffers to be submitted. + * + * Command buffers are submitted when the guest writing the 64 byte + * aligned physical address into the SVGA_REG_COMMAND_LOW and + * SVGA_REG_COMMAND_HIGH. SVGA_REG_COMMAND_HIGH contains the upper 32 + * bits of the physical address. SVGA_REG_COMMAND_LOW contains the + * lower 32 bits of the physical address, since the command buffer + * headers are required to be 64 byte aligned the lower 6 bits are + * used for the SVGACBContext value. Writing to SVGA_REG_COMMAND_LOW + * submits the command buffer to the device and queues it for + * execution. The SVGA device supports at least + * SVGA_CB_MAX_QUEUED_PER_CONTEXT command buffers that can be queued + * per context and if that limit is reached the device will write the + * status SVGA_CB_STATUS_QUEUE_FULL to the status value of the command + * buffer header synchronously and not raise any IRQs. + * + * It is invalid to submit a command buffer without a valid physical + * address and results are undefined. + * + * The device guarantees that command buffers of size SVGA_CB_MAX_SIZE + * will be supported. If a larger command buffer is submitted results + * are unspecified and the device will either complete the command + * buffer or return an error. + * + * The device guarantees that any individual command in a command + * buffer can be up to SVGA_CB_MAX_COMMAND_SIZE in size which is + * enough to fit a 64x64 color-cursor definition. If the command is + * too large the device is allowed to process the command or return an + * error. + * + * The device context is a special SVGACBContext that allows for + * synchronous register like accesses with the flexibility of + * commands. There is a different command set defined by + * SVGADeviceContextCmdId. The commands in each command buffer is not + * allowed to straddle physical pages. + * + * The offset field which is available starting with the + * SVGA_CAP_CMD_BUFFERS_2 cap bit can be set by the guest to bias the + * start of command processing into the buffer. If an error is + * encountered the errorOffset will still be relative to the specific + * PA, not biased by the offset. When the command buffer is finished + * the guest should not read the offset field as there is no guarantee + * what it will set to. + */ + +#define SVGA_CB_MAX_SIZE (512 * 1024) /* 512 KB */ +#define SVGA_CB_MAX_QUEUED_PER_CONTEXT 32 +#define SVGA_CB_MAX_COMMAND_SIZE (32 * 1024) /* 32 KB */ + +#define SVGA_CB_CONTEXT_MASK 0x3f +typedef enum { + SVGA_CB_CONTEXT_DEVICE = 0x3f, + SVGA_CB_CONTEXT_0 = 0x0, + SVGA_CB_CONTEXT_MAX = 0x1, +} SVGACBContext; + + +typedef enum { + /* + * The guest is supposed to write SVGA_CB_STATUS_NONE to the status + * field before submitting the command buffer header, the host will + * change the value when it is done with the command buffer. + */ + SVGA_CB_STATUS_NONE = 0, + + /* + * Written by the host when a command buffer completes successfully. + * The device raises an IRQ with SVGA_IRQFLAG_COMMAND_BUFFER unless + * the SVGA_CB_FLAG_NO_IRQ flag is set. + */ + SVGA_CB_STATUS_COMPLETED = 1, + + /* + * Written by the host synchronously with the command buffer + * submission to indicate the command buffer was not submitted. No + * IRQ is raised. 
+ */ + SVGA_CB_STATUS_QUEUE_FULL = 2, + + /* + * Written by the host when an error was detected parsing a command + * in the command buffer, errorOffset is written to contain the + * offset to the first byte of the failing command. The device + * raises the IRQ with both SVGA_IRQFLAG_ERROR and + * SVGA_IRQFLAG_COMMAND_BUFFER. Some of the commands may have been + * processed. + */ + SVGA_CB_STATUS_COMMAND_ERROR = 3, + + /* + * Written by the host if there is an error parsing the command + * buffer header. The device raises the IRQ with both + * SVGA_IRQFLAG_ERROR and SVGA_IRQFLAG_COMMAND_BUFFER. The device + * did not processes any of the command buffer. + */ + SVGA_CB_STATUS_CB_HEADER_ERROR = 4, + /* + * Written by the host if the guest requested the host to preempt + * the command buffer. The device will not raise any IRQs and the + * command buffer was not processed. + */ + SVGA_CB_STATUS_PREEMPTED = 5, + + /* + * Written by the host synchronously with the command buffer + * submission to indicate the the command buffer was not submitted + * due to an error. No IRQ is raised. + */ + SVGA_CB_STATUS_SUBMISSION_ERROR = 6, +} SVGACBStatus; + +typedef enum { + SVGA_CB_FLAG_NONE = 0, + SVGA_CB_FLAG_NO_IRQ = 1 << 0, + SVGA_CB_FLAG_DX_CONTEXT = 1 << 1, + SVGA_CB_FLAG_MOB = 1 << 2, +} SVGACBFlags; + +typedef +#include "vmware_pack_begin.h" +struct { + volatile SVGACBStatus status; + volatile uint32 errorOffset; + uint64 id; + SVGACBFlags flags; + uint32 length; + union { + PA pa; + struct { + SVGAMobId mobid; + uint32 mobOffset; + } mob; + } ptr; + uint32 offset; /* Valid if CMD_BUFFERS_2 cap set, must be zero otherwise */ + uint32 dxContext; /* Valid if DX_CONTEXT flag set, must be zero otherwise */ + uint32 mustBeZero[6]; +} +#include "vmware_pack_end.h" +SVGACBHeader; + +typedef enum { + SVGA_DC_CMD_NOP = 0, + SVGA_DC_CMD_START_STOP_CONTEXT = 1, + SVGA_DC_CMD_PREEMPT = 2, + SVGA_DC_CMD_MAX = 3, + SVGA_DC_CMD_FORCE_UINT = MAX_UINT32, +} SVGADeviceContextCmdId; + +typedef struct { + uint32 enable; + SVGACBContext context; +} SVGADCCmdStartStop; + +/* + * SVGADCCmdPreempt -- + * + * This command allows the guest to request that all command buffers + * on the specified context be preempted that can be. After execution + * of this command all command buffers that were preempted will + * already have SVGA_CB_STATUS_PREEMPTED written into the status + * field. The device might still be processing a command buffer, + * assuming execution of it started before the preemption request was + * received. Specifying the ignoreIDZero flag to TRUE will cause the + * device to not preempt command buffers with the id field in the + * command buffer header set to zero. + */ + +typedef struct { + SVGACBContext context; + uint32 ignoreIDZero; +} SVGADCCmdPreempt; /* * SVGAGMRImageFormat -- @@ -320,13 +522,12 @@ struct SVGAGuestPtr { * */ -typedef -struct SVGAGMRImageFormat { +typedef struct SVGAGMRImageFormat { union { struct { uint32 bitsPerPixel : 8; uint32 colorDepth : 8; - uint32 reserved : 16; /* Must be zero */ + uint32 reserved : 16; /* Must be zero */ }; uint32 value; @@ -334,6 +535,7 @@ struct SVGAGMRImageFormat { } SVGAGMRImageFormat; typedef +#include "vmware_pack_begin.h" struct SVGAGuestImage { SVGAGuestPtr ptr; @@ -353,7 +555,9 @@ struct SVGAGuestImage { * assuming each row of blocks is tightly packed. */ uint32 pitch; -} SVGAGuestImage; +} +#include "vmware_pack_end.h" +SVGAGuestImage; /* * SVGAColorBGRX -- @@ -363,14 +567,13 @@ struct SVGAGuestImage { * GMRFB state. 
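Pulling the command-buffer submission rules above together: the SVGACBHeader must be 64-byte aligned, the upper half of its physical address goes to SVGA_REG_COMMAND_HIGH (register 49 in the register enum earlier), and the lower half, with the SVGACBContext folded into the free low 6 bits, goes to SVGA_REG_COMMAND_LOW (register 48), which queues the buffer for execution. A sketch of computing those two writes; mmio_write() is a stand-in, not the vmwgfx driver's actual I/O path:

#include <stdint.h>

typedef uint64_t PA; /* physical address, as in these headers */

#define CB_CONTEXT_MASK 0x3f

/* Stand-in for the driver's register write primitive. */
static void mmio_write(unsigned reg, uint32_t val)
{
	(void)reg; (void)val;
}

static void submit_cb(PA header_pa, unsigned context)
{
	/* The header is 64-byte aligned, so the low 6 bits are free. */
	uint32_t low  = (uint32_t)(header_pa & ~(PA)CB_CONTEXT_MASK) | context;
	uint32_t high = (uint32_t)(header_pa >> 32);

	mmio_write(49 /* SVGA_REG_COMMAND_HIGH */, high);
	/* Writing COMMAND_LOW submits and queues the buffer. */
	mmio_write(48 /* SVGA_REG_COMMAND_LOW */, low);
}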
*/ -typedef -struct SVGAColorBGRX { +typedef struct SVGAColorBGRX { union { struct { uint32 b : 8; uint32 g : 8; uint32 r : 8; - uint32 x : 8; /* Unused */ + uint32 x : 8; /* Unused */ }; uint32 value; @@ -392,26 +595,49 @@ struct SVGAColorBGRX { */ typedef -struct SVGASignedRect { +#include "vmware_pack_begin.h" +struct { int32 left; int32 top; int32 right; int32 bottom; -} SVGASignedRect; +} +#include "vmware_pack_end.h" +SVGASignedRect; typedef -struct SVGASignedPoint { +#include "vmware_pack_begin.h" +struct { int32 x; int32 y; -} SVGASignedPoint; +} +#include "vmware_pack_end.h" +SVGASignedPoint; /* - * Capabilities + * SVGA Device Capabilities + * + * Note the holes in the bitfield. Missing bits have been deprecated, + * and must not be reused. Those capabilities will never be reported + * by new versions of the SVGA device. + * + * XXX: Add longer descriptions for each capability, including a list + * of the new features that each capability provides. * - * Note the holes in the bitfield. Missing bits have been deprecated, - * and must not be reused. Those capabilities will never be reported - * by new versions of the SVGA device. + * SVGA_CAP_IRQMASK -- + * Provides device interrupts. Adds device register SVGA_REG_IRQMASK + * to set interrupt mask and direct I/O port SVGA_IRQSTATUS_PORT to + * set/clear pending interrupts. + * + * SVGA_CAP_GMR -- + * Provides synchronous mapping of guest memory regions (GMR). + * Adds device registers SVGA_REG_GMR_ID, SVGA_REG_GMR_DESCRIPTOR, + * SVGA_REG_GMR_MAX_IDS, and SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH. + * + * SVGA_CAP_TRACES -- + * Allows framebuffer trace-based updates even when FIFO is enabled. + * Adds device register SVGA_REG_TRACES. * * SVGA_CAP_GMR2 -- * Provides asynchronous commands to define and remap guest memory @@ -421,21 +647,39 @@ struct SVGASignedPoint { * SVGA_CAP_SCREEN_OBJECT_2 -- * Allow screen object support, and require backing stores from the * guest for each screen object. + * + * SVGA_CAP_COMMAND_BUFFERS -- + * Enable register based command buffer submission. + * + * SVGA_CAP_DEAD1 -- + * This cap was incorrectly used by old drivers and should not be + * reused. + * + * SVGA_CAP_CMD_BUFFERS_2 -- + * Enable support for the prepend command buffer submision + * registers. SVGA_REG_CMD_PREPEND_LOW and + * SVGA_REG_CMD_PREPEND_HIGH. + * + * SVGA_CAP_GBOBJECTS -- + * Enable guest-backed objects and surfaces. + * + * SVGA_CAP_CMD_BUFFERS_3 -- + * Enable support for command buffers in a mob. 
*/ #define SVGA_CAP_NONE 0x00000000 #define SVGA_CAP_RECT_COPY 0x00000002 #define SVGA_CAP_CURSOR 0x00000020 -#define SVGA_CAP_CURSOR_BYPASS 0x00000040 /* Legacy (Use Cursor Bypass 3 instead) */ -#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 /* Legacy (Use Cursor Bypass 3 instead) */ +#define SVGA_CAP_CURSOR_BYPASS 0x00000040 +#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 #define SVGA_CAP_8BIT_EMULATION 0x00000100 #define SVGA_CAP_ALPHA_CURSOR 0x00000200 #define SVGA_CAP_3D 0x00004000 #define SVGA_CAP_EXTENDED_FIFO 0x00008000 -#define SVGA_CAP_MULTIMON 0x00010000 /* Legacy multi-monitor support */ +#define SVGA_CAP_MULTIMON 0x00010000 #define SVGA_CAP_PITCHLOCK 0x00020000 #define SVGA_CAP_IRQMASK 0x00040000 -#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 /* Legacy multi-monitor support */ +#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 #define SVGA_CAP_GMR 0x00100000 #define SVGA_CAP_TRACES 0x00200000 #define SVGA_CAP_GMR2 0x00400000 @@ -444,6 +688,33 @@ struct SVGASignedPoint { #define SVGA_CAP_DEAD1 0x02000000 #define SVGA_CAP_CMD_BUFFERS_2 0x04000000 #define SVGA_CAP_GBOBJECTS 0x08000000 +#define SVGA_CAP_DX 0x10000000 + +#define SVGA_CAP_CMD_RESERVED 0x80000000 + + +/* + * The Guest can optionally read some SVGA device capabilities through + * the backdoor with command BDOOR_CMD_GET_SVGA_CAPABILITIES before + * the SVGA device is initialized. The type of capability the guest + * is requesting from the SVGABackdoorCapType enum should be placed in + * the upper 16 bits of the backdoor command id (ECX). On success the + * the value of EBX will be set to BDOOR_MAGIC and EAX will be set to + * the requested capability. If the command is not supported then EBX + * will be left unchanged and EAX will be set to -1. Because it is + * possible that -1 is the value of the requested cap the correct way + * to check if the command was successful is to check if EBX was changed + * to BDOOR_MAGIC making sure to initialize the register to something + * else first. + */ + +typedef enum { + SVGABackdoorCapDeviceCaps = 0, + SVGABackdoorCapFifoCaps = 1, + SVGABackdoorCap3dHWVersion = 2, + SVGABackdoorCapMax = 3, +} SVGABackdoorCapType; + /* * FIFO register indices. @@ -883,7 +1154,8 @@ enum { SVGA_VIDEO_PITCH_2, SVGA_VIDEO_PITCH_3, SVGA_VIDEO_DATA_GMRID, /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */ - SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords (SVGA_ID_INVALID) */ + SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords */ + /* (SVGA_ID_INVALID) */ SVGA_VIDEO_NUM_REGS }; @@ -896,7 +1168,9 @@ enum { * video frame to be displayed. */ -typedef struct SVGAOverlayUnit { +typedef +#include "vmware_pack_begin.h" +struct SVGAOverlayUnit { uint32 enabled; uint32 flags; uint32 dataOffset; @@ -916,7 +1190,27 @@ typedef struct SVGAOverlayUnit { uint32 pitches[3]; uint32 dataGMRId; uint32 dstScreenId; -} SVGAOverlayUnit; +} +#include "vmware_pack_end.h" +SVGAOverlayUnit; + + +/* + * Guest display topology + * + * XXX: This structure is not part of the SVGA device's interface, and + * doesn't really belong here. + */ +#define SVGA_INVALID_DISPLAY_ID ((uint32)-1) + +typedef struct SVGADisplayTopology { + uint16 displayId; + uint16 isPrimary; + uint32 width; + uint32 height; + uint32 positionX; + uint32 positionY; +} SVGADisplayTopology; /* @@ -951,10 +1245,10 @@ typedef struct SVGAOverlayUnit { * value of zero means no cloning should happen. 
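A quick sketch of consuming the capability bits defined above after reading SVGA_REG_CAPABILITIES (register 17 in the register enum earlier); read_svga_reg() is a placeholder that here just fakes a device answer:

#include <stdint.h>
#include <stdio.h>

#define CAP_GBOBJECTS 0x08000000
#define CAP_DX        0x10000000

/* Placeholder; a real driver reads through its register I/O layer. */
static uint32_t read_svga_reg(unsigned index)
{
	(void)index;
	return CAP_GBOBJECTS | CAP_DX; /* pretend device answer */
}

int main(void)
{
	uint32_t caps = read_svga_reg(17 /* SVGA_REG_CAPABILITIES */);

	if (caps & CAP_GBOBJECTS)
		printf("guest-backed objects supported\n");
	if (caps & CAP_DX)
		printf("DX contexts supported\n");
	return 0;
}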
*/ -#define SVGA_SCREEN_MUST_BE_SET (1 << 0) /* Must be set or results undefined */ +#define SVGA_SCREEN_MUST_BE_SET (1 << 0) #define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */ -#define SVGA_SCREEN_IS_PRIMARY (1 << 1) /* Guest considers this screen to be 'primary' */ -#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) /* Guest is running a fullscreen app here */ +#define SVGA_SCREEN_IS_PRIMARY (1 << 1) +#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) /* * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When the screen is @@ -977,7 +1271,8 @@ typedef struct SVGAOverlayUnit { #define SVGA_SCREEN_BLANKING (1 << 4) typedef -struct SVGAScreenObject { +#include "vmware_pack_begin.h" +struct { uint32 structSize; /* sizeof(SVGAScreenObject) */ uint32 id; uint32 flags; @@ -995,8 +1290,17 @@ struct SVGAScreenObject { * with SVGA_FIFO_CAP_SCREEN_OBJECT. */ SVGAGuestImage backingStore; + + /* + * The cloneCount field is treated as a hint from the guest that + * the user wants this display to be cloned, cloneCount times. + * + * A value of zero means no cloning should happen. + */ uint32 cloneCount; -} SVGAScreenObject; +} +#include "vmware_pack_end.h" +SVGAScreenObject; /* @@ -1009,7 +1313,7 @@ struct SVGAScreenObject { * Note the holes in the command ID numbers: These commands have been * deprecated, and the old IDs must not be reused. * - * Command IDs from 1000 to 1999 are reserved for use by the SVGA3D + * Command IDs from 1000 to 2999 are reserved for use by the SVGA3D * protocol. * * Each command's parameters are described by the comments and @@ -1020,6 +1324,7 @@ typedef enum { SVGA_CMD_INVALID_CMD = 0, SVGA_CMD_UPDATE = 1, SVGA_CMD_RECT_COPY = 3, + SVGA_CMD_RECT_ROP_COPY = 14, SVGA_CMD_DEFINE_CURSOR = 19, SVGA_CMD_DEFINE_ALPHA_CURSOR = 22, SVGA_CMD_UPDATE_VERBOSE = 25, @@ -1035,9 +1340,14 @@ typedef enum { SVGA_CMD_ANNOTATION_COPY = 40, SVGA_CMD_DEFINE_GMR2 = 41, SVGA_CMD_REMAP_GMR2 = 42, + SVGA_CMD_DEAD = 43, + SVGA_CMD_DEAD_2 = 44, + SVGA_CMD_NOP = 45, + SVGA_CMD_NOP_ERROR = 46, SVGA_CMD_MAX } SVGAFifoCmdId; +#define SVGA_CMD_MAX_DATASIZE (256 * 1024) #define SVGA_CMD_MAX_ARGS 64 @@ -1070,12 +1380,15 @@ typedef enum { */ typedef -struct SVGAFifoCmdUpdate { +#include "vmware_pack_begin.h" +struct { uint32 x; uint32 y; uint32 width; uint32 height; -} SVGAFifoCmdUpdate; +} +#include "vmware_pack_end.h" +SVGAFifoCmdUpdate; /* @@ -1089,14 +1402,44 @@ struct SVGAFifoCmdUpdate { */ typedef -struct SVGAFifoCmdRectCopy { +#include "vmware_pack_begin.h" +struct { + uint32 srcX; + uint32 srcY; + uint32 destX; + uint32 destY; + uint32 width; + uint32 height; +} +#include "vmware_pack_end.h" +SVGAFifoCmdRectCopy; + + +/* + * SVGA_CMD_RECT_ROP_COPY -- + * + * Perform a rectangular DMA transfer from one area of the GFB to + * another, and copy the result to any screens which intersect it. + * The value of ROP may only be SVGA_ROP_COPY, and this command is + * only supported for backwards compatibility reasons. + * + * Availability: + * SVGA_CAP_RECT_COPY + */ + +typedef +#include "vmware_pack_begin.h" +struct { uint32 srcX; uint32 srcY; uint32 destX; uint32 destY; uint32 width; uint32 height; -} SVGAFifoCmdRectCopy; + uint32 rop; +} +#include "vmware_pack_end.h" +SVGAFifoCmdRectRopCopy; /* @@ -1113,7 +1456,8 @@ struct SVGAFifoCmdRectCopy { */ typedef -struct SVGAFifoCmdDefineCursor { +#include "vmware_pack_begin.h" +struct { uint32 id; /* Reserved, must be zero. 
*/ uint32 hotspotX; uint32 hotspotY; @@ -1125,7 +1469,9 @@ struct SVGAFifoCmdDefineCursor { * Followed by scanline data for AND mask, then XOR mask. * Each scanline is padded to a 32-bit boundary. */ -} SVGAFifoCmdDefineCursor; +} +#include "vmware_pack_end.h" +SVGAFifoCmdDefineCursor; /* @@ -1142,14 +1488,17 @@ struct SVGAFifoCmdDefineCursor { */ typedef -struct SVGAFifoCmdDefineAlphaCursor { +#include "vmware_pack_begin.h" +struct { uint32 id; /* Reserved, must be zero. */ uint32 hotspotX; uint32 hotspotY; uint32 width; uint32 height; /* Followed by scanline data */ -} SVGAFifoCmdDefineAlphaCursor; +} +#include "vmware_pack_end.h" +SVGAFifoCmdDefineAlphaCursor; /* @@ -1165,13 +1514,16 @@ struct SVGAFifoCmdDefineAlphaCursor { */ typedef -struct SVGAFifoCmdUpdateVerbose { +#include "vmware_pack_begin.h" +struct { uint32 x; uint32 y; uint32 width; uint32 height; uint32 reason; -} SVGAFifoCmdUpdateVerbose; +} +#include "vmware_pack_end.h" +SVGAFifoCmdUpdateVerbose; /* @@ -1190,14 +1542,17 @@ struct SVGAFifoCmdUpdateVerbose { #define SVGA_ROP_COPY 0x03 typedef -struct SVGAFifoCmdFrontRopFill { +#include "vmware_pack_begin.h" +struct { uint32 color; /* In the same format as the GFB */ uint32 x; uint32 y; uint32 width; uint32 height; uint32 rop; /* Must be SVGA_ROP_COPY */ -} SVGAFifoCmdFrontRopFill; +} +#include "vmware_pack_end.h" +SVGAFifoCmdFrontRopFill; /* @@ -1216,9 +1571,12 @@ struct SVGAFifoCmdFrontRopFill { */ typedef +#include "vmware_pack_begin.h" struct { uint32 fence; -} SVGAFifoCmdFence; +} +#include "vmware_pack_end.h" +SVGAFifoCmdFence; /* @@ -1233,11 +1591,14 @@ struct { */ typedef -struct SVGAFifoCmdEscape { +#include "vmware_pack_begin.h" +struct { uint32 nsid; uint32 size; /* followed by 'size' bytes of data */ -} SVGAFifoCmdEscape; +} +#include "vmware_pack_end.h" +SVGAFifoCmdEscape; /* @@ -1267,9 +1628,12 @@ struct SVGAFifoCmdEscape { */ typedef +#include "vmware_pack_begin.h" struct { SVGAScreenObject screen; /* Variable-length according to version */ -} SVGAFifoCmdDefineScreen; +} +#include "vmware_pack_end.h" +SVGAFifoCmdDefineScreen; /* @@ -1283,9 +1647,12 @@ struct { */ typedef +#include "vmware_pack_begin.h" struct { uint32 screenId; -} SVGAFifoCmdDestroyScreen; +} +#include "vmware_pack_end.h" +SVGAFifoCmdDestroyScreen; /* @@ -1336,11 +1703,14 @@ struct { */ typedef +#include "vmware_pack_begin.h" struct { SVGAGuestPtr ptr; uint32 bytesPerLine; SVGAGMRImageFormat format; -} SVGAFifoCmdDefineGMRFB; +} +#include "vmware_pack_end.h" +SVGAFifoCmdDefineGMRFB; /* @@ -1348,19 +1718,10 @@ struct { * * This is a guest-to-host blit. It performs a DMA operation to * copy a rectangular region of pixels from the current GMRFB to - * one or more Screen Objects. + * a ScreenObject. * * The destination coordinate may be specified relative to a - * screen's origin (if a screen ID is specified) or relative to the - * virtual coordinate system's origin (if the screen ID is - * SVGA_ID_INVALID). The actual destination may span zero or more - * screens, in the case of a virtual destination rect or a rect - * which extends off the edge of the specified screen. - * - * This command writes to the screen's "base layer": the underlying - * framebuffer which exists below any cursor or video overlays. No - * action is necessary to explicitly hide or update any overlays - * which exist on top of the updated region. + * screen's origin. The provided screen ID must be valid. * * The SVGA device is guaranteed to finish reading from the GMRFB * by the time any subsequent FENCE commands are reached. 
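A sketch of filling the guest-to-host blit described above, under the tightened rules in this patch: the destination rectangle is relative to the given screen's origin and the screen ID must be valid. The structs are redeclared locally for illustration and mirror the packed SVGASignedPoint/SVGASignedRect/SVGAFifoCmdBlitGMRFBToScreen definitions nearby:

#include <stdint.h>

typedef int32_t  int32;
typedef uint32_t uint32;

struct SignedPoint { int32 x, y; };
struct SignedRect  { int32 left, top, right, bottom; };

/* Local mirror of SVGAFifoCmdBlitGMRFBToScreen. */
struct BlitGMRFBToScreen {
	struct SignedPoint srcOrigin;
	struct SignedRect  destRect;
	uint32             destScreenId;
};

static void fill_fullscreen_blit(struct BlitGMRFBToScreen *cmd,
				 uint32 screen_id, int32 w, int32 h)
{
	cmd->srcOrigin    = (struct SignedPoint){ 0, 0 };
	/* Destination coordinates are relative to the screen's origin. */
	cmd->destRect     = (struct SignedRect){ 0, 0, w, h };
	cmd->destScreenId = screen_id; /* must name a valid screen */
}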
@@ -1373,46 +1734,27 @@ struct { */ typedef +#include "vmware_pack_begin.h" struct { SVGASignedPoint srcOrigin; SVGASignedRect destRect; uint32 destScreenId; -} SVGAFifoCmdBlitGMRFBToScreen; +} +#include "vmware_pack_end.h" +SVGAFifoCmdBlitGMRFBToScreen; /* * SVGA_CMD_BLIT_SCREEN_TO_GMRFB -- * * This is a host-to-guest blit. It performs a DMA operation to - * copy a rectangular region of pixels from a single Screen Object + * copy a rectangular region of pixels from a single ScreenObject * back to the current GMRFB. * - * Usage note: This command should be used rarely. It will - * typically be inefficient, but it is necessary for some types of - * synchronization between 3D (GPU) and 2D (CPU) rendering into - * overlapping areas of a screen. - * * The source coordinate is specified relative to a screen's - * origin. The provided screen ID must be valid. If any parameters + * origin. The provided screen ID must be valid. If any parameters * are invalid, the resulting pixel values are undefined. * - * This command reads the screen's "base layer". Overlays like - * video and cursor are not included, but any data which was sent - * using a blit-to-screen primitive will be available, no matter - * whether the data's original source was the GMRFB or the 3D - * acceleration hardware. - * - * Note that our guest-to-host blits and host-to-guest blits aren't - * symmetric in their current implementation. While the parameters - * are identical, host-to-guest blits are a lot less featureful. - * They do not support clipping: If the source parameters don't - * fully fit within a screen, the blit fails. They must originate - * from exactly one screen. Virtual coordinates are not directly - * supported. - * - * Host-to-guest blits do support the same set of GMRFB formats - * offered by guest-to-host blits. - * * The SVGA device is guaranteed to finish writing to the GMRFB by * the time any subsequent FENCE commands are reached. * @@ -1421,77 +1763,57 @@ struct { */ typedef +#include "vmware_pack_begin.h" struct { SVGASignedPoint destOrigin; SVGASignedRect srcRect; uint32 srcScreenId; -} SVGAFifoCmdBlitScreenToGMRFB; +} +#include "vmware_pack_end.h" +SVGAFifoCmdBlitScreenToGMRFB; /* * SVGA_CMD_ANNOTATION_FILL -- * - * This is a blit annotation. This command stores a small piece of - * device state which is consumed by the next blit-to-screen - * command. The state is only cleared by commands which are - * specifically documented as consuming an annotation. Other - * commands (such as ESCAPEs for debugging) may intervene between - * the annotation and its associated blit. - * - * This annotation is a promise about the contents of the next - * blit: The video driver is guaranteeing that all pixels in that - * blit will have the same value, specified here as a color in - * SVGAColorBGRX format. - * - * The SVGA device can still render the blit correctly even if it - * ignores this annotation, but the annotation may allow it to - * perform the blit more efficiently, for example by ignoring the - * source data and performing a fill in hardware. - * - * This annotation is most important for performance when the - * user's display is being remoted over a network connection. + * The annotation commands have been deprecated, should not be used + * by new drivers. They used to provide performance hints to the SVGA + * device about the content of screen updates, but newer SVGA devices + * ignore these. 
* * Availability: * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2 */ typedef +#include "vmware_pack_begin.h" struct { SVGAColorBGRX color; -} SVGAFifoCmdAnnotationFill; +} +#include "vmware_pack_end.h" +SVGAFifoCmdAnnotationFill; /* * SVGA_CMD_ANNOTATION_COPY -- * - * This is a blit annotation. See SVGA_CMD_ANNOTATION_FILL for more - * information about annotations. - * - * This annotation is a promise about the contents of the next - * blit: The video driver is guaranteeing that all pixels in that - * blit will have the same value as those which already exist at an - * identically-sized region on the same or a different screen. - * - * Note that the source pixels for the COPY in this annotation are - * sampled before applying the anqnotation's associated blit. They - * are allowed to overlap with the blit's destination pixels. - * - * The copy source rectangle is specified the same way as the blit - * destination: it can be a rectangle which spans zero or more - * screens, specified relative to either a screen or to the virtual - * coordinate system's origin. If the source rectangle includes - * pixels which are not from exactly one screen, the results are - * undefined. + * The annotation commands have been deprecated, should not be used + * by new drivers. They used to provide performance hints to the SVGA + * device about the content of screen updates, but newer SVGA devices + * ignore these. * * Availability: * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2 */ typedef +#include "vmware_pack_begin.h" struct { SVGASignedPoint srcOrigin; uint32 srcScreenId; -} SVGAFifoCmdAnnotationCopy; +} +#include "vmware_pack_end.h" +SVGAFifoCmdAnnotationCopy; /* @@ -1504,10 +1826,13 @@ struct { */ typedef +#include "vmware_pack_begin.h" struct { uint32 gmrId; uint32 numPages; -} SVGAFifoCmdDefineGMR2; +} +#include "vmware_pack_end.h" +SVGAFifoCmdDefineGMR2; /* @@ -1546,6 +1871,7 @@ typedef enum { } SVGARemapGMR2Flags; typedef +#include "vmware_pack_begin.h" struct { uint32 gmrId; SVGARemapGMR2Flags flags; @@ -1559,6 +1885,52 @@ struct { * (according to flag SVGA_REMAP_GMR2_PPN64) follows. If flag * SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry. */ -} SVGAFifoCmdRemapGMR2; +} +#include "vmware_pack_end.h" +SVGAFifoCmdRemapGMR2; + + +/* + * Size of SVGA device memory such as frame buffer and FIFO. 
+ */ +#define SVGA_VRAM_MIN_SIZE (4 * 640 * 480) /* bytes */ +#define SVGA_VRAM_MIN_SIZE_3D (16 * 1024 * 1024) +#define SVGA_VRAM_MAX_SIZE (128 * 1024 * 1024) +#define SVGA_MEMORY_SIZE_MAX (1024 * 1024 * 1024) +#define SVGA_FIFO_SIZE_MAX (2 * 1024 * 1024) +#define SVGA_GRAPHICS_MEMORY_KB_MIN (32 * 1024) +#define SVGA_GRAPHICS_MEMORY_KB_MAX (2 * 1024 * 1024) +#define SVGA_GRAPHICS_MEMORY_KB_DEFAULT (256 * 1024) + +#define SVGA_VRAM_SIZE_W2K (64 * 1024 * 1024) /* 64 MB */ + +/* + * To simplify autoDetect display configuration, support a minimum of + * two 1920x1200 monitors, 32bpp, side-by-side, optionally rotated: + * numDisplays = 2 + * maxWidth = numDisplay * 1920 = 3840 + * maxHeight = rotated width of single monitor = 1920 + * vramSize = maxWidth * maxHeight * 4 = 29491200 + */ +#define SVGA_VRAM_SIZE_AUTODETECT (32 * 1024 * 1024) + +#if defined(VMX86_SERVER) +#define SVGA_VRAM_SIZE (4 * 1024 * 1024) +#define SVGA_VRAM_SIZE_3D (64 * 1024 * 1024) +#define SVGA_FIFO_SIZE (256 * 1024) +#define SVGA_FIFO_SIZE_3D (516 * 1024) +#define SVGA_MEMORY_SIZE_DEFAULT (160 * 1024 * 1024) +#define SVGA_AUTODETECT_DEFAULT FALSE +#else +#define SVGA_VRAM_SIZE (16 * 1024 * 1024) +#define SVGA_VRAM_SIZE_3D SVGA_VRAM_MAX_SIZE +#define SVGA_FIFO_SIZE (2 * 1024 * 1024) +#define SVGA_FIFO_SIZE_3D SVGA_FIFO_SIZE +#define SVGA_MEMORY_SIZE_DEFAULT (768 * 1024 * 1024) +#define SVGA_AUTODETECT_DEFAULT TRUE +#endif + +#define SVGA_FIFO_SIZE_GBOBJECTS (256 * 1024) +#define SVGA_VRAM_SIZE_GBOBJECTS (4 * 1024 * 1024) #endif diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h new file mode 100644 index 000000000000..2e8ba4df8de9 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h @@ -0,0 +1,46 @@ +/********************************************************** + * Copyright 2015 VMware, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + **********************************************************/ +#ifndef _VM_BASIC_TYPES_H_ +#define _VM_BASIC_TYPES_H_ +#include <linux/kernel.h> + +typedef u32 uint32; +typedef s32 int32; +typedef u64 uint64; +typedef u16 uint16; +typedef s16 int16; +typedef u8 uint8; +typedef s8 int8; + +typedef uint64 PA; +typedef uint32 PPN; +typedef uint64 PPN64; + +typedef bool Bool; + +#define MAX_UINT32 U32_MAX +#define MAX_UINT16 U16_MAX + +#endif diff --git a/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h new file mode 100644 index 000000000000..120eab830eaf --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h @@ -0,0 +1,21 @@ +#ifndef _VM_BASIC_TYPES_H_ +#define _VM_BASIC_TYPES_H_ +#include <linux/kernel.h> + +typedef u32 uint32; +typedef s32 int32; +typedef u64 uint64; +typedef u16 uint16; +typedef s16 int16; +typedef u8 uint8; +typedef s8 int8; + +typedef uint64 PA; +typedef uint32 PPN; +typedef uint64 PPN64; + +typedef bool Bool; + +#define MAX_UINT32 U32_MAX + +#endif diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h new file mode 100644 index 000000000000..7e7b0ce34aa2 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h @@ -0,0 +1,25 @@ +/********************************************************** + * Copyright 2015 VMware, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + **********************************************************/ +#include <linux/compiler.h> diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h new file mode 100644 index 000000000000..e2e440ed3d44 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h @@ -0,0 +1,25 @@ +/********************************************************** + * Copyright 2015 VMware, Inc. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + **********************************************************/ +__packed diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h deleted file mode 100644 index f58dc7dd15c5..000000000000 --- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h +++ /dev/null @@ -1,2627 +0,0 @@ -/********************************************************** - * Copyright 1998-2009 VMware, Inc. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person - * obtaining a copy of this software and associated documentation - * files (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, copy, - * modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - **********************************************************/ - -/* - * svga3d_reg.h -- - * - * SVGA 3D hardware definitions - */ - -#ifndef _SVGA3D_REG_H_ -#define _SVGA3D_REG_H_ - -#include "svga_reg.h" - -typedef uint32 PPN; -typedef __le64 PPN64; - -/* - * 3D Hardware Version - * - * The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo - * register. Is set by the host and read by the guest. This lets - * us make new guest drivers which are backwards-compatible with old - * SVGA hardware revisions. It does not let us support old guest - * drivers. Good enough for now. 
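As a concrete reading of the scheme just described: the SVGA3D_MAKE_HWVERSION macro below packs the major revision into the high 16 bits and the minor into the low byte, so the named revisions decode as in this sketch:

    uint32 ver = SVGA3D_MAKE_HWVERSION(2, 1);   /* WS8_B1 == 0x00020001 */
    SVGA3D_MAJOR_HWVERSION(ver);                /* == 2 */
    SVGA3D_MINOR_HWVERSION(ver);                /* == 1 */

Because the packed values grow monotonically with (major, minor), a guest can gate features on a plain unsigned comparison against, say, SVGA3D_HWVERSION_WS8_B1.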
- * - */ - -#define SVGA3D_MAKE_HWVERSION(major, minor) (((major) << 16) | ((minor) & 0xFF)) -#define SVGA3D_MAJOR_HWVERSION(version) ((version) >> 16) -#define SVGA3D_MINOR_HWVERSION(version) ((version) & 0xFF) - -typedef enum { - SVGA3D_HWVERSION_WS5_RC1 = SVGA3D_MAKE_HWVERSION(0, 1), - SVGA3D_HWVERSION_WS5_RC2 = SVGA3D_MAKE_HWVERSION(0, 2), - SVGA3D_HWVERSION_WS51_RC1 = SVGA3D_MAKE_HWVERSION(0, 3), - SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1), - SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4), - SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0), - SVGA3D_HWVERSION_WS8_B1 = SVGA3D_MAKE_HWVERSION(2, 1), - SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS8_B1, -} SVGA3dHardwareVersion; - -/* - * Generic Types - */ - -typedef uint32 SVGA3dBool; /* 32-bit Bool definition */ -#define SVGA3D_NUM_CLIPPLANES 6 -#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS 8 -#define SVGA3D_MAX_CONTEXT_IDS 256 -#define SVGA3D_MAX_SURFACE_IDS (32 * 1024) - -#define SVGA3D_NUM_TEXTURE_UNITS 32 -#define SVGA3D_NUM_LIGHTS 8 - -/* - * Surface formats. - * - * If you modify this list, be sure to keep GLUtil.c in sync. It - * includes the internal format definition of each surface in - * GLUtil_ConvertSurfaceFormat, and it contains a table of - * human-readable names in GLUtil_GetFormatName. - */ - -typedef enum SVGA3dSurfaceFormat { - SVGA3D_FORMAT_MIN = 0, - SVGA3D_FORMAT_INVALID = 0, - - SVGA3D_X8R8G8B8 = 1, - SVGA3D_A8R8G8B8 = 2, - - SVGA3D_R5G6B5 = 3, - SVGA3D_X1R5G5B5 = 4, - SVGA3D_A1R5G5B5 = 5, - SVGA3D_A4R4G4B4 = 6, - - SVGA3D_Z_D32 = 7, - SVGA3D_Z_D16 = 8, - SVGA3D_Z_D24S8 = 9, - SVGA3D_Z_D15S1 = 10, - - SVGA3D_LUMINANCE8 = 11, - SVGA3D_LUMINANCE4_ALPHA4 = 12, - SVGA3D_LUMINANCE16 = 13, - SVGA3D_LUMINANCE8_ALPHA8 = 14, - - SVGA3D_DXT1 = 15, - SVGA3D_DXT2 = 16, - SVGA3D_DXT3 = 17, - SVGA3D_DXT4 = 18, - SVGA3D_DXT5 = 19, - - SVGA3D_BUMPU8V8 = 20, - SVGA3D_BUMPL6V5U5 = 21, - SVGA3D_BUMPX8L8V8U8 = 22, - SVGA3D_BUMPL8V8U8 = 23, - - SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */ - SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */ - - SVGA3D_A2R10G10B10 = 26, - - /* signed formats */ - SVGA3D_V8U8 = 27, - SVGA3D_Q8W8V8U8 = 28, - SVGA3D_CxV8U8 = 29, - - /* mixed formats */ - SVGA3D_X8L8V8U8 = 30, - SVGA3D_A2W10V10U10 = 31, - - SVGA3D_ALPHA8 = 32, - - /* Single- and dual-component floating point formats */ - SVGA3D_R_S10E5 = 33, - SVGA3D_R_S23E8 = 34, - SVGA3D_RG_S10E5 = 35, - SVGA3D_RG_S23E8 = 36, - - SVGA3D_BUFFER = 37, - - SVGA3D_Z_D24X8 = 38, - - SVGA3D_V16U16 = 39, - - SVGA3D_G16R16 = 40, - SVGA3D_A16B16G16R16 = 41, - - /* Packed Video formats */ - SVGA3D_UYVY = 42, - SVGA3D_YUY2 = 43, - - /* Planar video formats */ - SVGA3D_NV12 = 44, - - /* Video format with alpha */ - SVGA3D_AYUV = 45, - - SVGA3D_R32G32B32A32_TYPELESS = 46, - SVGA3D_R32G32B32A32_FLOAT = 25, - SVGA3D_R32G32B32A32_UINT = 47, - SVGA3D_R32G32B32A32_SINT = 48, - SVGA3D_R32G32B32_TYPELESS = 49, - SVGA3D_R32G32B32_FLOAT = 50, - SVGA3D_R32G32B32_UINT = 51, - SVGA3D_R32G32B32_SINT = 52, - SVGA3D_R16G16B16A16_TYPELESS = 53, - SVGA3D_R16G16B16A16_FLOAT = 24, - SVGA3D_R16G16B16A16_UNORM = 41, - SVGA3D_R16G16B16A16_UINT = 54, - SVGA3D_R16G16B16A16_SNORM = 55, - SVGA3D_R16G16B16A16_SINT = 56, - SVGA3D_R32G32_TYPELESS = 57, - SVGA3D_R32G32_FLOAT = 36, - SVGA3D_R32G32_UINT = 58, - SVGA3D_R32G32_SINT = 59, - SVGA3D_R32G8X24_TYPELESS = 60, - SVGA3D_D32_FLOAT_S8X24_UINT = 61, - SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62, - SVGA3D_X32_TYPELESS_G8X24_UINT = 63, - SVGA3D_R10G10B10A2_TYPELESS = 64, - SVGA3D_R10G10B10A2_UNORM = 26, - 
SVGA3D_R10G10B10A2_UINT = 65, - SVGA3D_R11G11B10_FLOAT = 66, - SVGA3D_R8G8B8A8_TYPELESS = 67, - SVGA3D_R8G8B8A8_UNORM = 68, - SVGA3D_R8G8B8A8_UNORM_SRGB = 69, - SVGA3D_R8G8B8A8_UINT = 70, - SVGA3D_R8G8B8A8_SNORM = 28, - SVGA3D_R8G8B8A8_SINT = 71, - SVGA3D_R16G16_TYPELESS = 72, - SVGA3D_R16G16_FLOAT = 35, - SVGA3D_R16G16_UNORM = 40, - SVGA3D_R16G16_UINT = 73, - SVGA3D_R16G16_SNORM = 39, - SVGA3D_R16G16_SINT = 74, - SVGA3D_R32_TYPELESS = 75, - SVGA3D_D32_FLOAT = 76, - SVGA3D_R32_FLOAT = 34, - SVGA3D_R32_UINT = 77, - SVGA3D_R32_SINT = 78, - SVGA3D_R24G8_TYPELESS = 79, - SVGA3D_D24_UNORM_S8_UINT = 80, - SVGA3D_R24_UNORM_X8_TYPELESS = 81, - SVGA3D_X24_TYPELESS_G8_UINT = 82, - SVGA3D_R8G8_TYPELESS = 83, - SVGA3D_R8G8_UNORM = 84, - SVGA3D_R8G8_UINT = 85, - SVGA3D_R8G8_SNORM = 27, - SVGA3D_R8G8_SINT = 86, - SVGA3D_R16_TYPELESS = 87, - SVGA3D_R16_FLOAT = 33, - SVGA3D_D16_UNORM = 8, - SVGA3D_R16_UNORM = 88, - SVGA3D_R16_UINT = 89, - SVGA3D_R16_SNORM = 90, - SVGA3D_R16_SINT = 91, - SVGA3D_R8_TYPELESS = 92, - SVGA3D_R8_UNORM = 93, - SVGA3D_R8_UINT = 94, - SVGA3D_R8_SNORM = 95, - SVGA3D_R8_SINT = 96, - SVGA3D_A8_UNORM = 32, - SVGA3D_R1_UNORM = 97, - SVGA3D_R9G9B9E5_SHAREDEXP = 98, - SVGA3D_R8G8_B8G8_UNORM = 99, - SVGA3D_G8R8_G8B8_UNORM = 100, - SVGA3D_BC1_TYPELESS = 101, - SVGA3D_BC1_UNORM = 15, - SVGA3D_BC1_UNORM_SRGB = 102, - SVGA3D_BC2_TYPELESS = 103, - SVGA3D_BC2_UNORM = 17, - SVGA3D_BC2_UNORM_SRGB = 104, - SVGA3D_BC3_TYPELESS = 105, - SVGA3D_BC3_UNORM = 19, - SVGA3D_BC3_UNORM_SRGB = 106, - SVGA3D_BC4_TYPELESS = 107, - SVGA3D_BC4_UNORM = 108, - SVGA3D_BC4_SNORM = 109, - SVGA3D_BC5_TYPELESS = 110, - SVGA3D_BC5_UNORM = 111, - SVGA3D_BC5_SNORM = 112, - SVGA3D_B5G6R5_UNORM = 3, - SVGA3D_B5G5R5A1_UNORM = 5, - SVGA3D_B8G8R8A8_UNORM = 2, - SVGA3D_B8G8R8X8_UNORM = 1, - SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113, - SVGA3D_B8G8R8A8_TYPELESS = 114, - SVGA3D_B8G8R8A8_UNORM_SRGB = 115, - SVGA3D_B8G8R8X8_TYPELESS = 116, - SVGA3D_B8G8R8X8_UNORM_SRGB = 117, - - /* Advanced D3D9 depth formats. */ - SVGA3D_Z_DF16 = 118, - SVGA3D_Z_DF24 = 119, - SVGA3D_Z_D24S8_INT = 120, - - /* Planar video formats. */ - SVGA3D_YV12 = 121, - - SVGA3D_FORMAT_MAX = 122, -} SVGA3dSurfaceFormat; - -typedef uint32 SVGA3dColor; /* a, r, g, b */ - -/* - * These match the D3DFORMAT_OP definitions used by Direct3D. We need - * them so that we can query the host for what the supported surface - * operations are (when we're using the D3D backend, in particular), - * and so we can send those operations to the guest. - */ -typedef enum { - SVGA3DFORMAT_OP_TEXTURE = 0x00000001, - SVGA3DFORMAT_OP_VOLUMETEXTURE = 0x00000002, - SVGA3DFORMAT_OP_CUBETEXTURE = 0x00000004, - SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET = 0x00000008, - SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET = 0x00000010, - SVGA3DFORMAT_OP_ZSTENCIL = 0x00000040, - SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH = 0x00000080, - -/* - * This format can be used as a render target if the current display mode - * is the same depth if the alpha channel is ignored. e.g. if the device - * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the - * format op list entry for A8R8G8B8 should have this cap. - */ - SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET = 0x00000100, - -/* - * This format contains DirectDraw support (including Flip). This flag - * should not to be set on alpha formats. 
- */ - SVGA3DFORMAT_OP_DISPLAYMODE = 0x00000400, - -/* - * The rasterizer can support some level of Direct3D support in this format - * and implies that the driver can create a Context in this mode (for some - * render target format). When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE - * flag must also be set. - */ - SVGA3DFORMAT_OP_3DACCELERATION = 0x00000800, - -/* - * This is set for a private format when the driver has put the bpp in - * the structure. - */ - SVGA3DFORMAT_OP_PIXELSIZE = 0x00001000, - -/* - * Indicates that this format can be converted to any RGB format for which - * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified - */ - SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000, - -/* - * Indicates that this format can be used to create offscreen plain surfaces. - */ - SVGA3DFORMAT_OP_OFFSCREENPLAIN = 0x00004000, - -/* - * Indicated that this format can be read as an SRGB texture (meaning that the - * sampler will linearize the looked up data) - */ - SVGA3DFORMAT_OP_SRGBREAD = 0x00008000, - -/* - * Indicates that this format can be used in the bumpmap instructions - */ - SVGA3DFORMAT_OP_BUMPMAP = 0x00010000, - -/* - * Indicates that this format can be sampled by the displacement map sampler - */ - SVGA3DFORMAT_OP_DMAP = 0x00020000, - -/* - * Indicates that this format cannot be used with texture filtering - */ - SVGA3DFORMAT_OP_NOFILTER = 0x00040000, - -/* - * Indicates that format conversions are supported to this RGB format if - * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format. - */ - SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB = 0x00080000, - -/* - * Indicated that this format can be written as an SRGB target (meaning that the - * pixel pipe will DE-linearize data on output to format) - */ - SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000, - -/* - * Indicates that this format cannot be used with alpha blending - */ - SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000, - -/* - * Indicates that the device can auto-generated sublevels for resources - * of this format - */ - SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000, - -/* - * Indicates that this format can be used by vertex texture sampler - */ - SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000, - -/* - * Indicates that this format supports neither texture coordinate wrap - * modes, nor mipmapping - */ - SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP = 0x01000000 -} SVGA3dFormatOp; - -/* - * This structure is a conversion of SVGA3DFORMAT_OP_*. - * Entries must be located at the same position. - */ -typedef union { - uint32 value; - struct { - uint32 texture : 1; - uint32 volumeTexture : 1; - uint32 cubeTexture : 1; - uint32 offscreenRenderTarget : 1; - uint32 sameFormatRenderTarget : 1; - uint32 unknown1 : 1; - uint32 zStencil : 1; - uint32 zStencilArbitraryDepth : 1; - uint32 sameFormatUpToAlpha : 1; - uint32 unknown2 : 1; - uint32 displayMode : 1; - uint32 acceleration3d : 1; - uint32 pixelSize : 1; - uint32 convertToARGB : 1; - uint32 offscreenPlain : 1; - uint32 sRGBRead : 1; - uint32 bumpMap : 1; - uint32 dmap : 1; - uint32 noFilter : 1; - uint32 memberOfGroupARGB : 1; - uint32 sRGBWrite : 1; - uint32 noAlphaBlend : 1; - uint32 autoGenMipMap : 1; - uint32 vertexTexture : 1; - uint32 noTexCoordWrapNorMip : 1; - }; -} SVGA3dSurfaceFormatCaps; - -/* - * SVGA_3D_CMD_SETRENDERSTATE Types. All value types - * must fit in a uint32. 
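Since SVGA3dSurfaceFormatCaps above is required to mirror SVGA3DFORMAT_OP_* bit for bit, a format-caps dword from the host can be tested through either view. A sketch, where fmt_caps is a hypothetical caps value already read from the device:

    SVGA3dSurfaceFormatCaps caps;
    caps.value = fmt_caps;
    if (caps.texture) { /* ... */ }                       /* is equivalent to */
    if (fmt_caps & SVGA3DFORMAT_OP_TEXTURE) { /* ... */ }

This equivalence is exactly why the comment insists the bitfield entries stay at the same positions as the enum values.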
- */ - -typedef enum { - SVGA3D_RS_INVALID = 0, - SVGA3D_RS_ZENABLE = 1, /* SVGA3dBool */ - SVGA3D_RS_ZWRITEENABLE = 2, /* SVGA3dBool */ - SVGA3D_RS_ALPHATESTENABLE = 3, /* SVGA3dBool */ - SVGA3D_RS_DITHERENABLE = 4, /* SVGA3dBool */ - SVGA3D_RS_BLENDENABLE = 5, /* SVGA3dBool */ - SVGA3D_RS_FOGENABLE = 6, /* SVGA3dBool */ - SVGA3D_RS_SPECULARENABLE = 7, /* SVGA3dBool */ - SVGA3D_RS_STENCILENABLE = 8, /* SVGA3dBool */ - SVGA3D_RS_LIGHTINGENABLE = 9, /* SVGA3dBool */ - SVGA3D_RS_NORMALIZENORMALS = 10, /* SVGA3dBool */ - SVGA3D_RS_POINTSPRITEENABLE = 11, /* SVGA3dBool */ - SVGA3D_RS_POINTSCALEENABLE = 12, /* SVGA3dBool */ - SVGA3D_RS_STENCILREF = 13, /* uint32 */ - SVGA3D_RS_STENCILMASK = 14, /* uint32 */ - SVGA3D_RS_STENCILWRITEMASK = 15, /* uint32 */ - SVGA3D_RS_FOGSTART = 16, /* float */ - SVGA3D_RS_FOGEND = 17, /* float */ - SVGA3D_RS_FOGDENSITY = 18, /* float */ - SVGA3D_RS_POINTSIZE = 19, /* float */ - SVGA3D_RS_POINTSIZEMIN = 20, /* float */ - SVGA3D_RS_POINTSIZEMAX = 21, /* float */ - SVGA3D_RS_POINTSCALE_A = 22, /* float */ - SVGA3D_RS_POINTSCALE_B = 23, /* float */ - SVGA3D_RS_POINTSCALE_C = 24, /* float */ - SVGA3D_RS_FOGCOLOR = 25, /* SVGA3dColor */ - SVGA3D_RS_AMBIENT = 26, /* SVGA3dColor */ - SVGA3D_RS_CLIPPLANEENABLE = 27, /* SVGA3dClipPlanes */ - SVGA3D_RS_FOGMODE = 28, /* SVGA3dFogMode */ - SVGA3D_RS_FILLMODE = 29, /* SVGA3dFillMode */ - SVGA3D_RS_SHADEMODE = 30, /* SVGA3dShadeMode */ - SVGA3D_RS_LINEPATTERN = 31, /* SVGA3dLinePattern */ - SVGA3D_RS_SRCBLEND = 32, /* SVGA3dBlendOp */ - SVGA3D_RS_DSTBLEND = 33, /* SVGA3dBlendOp */ - SVGA3D_RS_BLENDEQUATION = 34, /* SVGA3dBlendEquation */ - SVGA3D_RS_CULLMODE = 35, /* SVGA3dFace */ - SVGA3D_RS_ZFUNC = 36, /* SVGA3dCmpFunc */ - SVGA3D_RS_ALPHAFUNC = 37, /* SVGA3dCmpFunc */ - SVGA3D_RS_STENCILFUNC = 38, /* SVGA3dCmpFunc */ - SVGA3D_RS_STENCILFAIL = 39, /* SVGA3dStencilOp */ - SVGA3D_RS_STENCILZFAIL = 40, /* SVGA3dStencilOp */ - SVGA3D_RS_STENCILPASS = 41, /* SVGA3dStencilOp */ - SVGA3D_RS_ALPHAREF = 42, /* float (0.0 .. 1.0) */ - SVGA3D_RS_FRONTWINDING = 43, /* SVGA3dFrontWinding */ - SVGA3D_RS_COORDINATETYPE = 44, /* SVGA3dCoordinateType */ - SVGA3D_RS_ZBIAS = 45, /* float */ - SVGA3D_RS_RANGEFOGENABLE = 46, /* SVGA3dBool */ - SVGA3D_RS_COLORWRITEENABLE = 47, /* SVGA3dColorMask */ - SVGA3D_RS_VERTEXMATERIALENABLE = 48, /* SVGA3dBool */ - SVGA3D_RS_DIFFUSEMATERIALSOURCE = 49, /* SVGA3dVertexMaterial */ - SVGA3D_RS_SPECULARMATERIALSOURCE = 50, /* SVGA3dVertexMaterial */ - SVGA3D_RS_AMBIENTMATERIALSOURCE = 51, /* SVGA3dVertexMaterial */ - SVGA3D_RS_EMISSIVEMATERIALSOURCE = 52, /* SVGA3dVertexMaterial */ - SVGA3D_RS_TEXTUREFACTOR = 53, /* SVGA3dColor */ - SVGA3D_RS_LOCALVIEWER = 54, /* SVGA3dBool */ - SVGA3D_RS_SCISSORTESTENABLE = 55, /* SVGA3dBool */ - SVGA3D_RS_BLENDCOLOR = 56, /* SVGA3dColor */ - SVGA3D_RS_STENCILENABLE2SIDED = 57, /* SVGA3dBool */ - SVGA3D_RS_CCWSTENCILFUNC = 58, /* SVGA3dCmpFunc */ - SVGA3D_RS_CCWSTENCILFAIL = 59, /* SVGA3dStencilOp */ - SVGA3D_RS_CCWSTENCILZFAIL = 60, /* SVGA3dStencilOp */ - SVGA3D_RS_CCWSTENCILPASS = 61, /* SVGA3dStencilOp */ - SVGA3D_RS_VERTEXBLEND = 62, /* SVGA3dVertexBlendFlags */ - SVGA3D_RS_SLOPESCALEDEPTHBIAS = 63, /* float */ - SVGA3D_RS_DEPTHBIAS = 64, /* float */ - - - /* - * Output Gamma Level - * - * Output gamma effects the gamma curve of colors that are output from the - * rendering pipeline. A value of 1.0 specifies a linear color space. If the - * value is <= 0.0, gamma correction is ignored and linear color space is - * used. 
- */ - - SVGA3D_RS_OUTPUTGAMMA = 65, /* float */ - SVGA3D_RS_ZVISIBLE = 66, /* SVGA3dBool */ - SVGA3D_RS_LASTPIXEL = 67, /* SVGA3dBool */ - SVGA3D_RS_CLIPPING = 68, /* SVGA3dBool */ - SVGA3D_RS_WRAP0 = 69, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP1 = 70, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP2 = 71, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP3 = 72, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP4 = 73, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP5 = 74, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP6 = 75, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP7 = 76, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP8 = 77, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP9 = 78, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP10 = 79, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP11 = 80, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP12 = 81, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP13 = 82, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP14 = 83, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP15 = 84, /* SVGA3dWrapFlags */ - SVGA3D_RS_MULTISAMPLEANTIALIAS = 85, /* SVGA3dBool */ - SVGA3D_RS_MULTISAMPLEMASK = 86, /* uint32 */ - SVGA3D_RS_INDEXEDVERTEXBLENDENABLE = 87, /* SVGA3dBool */ - SVGA3D_RS_TWEENFACTOR = 88, /* float */ - SVGA3D_RS_ANTIALIASEDLINEENABLE = 89, /* SVGA3dBool */ - SVGA3D_RS_COLORWRITEENABLE1 = 90, /* SVGA3dColorMask */ - SVGA3D_RS_COLORWRITEENABLE2 = 91, /* SVGA3dColorMask */ - SVGA3D_RS_COLORWRITEENABLE3 = 92, /* SVGA3dColorMask */ - SVGA3D_RS_SEPARATEALPHABLENDENABLE = 93, /* SVGA3dBool */ - SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */ - SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */ - SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */ - SVGA3D_RS_TRANSPARENCYANTIALIAS = 97, /* SVGA3dTransparencyAntialiasType */ - SVGA3D_RS_LINEAA = 98, /* SVGA3dBool */ - SVGA3D_RS_LINEWIDTH = 99, /* float */ - SVGA3D_RS_MAX -} SVGA3dRenderStateName; - -typedef enum { - SVGA3D_TRANSPARENCYANTIALIAS_NORMAL = 0, - SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE = 1, - SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE = 2, - SVGA3D_TRANSPARENCYANTIALIAS_MAX -} SVGA3dTransparencyAntialiasType; - -typedef enum { - SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */ - SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */ - SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */ -} SVGA3dVertexMaterial; - -typedef enum { - SVGA3D_FILLMODE_INVALID = 0, - SVGA3D_FILLMODE_POINT = 1, - SVGA3D_FILLMODE_LINE = 2, - SVGA3D_FILLMODE_FILL = 3, - SVGA3D_FILLMODE_MAX -} SVGA3dFillModeType; - - -typedef -union { - struct { - uint16 mode; /* SVGA3dFillModeType */ - uint16 face; /* SVGA3dFace */ - }; - uint32 uintValue; -} SVGA3dFillMode; - -typedef enum { - SVGA3D_SHADEMODE_INVALID = 0, - SVGA3D_SHADEMODE_FLAT = 1, - SVGA3D_SHADEMODE_SMOOTH = 2, - SVGA3D_SHADEMODE_PHONG = 3, /* Not supported */ - SVGA3D_SHADEMODE_MAX -} SVGA3dShadeMode; - -typedef -union { - struct { - uint16 repeat; - uint16 pattern; - }; - uint32 uintValue; -} SVGA3dLinePattern; - -typedef enum { - SVGA3D_BLENDOP_INVALID = 0, - SVGA3D_BLENDOP_ZERO = 1, - SVGA3D_BLENDOP_ONE = 2, - SVGA3D_BLENDOP_SRCCOLOR = 3, - SVGA3D_BLENDOP_INVSRCCOLOR = 4, - SVGA3D_BLENDOP_SRCALPHA = 5, - SVGA3D_BLENDOP_INVSRCALPHA = 6, - SVGA3D_BLENDOP_DESTALPHA = 7, - SVGA3D_BLENDOP_INVDESTALPHA = 8, - SVGA3D_BLENDOP_DESTCOLOR = 9, - SVGA3D_BLENDOP_INVDESTCOLOR = 10, - SVGA3D_BLENDOP_SRCALPHASAT = 11, - SVGA3D_BLENDOP_BLENDFACTOR = 12, - SVGA3D_BLENDOP_INVBLENDFACTOR = 13, - SVGA3D_BLENDOP_MAX -} SVGA3dBlendOp; - -typedef enum { - SVGA3D_BLENDEQ_INVALID = 0, - SVGA3D_BLENDEQ_ADD = 1, - 
SVGA3D_BLENDEQ_SUBTRACT = 2, - SVGA3D_BLENDEQ_REVSUBTRACT = 3, - SVGA3D_BLENDEQ_MINIMUM = 4, - SVGA3D_BLENDEQ_MAXIMUM = 5, - SVGA3D_BLENDEQ_MAX -} SVGA3dBlendEquation; - -typedef enum { - SVGA3D_FRONTWINDING_INVALID = 0, - SVGA3D_FRONTWINDING_CW = 1, - SVGA3D_FRONTWINDING_CCW = 2, - SVGA3D_FRONTWINDING_MAX -} SVGA3dFrontWinding; - -typedef enum { - SVGA3D_FACE_INVALID = 0, - SVGA3D_FACE_NONE = 1, - SVGA3D_FACE_FRONT = 2, - SVGA3D_FACE_BACK = 3, - SVGA3D_FACE_FRONT_BACK = 4, - SVGA3D_FACE_MAX -} SVGA3dFace; - -/* - * The order and the values should not be changed - */ - -typedef enum { - SVGA3D_CMP_INVALID = 0, - SVGA3D_CMP_NEVER = 1, - SVGA3D_CMP_LESS = 2, - SVGA3D_CMP_EQUAL = 3, - SVGA3D_CMP_LESSEQUAL = 4, - SVGA3D_CMP_GREATER = 5, - SVGA3D_CMP_NOTEQUAL = 6, - SVGA3D_CMP_GREATEREQUAL = 7, - SVGA3D_CMP_ALWAYS = 8, - SVGA3D_CMP_MAX -} SVGA3dCmpFunc; - -/* - * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows - * the fog factor to be specified in the alpha component of the specular - * (a.k.a. secondary) vertex color. - */ -typedef enum { - SVGA3D_FOGFUNC_INVALID = 0, - SVGA3D_FOGFUNC_EXP = 1, - SVGA3D_FOGFUNC_EXP2 = 2, - SVGA3D_FOGFUNC_LINEAR = 3, - SVGA3D_FOGFUNC_PER_VERTEX = 4 -} SVGA3dFogFunction; - -/* - * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex - * or per-pixel basis. - */ -typedef enum { - SVGA3D_FOGTYPE_INVALID = 0, - SVGA3D_FOGTYPE_VERTEX = 1, - SVGA3D_FOGTYPE_PIXEL = 2, - SVGA3D_FOGTYPE_MAX = 3 -} SVGA3dFogType; - -/* - * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is - * computed using the eye Z value of each pixel (or vertex), whereas range- - * based fog is computed using the actual distance (range) to the eye. - */ -typedef enum { - SVGA3D_FOGBASE_INVALID = 0, - SVGA3D_FOGBASE_DEPTHBASED = 1, - SVGA3D_FOGBASE_RANGEBASED = 2, - SVGA3D_FOGBASE_MAX = 3 -} SVGA3dFogBase; - -typedef enum { - SVGA3D_STENCILOP_INVALID = 0, - SVGA3D_STENCILOP_KEEP = 1, - SVGA3D_STENCILOP_ZERO = 2, - SVGA3D_STENCILOP_REPLACE = 3, - SVGA3D_STENCILOP_INCRSAT = 4, - SVGA3D_STENCILOP_DECRSAT = 5, - SVGA3D_STENCILOP_INVERT = 6, - SVGA3D_STENCILOP_INCR = 7, - SVGA3D_STENCILOP_DECR = 8, - SVGA3D_STENCILOP_MAX -} SVGA3dStencilOp; - -typedef enum { - SVGA3D_CLIPPLANE_0 = (1 << 0), - SVGA3D_CLIPPLANE_1 = (1 << 1), - SVGA3D_CLIPPLANE_2 = (1 << 2), - SVGA3D_CLIPPLANE_3 = (1 << 3), - SVGA3D_CLIPPLANE_4 = (1 << 4), - SVGA3D_CLIPPLANE_5 = (1 << 5), -} SVGA3dClipPlanes; - -typedef enum { - SVGA3D_CLEAR_COLOR = 0x1, - SVGA3D_CLEAR_DEPTH = 0x2, - SVGA3D_CLEAR_STENCIL = 0x4 -} SVGA3dClearFlag; - -typedef enum { - SVGA3D_RT_DEPTH = 0, - SVGA3D_RT_STENCIL = 1, - SVGA3D_RT_COLOR0 = 2, - SVGA3D_RT_COLOR1 = 3, - SVGA3D_RT_COLOR2 = 4, - SVGA3D_RT_COLOR3 = 5, - SVGA3D_RT_COLOR4 = 6, - SVGA3D_RT_COLOR5 = 7, - SVGA3D_RT_COLOR6 = 8, - SVGA3D_RT_COLOR7 = 9, - SVGA3D_RT_MAX, - SVGA3D_RT_INVALID = ((uint32)-1), -} SVGA3dRenderTargetType; - -#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1) - -typedef -union { - struct { - uint32 red : 1; - uint32 green : 1; - uint32 blue : 1; - uint32 alpha : 1; - }; - uint32 uintValue; -} SVGA3dColorMask; - -typedef enum { - SVGA3D_VBLEND_DISABLE = 0, - SVGA3D_VBLEND_1WEIGHT = 1, - SVGA3D_VBLEND_2WEIGHT = 2, - SVGA3D_VBLEND_3WEIGHT = 3, -} SVGA3dVertexBlendFlags; - -typedef enum { - SVGA3D_WRAPCOORD_0 = 1 << 0, - SVGA3D_WRAPCOORD_1 = 1 << 1, - SVGA3D_WRAPCOORD_2 = 1 << 2, - SVGA3D_WRAPCOORD_3 = 1 << 3, - SVGA3D_WRAPCOORD_ALL = 0xF, -} SVGA3dWrapFlags; - -/* - * SVGA_3D_CMD_TEXTURESTATE Types. 
All value types - * must fit in a uint32. - */ - -typedef enum { - SVGA3D_TS_INVALID = 0, - SVGA3D_TS_BIND_TEXTURE = 1, /* SVGA3dSurfaceId */ - SVGA3D_TS_COLOROP = 2, /* SVGA3dTextureCombiner */ - SVGA3D_TS_COLORARG1 = 3, /* SVGA3dTextureArgData */ - SVGA3D_TS_COLORARG2 = 4, /* SVGA3dTextureArgData */ - SVGA3D_TS_ALPHAOP = 5, /* SVGA3dTextureCombiner */ - SVGA3D_TS_ALPHAARG1 = 6, /* SVGA3dTextureArgData */ - SVGA3D_TS_ALPHAARG2 = 7, /* SVGA3dTextureArgData */ - SVGA3D_TS_ADDRESSU = 8, /* SVGA3dTextureAddress */ - SVGA3D_TS_ADDRESSV = 9, /* SVGA3dTextureAddress */ - SVGA3D_TS_MIPFILTER = 10, /* SVGA3dTextureFilter */ - SVGA3D_TS_MAGFILTER = 11, /* SVGA3dTextureFilter */ - SVGA3D_TS_MINFILTER = 12, /* SVGA3dTextureFilter */ - SVGA3D_TS_BORDERCOLOR = 13, /* SVGA3dColor */ - SVGA3D_TS_TEXCOORDINDEX = 14, /* uint32 */ - SVGA3D_TS_TEXTURETRANSFORMFLAGS = 15, /* SVGA3dTexTransformFlags */ - SVGA3D_TS_TEXCOORDGEN = 16, /* SVGA3dTextureCoordGen */ - SVGA3D_TS_BUMPENVMAT00 = 17, /* float */ - SVGA3D_TS_BUMPENVMAT01 = 18, /* float */ - SVGA3D_TS_BUMPENVMAT10 = 19, /* float */ - SVGA3D_TS_BUMPENVMAT11 = 20, /* float */ - SVGA3D_TS_TEXTURE_MIPMAP_LEVEL = 21, /* uint32 */ - SVGA3D_TS_TEXTURE_LOD_BIAS = 22, /* float */ - SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL = 23, /* uint32 */ - SVGA3D_TS_ADDRESSW = 24, /* SVGA3dTextureAddress */ - - - /* - * Sampler Gamma Level - * - * Sampler gamma effects the color of samples taken from the sampler. A - * value of 1.0 will produce linear samples. If the value is <= 0.0 the - * gamma value is ignored and a linear space is used. - */ - - SVGA3D_TS_GAMMA = 25, /* float */ - SVGA3D_TS_BUMPENVLSCALE = 26, /* float */ - SVGA3D_TS_BUMPENVLOFFSET = 27, /* float */ - SVGA3D_TS_COLORARG0 = 28, /* SVGA3dTextureArgData */ - SVGA3D_TS_ALPHAARG0 = 29, /* SVGA3dTextureArgData */ - SVGA3D_TS_MAX -} SVGA3dTextureStateName; - -typedef enum { - SVGA3D_TC_INVALID = 0, - SVGA3D_TC_DISABLE = 1, - SVGA3D_TC_SELECTARG1 = 2, - SVGA3D_TC_SELECTARG2 = 3, - SVGA3D_TC_MODULATE = 4, - SVGA3D_TC_ADD = 5, - SVGA3D_TC_ADDSIGNED = 6, - SVGA3D_TC_SUBTRACT = 7, - SVGA3D_TC_BLENDTEXTUREALPHA = 8, - SVGA3D_TC_BLENDDIFFUSEALPHA = 9, - SVGA3D_TC_BLENDCURRENTALPHA = 10, - SVGA3D_TC_BLENDFACTORALPHA = 11, - SVGA3D_TC_MODULATE2X = 12, - SVGA3D_TC_MODULATE4X = 13, - SVGA3D_TC_DSDT = 14, - SVGA3D_TC_DOTPRODUCT3 = 15, - SVGA3D_TC_BLENDTEXTUREALPHAPM = 16, - SVGA3D_TC_ADDSIGNED2X = 17, - SVGA3D_TC_ADDSMOOTH = 18, - SVGA3D_TC_PREMODULATE = 19, - SVGA3D_TC_MODULATEALPHA_ADDCOLOR = 20, - SVGA3D_TC_MODULATECOLOR_ADDALPHA = 21, - SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22, - SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23, - SVGA3D_TC_BUMPENVMAPLUMINANCE = 24, - SVGA3D_TC_MULTIPLYADD = 25, - SVGA3D_TC_LERP = 26, - SVGA3D_TC_MAX -} SVGA3dTextureCombiner; - -#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0) - -typedef enum { - SVGA3D_TEX_ADDRESS_INVALID = 0, - SVGA3D_TEX_ADDRESS_WRAP = 1, - SVGA3D_TEX_ADDRESS_MIRROR = 2, - SVGA3D_TEX_ADDRESS_CLAMP = 3, - SVGA3D_TEX_ADDRESS_BORDER = 4, - SVGA3D_TEX_ADDRESS_MIRRORONCE = 5, - SVGA3D_TEX_ADDRESS_EDGE = 6, - SVGA3D_TEX_ADDRESS_MAX -} SVGA3dTextureAddress; - -/* - * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is - * disabled, and the rasterizer should use the magnification filter instead. 
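The SVGA3D_TC_CAP_BIT macro above shifts each combiner value down by one so the capability word spends no bit on SVGA3D_TC_INVALID. A sketch, with tc_caps standing in for a hypothetical capability dword queried from the host:

    /* SVGA3D_TC_MODULATE == 4, so this tests bit (1 << 3) */
    if (tc_caps & SVGA3D_TC_CAP_BIT(SVGA3D_TC_MODULATE)) {
            /* host advertises the MODULATE combiner */
    }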
- */ -typedef enum { - SVGA3D_TEX_FILTER_NONE = 0, - SVGA3D_TEX_FILTER_NEAREST = 1, - SVGA3D_TEX_FILTER_LINEAR = 2, - SVGA3D_TEX_FILTER_ANISOTROPIC = 3, - SVGA3D_TEX_FILTER_FLATCUBIC = 4, /* Deprecated, not implemented */ - SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, /* Deprecated, not implemented */ - SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, /* Not currently implemented */ - SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, /* Not currently implemented */ - SVGA3D_TEX_FILTER_MAX -} SVGA3dTextureFilter; - -typedef enum { - SVGA3D_TEX_TRANSFORM_OFF = 0, - SVGA3D_TEX_TRANSFORM_S = (1 << 0), - SVGA3D_TEX_TRANSFORM_T = (1 << 1), - SVGA3D_TEX_TRANSFORM_R = (1 << 2), - SVGA3D_TEX_TRANSFORM_Q = (1 << 3), - SVGA3D_TEX_PROJECTED = (1 << 15), -} SVGA3dTexTransformFlags; - -typedef enum { - SVGA3D_TEXCOORD_GEN_OFF = 0, - SVGA3D_TEXCOORD_GEN_EYE_POSITION = 1, - SVGA3D_TEXCOORD_GEN_EYE_NORMAL = 2, - SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3, - SVGA3D_TEXCOORD_GEN_SPHERE = 4, - SVGA3D_TEXCOORD_GEN_MAX -} SVGA3dTextureCoordGen; - -/* - * Texture argument constants for texture combiner - */ -typedef enum { - SVGA3D_TA_INVALID = 0, - SVGA3D_TA_CONSTANT = 1, - SVGA3D_TA_PREVIOUS = 2, - SVGA3D_TA_DIFFUSE = 3, - SVGA3D_TA_TEXTURE = 4, - SVGA3D_TA_SPECULAR = 5, - SVGA3D_TA_MAX -} SVGA3dTextureArgData; - -#define SVGA3D_TM_MASK_LEN 4 - -/* Modifiers for texture argument constants defined above. */ -typedef enum { - SVGA3D_TM_NONE = 0, - SVGA3D_TM_ALPHA = (1 << SVGA3D_TM_MASK_LEN), - SVGA3D_TM_ONE_MINUS = (2 << SVGA3D_TM_MASK_LEN), -} SVGA3dTextureArgModifier; - -#define SVGA3D_INVALID_ID ((uint32)-1) -#define SVGA3D_MAX_CLIP_PLANES 6 - -/* - * This is the limit to the number of fixed-function texture - * transforms and texture coordinates we can support. It does *not* - * correspond to the number of texture image units (samplers) we - * support! - */ -#define SVGA3D_MAX_TEXTURE_COORDS 8 - -/* - * Vertex declarations - * - * Notes: - * - * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you - * draw with any POSITIONT vertex arrays, the programmable vertex - * pipeline will be implicitly disabled. Drawing will take place as if - * no vertex shader was bound. 
- */ - -typedef enum { - SVGA3D_DECLUSAGE_POSITION = 0, - SVGA3D_DECLUSAGE_BLENDWEIGHT, /* 1 */ - SVGA3D_DECLUSAGE_BLENDINDICES, /* 2 */ - SVGA3D_DECLUSAGE_NORMAL, /* 3 */ - SVGA3D_DECLUSAGE_PSIZE, /* 4 */ - SVGA3D_DECLUSAGE_TEXCOORD, /* 5 */ - SVGA3D_DECLUSAGE_TANGENT, /* 6 */ - SVGA3D_DECLUSAGE_BINORMAL, /* 7 */ - SVGA3D_DECLUSAGE_TESSFACTOR, /* 8 */ - SVGA3D_DECLUSAGE_POSITIONT, /* 9 */ - SVGA3D_DECLUSAGE_COLOR, /* 10 */ - SVGA3D_DECLUSAGE_FOG, /* 11 */ - SVGA3D_DECLUSAGE_DEPTH, /* 12 */ - SVGA3D_DECLUSAGE_SAMPLE, /* 13 */ - SVGA3D_DECLUSAGE_MAX -} SVGA3dDeclUsage; - -typedef enum { - SVGA3D_DECLMETHOD_DEFAULT = 0, - SVGA3D_DECLMETHOD_PARTIALU, - SVGA3D_DECLMETHOD_PARTIALV, - SVGA3D_DECLMETHOD_CROSSUV, /* Normal */ - SVGA3D_DECLMETHOD_UV, - SVGA3D_DECLMETHOD_LOOKUP, /* Lookup a displacement map */ - SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement map */ -} SVGA3dDeclMethod; - -typedef enum { - SVGA3D_DECLTYPE_FLOAT1 = 0, - SVGA3D_DECLTYPE_FLOAT2 = 1, - SVGA3D_DECLTYPE_FLOAT3 = 2, - SVGA3D_DECLTYPE_FLOAT4 = 3, - SVGA3D_DECLTYPE_D3DCOLOR = 4, - SVGA3D_DECLTYPE_UBYTE4 = 5, - SVGA3D_DECLTYPE_SHORT2 = 6, - SVGA3D_DECLTYPE_SHORT4 = 7, - SVGA3D_DECLTYPE_UBYTE4N = 8, - SVGA3D_DECLTYPE_SHORT2N = 9, - SVGA3D_DECLTYPE_SHORT4N = 10, - SVGA3D_DECLTYPE_USHORT2N = 11, - SVGA3D_DECLTYPE_USHORT4N = 12, - SVGA3D_DECLTYPE_UDEC3 = 13, - SVGA3D_DECLTYPE_DEC3N = 14, - SVGA3D_DECLTYPE_FLOAT16_2 = 15, - SVGA3D_DECLTYPE_FLOAT16_4 = 16, - SVGA3D_DECLTYPE_MAX, -} SVGA3dDeclType; - -/* - * This structure is used for the divisor for geometry instancing; - * it's a direct translation of the Direct3D equivalent. - */ -typedef union { - struct { - /* - * For index data, this number represents the number of instances to draw. - * For instance data, this number represents the number of - * instances/vertex in this stream - */ - uint32 count : 30; - - /* - * This is 1 if this is supposed to be the data that is repeated for - * every instance. - */ - uint32 indexedData : 1; - - /* - * This is 1 if this is supposed to be the per-instance data. 
- */ - uint32 instanceData : 1; - }; - - uint32 value; -} SVGA3dVertexDivisor; - -typedef enum { - SVGA3D_PRIMITIVE_INVALID = 0, - SVGA3D_PRIMITIVE_TRIANGLELIST = 1, - SVGA3D_PRIMITIVE_POINTLIST = 2, - SVGA3D_PRIMITIVE_LINELIST = 3, - SVGA3D_PRIMITIVE_LINESTRIP = 4, - SVGA3D_PRIMITIVE_TRIANGLESTRIP = 5, - SVGA3D_PRIMITIVE_TRIANGLEFAN = 6, - SVGA3D_PRIMITIVE_MAX -} SVGA3dPrimitiveType; - -typedef enum { - SVGA3D_COORDINATE_INVALID = 0, - SVGA3D_COORDINATE_LEFTHANDED = 1, - SVGA3D_COORDINATE_RIGHTHANDED = 2, - SVGA3D_COORDINATE_MAX -} SVGA3dCoordinateType; - -typedef enum { - SVGA3D_TRANSFORM_INVALID = 0, - SVGA3D_TRANSFORM_WORLD = 1, - SVGA3D_TRANSFORM_VIEW = 2, - SVGA3D_TRANSFORM_PROJECTION = 3, - SVGA3D_TRANSFORM_TEXTURE0 = 4, - SVGA3D_TRANSFORM_TEXTURE1 = 5, - SVGA3D_TRANSFORM_TEXTURE2 = 6, - SVGA3D_TRANSFORM_TEXTURE3 = 7, - SVGA3D_TRANSFORM_TEXTURE4 = 8, - SVGA3D_TRANSFORM_TEXTURE5 = 9, - SVGA3D_TRANSFORM_TEXTURE6 = 10, - SVGA3D_TRANSFORM_TEXTURE7 = 11, - SVGA3D_TRANSFORM_WORLD1 = 12, - SVGA3D_TRANSFORM_WORLD2 = 13, - SVGA3D_TRANSFORM_WORLD3 = 14, - SVGA3D_TRANSFORM_MAX -} SVGA3dTransformType; - -typedef enum { - SVGA3D_LIGHTTYPE_INVALID = 0, - SVGA3D_LIGHTTYPE_POINT = 1, - SVGA3D_LIGHTTYPE_SPOT1 = 2, /* 1-cone, in degrees */ - SVGA3D_LIGHTTYPE_SPOT2 = 3, /* 2-cone, in radians */ - SVGA3D_LIGHTTYPE_DIRECTIONAL = 4, - SVGA3D_LIGHTTYPE_MAX -} SVGA3dLightType; - -typedef enum { - SVGA3D_CUBEFACE_POSX = 0, - SVGA3D_CUBEFACE_NEGX = 1, - SVGA3D_CUBEFACE_POSY = 2, - SVGA3D_CUBEFACE_NEGY = 3, - SVGA3D_CUBEFACE_POSZ = 4, - SVGA3D_CUBEFACE_NEGZ = 5, -} SVGA3dCubeFace; - -typedef enum { - SVGA3D_SHADERTYPE_INVALID = 0, - SVGA3D_SHADERTYPE_MIN = 1, - SVGA3D_SHADERTYPE_VS = 1, - SVGA3D_SHADERTYPE_PS = 2, - SVGA3D_SHADERTYPE_MAX = 3, - SVGA3D_SHADERTYPE_GS = 3, -} SVGA3dShaderType; - -#define SVGA3D_NUM_SHADERTYPE (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN) - -typedef enum { - SVGA3D_CONST_TYPE_FLOAT = 0, - SVGA3D_CONST_TYPE_INT = 1, - SVGA3D_CONST_TYPE_BOOL = 2, - SVGA3D_CONST_TYPE_MAX -} SVGA3dShaderConstType; - -#define SVGA3D_MAX_SURFACE_FACES 6 - -typedef enum { - SVGA3D_STRETCH_BLT_POINT = 0, - SVGA3D_STRETCH_BLT_LINEAR = 1, - SVGA3D_STRETCH_BLT_MAX -} SVGA3dStretchBltMode; - -typedef enum { - SVGA3D_QUERYTYPE_OCCLUSION = 0, - SVGA3D_QUERYTYPE_MAX -} SVGA3dQueryType; - -typedef enum { - SVGA3D_QUERYSTATE_PENDING = 0, /* Waiting on the host (set by guest) */ - SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully (set by host) */ - SVGA3D_QUERYSTATE_FAILED = 2, /* Completed unsuccessfully (set by host) */ - SVGA3D_QUERYSTATE_NEW = 3, /* Never submitted (For guest use only) */ -} SVGA3dQueryState; - -typedef enum { - SVGA3D_WRITE_HOST_VRAM = 1, - SVGA3D_READ_HOST_VRAM = 2, -} SVGA3dTransferType; - -/* - * The maximum number of vertex arrays we're guaranteed to support in - * SVGA_3D_CMD_DRAWPRIMITIVES. - */ -#define SVGA3D_MAX_VERTEX_ARRAYS 32 - -/* - * The maximum number of primitive ranges we're guaranteed to support - * in SVGA_3D_CMD_DRAWPRIMITIVES. - */ -#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32 - -/* - * Identifiers for commands in the command FIFO. - * - * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of - * the SVGA3D protocol and remain reserved; they should not be used in the - * future. - * - * IDs between 1040 and 1999 (inclusive) are available for use by the - * current SVGA3D protocol. - * - * FIFO clients other than SVGA3D should stay below 1000, or at 2000 - * and up. 
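Those ranges give a direct validity test for a 3D command ID. A minimal sketch against the SVGA_3D_CMD_* constants defined just below, treating SVGA_3D_CMD_MAX (1142) as the last allocated ID, which is how these defines read:

    static inline bool svga3d_cmd_id_is_current(uint32 id)
    {
            return id >= SVGA_3D_CMD_BASE && id <= SVGA_3D_CMD_MAX;
    }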
- */ - -#define SVGA_3D_CMD_LEGACY_BASE 1000 -#define SVGA_3D_CMD_BASE 1040 - -#define SVGA_3D_CMD_SURFACE_DEFINE SVGA_3D_CMD_BASE + 0 /* Deprecated */ -#define SVGA_3D_CMD_SURFACE_DESTROY SVGA_3D_CMD_BASE + 1 -#define SVGA_3D_CMD_SURFACE_COPY SVGA_3D_CMD_BASE + 2 -#define SVGA_3D_CMD_SURFACE_STRETCHBLT SVGA_3D_CMD_BASE + 3 -#define SVGA_3D_CMD_SURFACE_DMA SVGA_3D_CMD_BASE + 4 -#define SVGA_3D_CMD_CONTEXT_DEFINE SVGA_3D_CMD_BASE + 5 -#define SVGA_3D_CMD_CONTEXT_DESTROY SVGA_3D_CMD_BASE + 6 -#define SVGA_3D_CMD_SETTRANSFORM SVGA_3D_CMD_BASE + 7 -#define SVGA_3D_CMD_SETZRANGE SVGA_3D_CMD_BASE + 8 -#define SVGA_3D_CMD_SETRENDERSTATE SVGA_3D_CMD_BASE + 9 -#define SVGA_3D_CMD_SETRENDERTARGET SVGA_3D_CMD_BASE + 10 -#define SVGA_3D_CMD_SETTEXTURESTATE SVGA_3D_CMD_BASE + 11 -#define SVGA_3D_CMD_SETMATERIAL SVGA_3D_CMD_BASE + 12 -#define SVGA_3D_CMD_SETLIGHTDATA SVGA_3D_CMD_BASE + 13 -#define SVGA_3D_CMD_SETLIGHTENABLED SVGA_3D_CMD_BASE + 14 -#define SVGA_3D_CMD_SETVIEWPORT SVGA_3D_CMD_BASE + 15 -#define SVGA_3D_CMD_SETCLIPPLANE SVGA_3D_CMD_BASE + 16 -#define SVGA_3D_CMD_CLEAR SVGA_3D_CMD_BASE + 17 -#define SVGA_3D_CMD_PRESENT SVGA_3D_CMD_BASE + 18 /* Deprecated */ -#define SVGA_3D_CMD_SHADER_DEFINE SVGA_3D_CMD_BASE + 19 -#define SVGA_3D_CMD_SHADER_DESTROY SVGA_3D_CMD_BASE + 20 -#define SVGA_3D_CMD_SET_SHADER SVGA_3D_CMD_BASE + 21 -#define SVGA_3D_CMD_SET_SHADER_CONST SVGA_3D_CMD_BASE + 22 -#define SVGA_3D_CMD_DRAW_PRIMITIVES SVGA_3D_CMD_BASE + 23 -#define SVGA_3D_CMD_SETSCISSORRECT SVGA_3D_CMD_BASE + 24 -#define SVGA_3D_CMD_BEGIN_QUERY SVGA_3D_CMD_BASE + 25 -#define SVGA_3D_CMD_END_QUERY SVGA_3D_CMD_BASE + 26 -#define SVGA_3D_CMD_WAIT_FOR_QUERY SVGA_3D_CMD_BASE + 27 -#define SVGA_3D_CMD_PRESENT_READBACK SVGA_3D_CMD_BASE + 28 /* Deprecated */ -#define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29 -#define SVGA_3D_CMD_SURFACE_DEFINE_V2 SVGA_3D_CMD_BASE + 30 -#define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31 -#define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40 -#define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41 -#define SVGA_3D_CMD_SCREEN_DMA 1082 -#define SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE 1083 -#define SVGA_3D_CMD_OPEN_CONTEXT_SURFACE 1084 - -#define SVGA_3D_CMD_LOGICOPS_BITBLT 1085 -#define SVGA_3D_CMD_LOGICOPS_TRANSBLT 1086 -#define SVGA_3D_CMD_LOGICOPS_STRETCHBLT 1087 -#define SVGA_3D_CMD_LOGICOPS_COLORFILL 1088 -#define SVGA_3D_CMD_LOGICOPS_ALPHABLEND 1089 -#define SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND 1090 - -#define SVGA_3D_CMD_SET_OTABLE_BASE 1091 -#define SVGA_3D_CMD_READBACK_OTABLE 1092 - -#define SVGA_3D_CMD_DEFINE_GB_MOB 1093 -#define SVGA_3D_CMD_DESTROY_GB_MOB 1094 -#define SVGA_3D_CMD_REDEFINE_GB_MOB 1095 -#define SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING 1096 - -#define SVGA_3D_CMD_DEFINE_GB_SURFACE 1097 -#define SVGA_3D_CMD_DESTROY_GB_SURFACE 1098 -#define SVGA_3D_CMD_BIND_GB_SURFACE 1099 -#define SVGA_3D_CMD_COND_BIND_GB_SURFACE 1100 -#define SVGA_3D_CMD_UPDATE_GB_IMAGE 1101 -#define SVGA_3D_CMD_UPDATE_GB_SURFACE 1102 -#define SVGA_3D_CMD_READBACK_GB_IMAGE 1103 -#define SVGA_3D_CMD_READBACK_GB_SURFACE 1104 -#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE 1105 -#define SVGA_3D_CMD_INVALIDATE_GB_SURFACE 1106 - -#define SVGA_3D_CMD_DEFINE_GB_CONTEXT 1107 -#define SVGA_3D_CMD_DESTROY_GB_CONTEXT 1108 -#define SVGA_3D_CMD_BIND_GB_CONTEXT 1109 -#define SVGA_3D_CMD_READBACK_GB_CONTEXT 1110 -#define SVGA_3D_CMD_INVALIDATE_GB_CONTEXT 1111 - -#define SVGA_3D_CMD_DEFINE_GB_SHADER 1112 -#define SVGA_3D_CMD_DESTROY_GB_SHADER 1113 -#define SVGA_3D_CMD_BIND_GB_SHADER 1114 - 
-#define SVGA_3D_CMD_SET_OTABLE_BASE64 1115 - -#define SVGA_3D_CMD_BEGIN_GB_QUERY 1116 -#define SVGA_3D_CMD_END_GB_QUERY 1117 -#define SVGA_3D_CMD_WAIT_FOR_GB_QUERY 1118 - -#define SVGA_3D_CMD_NOP 1119 - -#define SVGA_3D_CMD_ENABLE_GART 1120 -#define SVGA_3D_CMD_DISABLE_GART 1121 -#define SVGA_3D_CMD_MAP_MOB_INTO_GART 1122 -#define SVGA_3D_CMD_UNMAP_GART_RANGE 1123 - -#define SVGA_3D_CMD_DEFINE_GB_SCREENTARGET 1124 -#define SVGA_3D_CMD_DESTROY_GB_SCREENTARGET 1125 -#define SVGA_3D_CMD_BIND_GB_SCREENTARGET 1126 -#define SVGA_3D_CMD_UPDATE_GB_SCREENTARGET 1127 - -#define SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL 1128 -#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129 - -#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130 -#define SVGA_3D_CMD_GB_SCREEN_DMA 1131 -#define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH 1132 -#define SVGA_3D_CMD_GB_MOB_FENCE 1133 -#define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 1134 -#define SVGA_3D_CMD_DEFINE_GB_MOB64 1135 -#define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136 -#define SVGA_3D_CMD_NOP_ERROR 1137 - -#define SVGA_3D_CMD_RESERVED1 1138 -#define SVGA_3D_CMD_RESERVED2 1139 -#define SVGA_3D_CMD_RESERVED3 1140 -#define SVGA_3D_CMD_RESERVED4 1141 -#define SVGA_3D_CMD_RESERVED5 1142 - -#define SVGA_3D_CMD_MAX 1142 -#define SVGA_3D_CMD_FUTURE_MAX 3000 - -/* - * Common substructures used in multiple FIFO commands: - */ - -typedef struct { - union { - struct { - uint16 function; /* SVGA3dFogFunction */ - uint8 type; /* SVGA3dFogType */ - uint8 base; /* SVGA3dFogBase */ - }; - uint32 uintValue; - }; -} SVGA3dFogMode; - -/* - * Uniquely identify one image (a 1D/2D/3D array) from a surface. This - * is a surface ID as well as face/mipmap indices. - */ - -typedef -struct SVGA3dSurfaceImageId { - uint32 sid; - uint32 face; - uint32 mipmap; -} SVGA3dSurfaceImageId; - -typedef -struct SVGA3dGuestImage { - SVGAGuestPtr ptr; - - /* - * A note on interpretation of pitch: This value of pitch is the - * number of bytes between vertically adjacent image - * blocks. Normally this is the number of bytes between the first - * pixel of two adjacent scanlines. With compressed textures, - * however, this may represent the number of bytes between - * compression blocks rather than between rows of pixels. - * - * XXX: Compressed textures currently must be tightly packed in guest memory. - * - * If the image is 1-dimensional, pitch is ignored. - * - * If 'pitch' is zero, the SVGA3D device calculates a pitch value - * assuming each row of blocks is tightly packed. - */ - uint32 pitch; -} SVGA3dGuestImage; - - -/* - * FIFO command format definitions: - */ - -/* - * The data size header following cmdNum for every 3d command - */ -typedef -struct { - uint32 id; - uint32 size; -} SVGA3dCmdHeader; - -/* - * A surface is a hierarchy of host VRAM surfaces: 1D, 2D, or 3D, with - * optional mipmaps and cube faces. 
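Every 3D command goes on the wire as that SVGA3dCmdHeader immediately followed by size bytes of body, where size counts the body only, not the header itself. A sketch of emitting one such command; fifo_reserve() and fifo_commit() are hypothetical stand-ins for a driver's real FIFO accessors:

    struct {
            SVGA3dCmdHeader header;
            SVGA3dCmdDestroySurface body;   /* defined further down */
    } *cmd = fifo_reserve(sizeof(*cmd));

    cmd->header.id   = SVGA_3D_CMD_SURFACE_DESTROY;
    cmd->header.size = sizeof(cmd->body);
    cmd->body.sid    = sid;
    fifo_commit(sizeof(*cmd));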
- */ - -typedef -struct { - uint32 width; - uint32 height; - uint32 depth; -} SVGA3dSize; - -typedef enum { - SVGA3D_SURFACE_CUBEMAP = (1 << 0), - SVGA3D_SURFACE_HINT_STATIC = (1 << 1), - SVGA3D_SURFACE_HINT_DYNAMIC = (1 << 2), - SVGA3D_SURFACE_HINT_INDEXBUFFER = (1 << 3), - SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4), - SVGA3D_SURFACE_HINT_TEXTURE = (1 << 5), - SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6), - SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7), - SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8), - SVGA3D_SURFACE_MASKABLE_ANTIALIAS = (1 << 9), - SVGA3D_SURFACE_AUTOGENMIPMAPS = (1 << 10), -} SVGA3dSurfaceFlags; - -typedef -struct { - uint32 numMipLevels; -} SVGA3dSurfaceFace; - -typedef -struct { - uint32 sid; - SVGA3dSurfaceFlags surfaceFlags; - SVGA3dSurfaceFormat format; - /* - * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace - * structures must have the same value of numMipLevels field. - * Otherwise, all but the first SVGA3dSurfaceFace structures must have the - * numMipLevels set to 0. - */ - SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES]; - /* - * Followed by an SVGA3dSize structure for each mip level in each face. - * - * A note on surface sizes: Sizes are always specified in pixels, - * even if the true surface size is not a multiple of the minimum - * block size of the surface's format. For example, a 3x3x1 DXT1 - * compressed texture would actually be stored as a 4x4x1 image in - * memory. - */ -} SVGA3dCmdDefineSurface; /* SVGA_3D_CMD_SURFACE_DEFINE */ - -typedef -struct { - uint32 sid; - SVGA3dSurfaceFlags surfaceFlags; - SVGA3dSurfaceFormat format; - /* - * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace - * structures must have the same value of numMipLevels field. - * Otherwise, all but the first SVGA3dSurfaceFace structures must have the - * numMipLevels set to 0. - */ - SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES]; - uint32 multisampleCount; - SVGA3dTextureFilter autogenFilter; - /* - * Followed by an SVGA3dSize structure for each mip level in each face. - * - * A note on surface sizes: Sizes are always specified in pixels, - * even if the true surface size is not a multiple of the minimum - * block size of the surface's format. For example, a 3x3x1 DXT1 - * compressed texture would actually be stored as a 4x4x1 image in - * memory. 
- */ -} SVGA3dCmdDefineSurface_v2; /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */ - -typedef -struct { - uint32 sid; -} SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */ - -typedef -struct { - uint32 cid; -} SVGA3dCmdDefineContext; /* SVGA_3D_CMD_CONTEXT_DEFINE */ - -typedef -struct { - uint32 cid; -} SVGA3dCmdDestroyContext; /* SVGA_3D_CMD_CONTEXT_DESTROY */ - -typedef -struct { - uint32 cid; - SVGA3dClearFlag clearFlag; - uint32 color; - float depth; - uint32 stencil; - /* Followed by variable number of SVGA3dRect structures */ -} SVGA3dCmdClear; /* SVGA_3D_CMD_CLEAR */ - -typedef -struct SVGA3dCopyRect { - uint32 x; - uint32 y; - uint32 w; - uint32 h; - uint32 srcx; - uint32 srcy; -} SVGA3dCopyRect; - -typedef -struct SVGA3dCopyBox { - uint32 x; - uint32 y; - uint32 z; - uint32 w; - uint32 h; - uint32 d; - uint32 srcx; - uint32 srcy; - uint32 srcz; -} SVGA3dCopyBox; - -typedef -struct { - uint32 x; - uint32 y; - uint32 w; - uint32 h; -} SVGA3dRect; - -typedef -struct { - uint32 x; - uint32 y; - uint32 z; - uint32 w; - uint32 h; - uint32 d; -} SVGA3dBox; - -typedef -struct { - uint32 x; - uint32 y; - uint32 z; -} SVGA3dPoint; - -typedef -struct { - SVGA3dLightType type; - SVGA3dBool inWorldSpace; - float diffuse[4]; - float specular[4]; - float ambient[4]; - float position[4]; - float direction[4]; - float range; - float falloff; - float attenuation0; - float attenuation1; - float attenuation2; - float theta; - float phi; -} SVGA3dLightData; - -typedef -struct { - uint32 sid; - /* Followed by variable number of SVGA3dCopyRect structures */ -} SVGA3dCmdPresent; /* SVGA_3D_CMD_PRESENT */ - -typedef -struct { - SVGA3dRenderStateName state; - union { - uint32 uintValue; - float floatValue; - }; -} SVGA3dRenderState; - -typedef -struct { - uint32 cid; - /* Followed by variable number of SVGA3dRenderState structures */ -} SVGA3dCmdSetRenderState; /* SVGA_3D_CMD_SETRENDERSTATE */ - -typedef -struct { - uint32 cid; - SVGA3dRenderTargetType type; - SVGA3dSurfaceImageId target; -} SVGA3dCmdSetRenderTarget; /* SVGA_3D_CMD_SETRENDERTARGET */ - -typedef -struct { - SVGA3dSurfaceImageId src; - SVGA3dSurfaceImageId dest; - /* Followed by variable number of SVGA3dCopyBox structures */ -} SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */ - -typedef -struct { - SVGA3dSurfaceImageId src; - SVGA3dSurfaceImageId dest; - SVGA3dBox boxSrc; - SVGA3dBox boxDest; - SVGA3dStretchBltMode mode; -} SVGA3dCmdSurfaceStretchBlt; /* SVGA_3D_CMD_SURFACE_STRETCHBLT */ - -typedef -struct { - /* - * If the discard flag is present in a surface DMA operation, the host may - * discard the contents of the current mipmap level and face of the target - * surface before applying the surface DMA contents. - */ - uint32 discard : 1; - - /* - * If the unsynchronized flag is present, the host may perform this upload - * without syncing to pending reads on this surface. - */ - uint32 unsynchronized : 1; - - /* - * Guests *MUST* set the reserved bits to 0 before submitting the command - * suffix as future flags may occupy these bits. - */ - uint32 reserved : 30; -} SVGA3dSurfaceDMAFlags; - -typedef -struct { - SVGA3dGuestImage guest; - SVGA3dSurfaceImageId host; - SVGA3dTransferType transfer; - /* - * Followed by variable number of SVGA3dCopyBox structures. For consistency - * in all clipping logic and coordinate translation, we define the - * "source" in each copyBox as the guest image and the - * "destination" as the host image, regardless of transfer - * direction. 
- * - * For efficiency, the SVGA3D device is free to copy more data than - * specified. For example, it may round copy boxes outwards such - * that they lie on particular alignment boundaries. - */ -} SVGA3dCmdSurfaceDMA; /* SVGA_3D_CMD_SURFACE_DMA */ - -/* - * SVGA3dCmdSurfaceDMASuffix -- - * - * This is a command suffix that will appear after a SurfaceDMA command in - * the FIFO. It contains some extra information that hosts may use to - * optimize performance or protect the guest. This suffix exists to preserve - * backwards compatibility while also allowing for new functionality to be - * implemented. - */ - -typedef -struct { - uint32 suffixSize; - - /* - * The maximum offset is used to determine the maximum offset from the - * guestPtr base address that will be accessed or written to during this - * surfaceDMA. If the suffix is supported, the host will respect this - * boundary while performing surface DMAs. - * - * Defaults to MAX_UINT32 - */ - uint32 maximumOffset; - - /* - * A set of flags that describes optimizations that the host may perform - * while performing this surface DMA operation. The guest should never rely - * on behaviour that is different when these flags are set for correctness. - * - * Defaults to 0 - */ - SVGA3dSurfaceDMAFlags flags; -} SVGA3dCmdSurfaceDMASuffix; - -/* - * SVGA_3D_CMD_DRAW_PRIMITIVES -- - * - * This command is the SVGA3D device's generic drawing entry point. - * It can draw multiple ranges of primitives, optionally using an - * index buffer, using an arbitrary collection of vertex buffers. - * - * Each SVGA3dVertexDecl defines a distinct vertex array to bind - * during this draw call. The declarations specify which surface - * the vertex data lives in, what that vertex data is used for, - * and how to interpret it. - * - * Each SVGA3dPrimitiveRange defines a collection of primitives - * to render using the same vertex arrays. An index buffer is - * optional. - */ - -typedef -struct { - /* - * A range hint is an optional specification for the range of indices - * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed - * that the entire array will be used. - * - * These are only hints. The SVGA3D device may use them for - * performance optimization if possible, but it's also allowed to - * ignore these values. - */ - uint32 first; - uint32 last; -} SVGA3dArrayRangeHint; - -typedef -struct { - /* - * Define the origin and shape of a vertex or index array. Both - * 'offset' and 'stride' are in bytes. The provided surface will be - * reinterpreted as a flat array of bytes in the same format used - * by surface DMA operations. To avoid unnecessary conversions, the - * surface should be created with the SVGA3D_BUFFER format. - * - * Index 0 in the array starts 'offset' bytes into the surface. - * Index 1 begins at byte 'offset + stride', etc. Array indices may - * not be negative. - */ - uint32 surfaceId; - uint32 offset; - uint32 stride; -} SVGA3dArray; - -typedef -struct { - /* - * Describe a vertex array's data type, and define how it is to be - * used by the fixed function pipeline or the vertex shader. It - * isn't useful to have two VertexDecls with the same - * VertexArrayIdentity in one draw call. 
- */ - SVGA3dDeclType type; - SVGA3dDeclMethod method; - SVGA3dDeclUsage usage; - uint32 usageIndex; -} SVGA3dVertexArrayIdentity; - -typedef -struct { - SVGA3dVertexArrayIdentity identity; - SVGA3dArray array; - SVGA3dArrayRangeHint rangeHint; -} SVGA3dVertexDecl; - -typedef -struct { - /* - * Define a group of primitives to render, from sequential indices. - * - * The value of 'primitiveType' and 'primitiveCount' imply the - * total number of vertices that will be rendered. - */ - SVGA3dPrimitiveType primType; - uint32 primitiveCount; - - /* - * Optional index buffer. If indexArray.surfaceId is - * SVGA3D_INVALID_ID, we render without an index buffer. Rendering - * without an index buffer is identical to rendering with an index - * buffer containing the sequence [0, 1, 2, 3, ...]. - * - * If an index buffer is in use, indexWidth specifies the width in - * bytes of each index value. It must be less than or equal to - * indexArray.stride. - * - * (Currently, the SVGA3D device requires index buffers to be tightly - * packed. In other words, indexWidth == indexArray.stride) - */ - SVGA3dArray indexArray; - uint32 indexWidth; - - /* - * Optional index bias. This number is added to all indices from - * indexArray before they are used as vertex array indices. This - * can be used in multiple ways: - * - * - When not using an indexArray, this bias can be used to - * specify where in the vertex arrays to begin rendering. - * - * - A positive number here is equivalent to increasing the - * offset in each vertex array. - * - * - A negative number can be used to render using a small - * vertex array and an index buffer that contains large - * values. This may be used by some applications that - * crop a vertex buffer without modifying their index - * buffer. - * - * Note that rendering with a negative bias value may be slower and - * use more memory than rendering with a positive or zero bias. - */ - int32 indexBias; -} SVGA3dPrimitiveRange; - -typedef -struct { - uint32 cid; - uint32 numVertexDecls; - uint32 numRanges; - - /* - * There are two variable size arrays after the - * SVGA3dCmdDrawPrimitives structure. In order, - * they are: - * - * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than - * SVGA3D_MAX_VERTEX_ARRAYS; - * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than - * SVGA3D_MAX_DRAW_PRIMITIVE_RANGES; - * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains - * the frequency divisor for the corresponding vertex decl). 
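 * As a sizing sketch: omitting the optional divisor array, the command
 * body totals
 *   sizeof(SVGA3dCmdDrawPrimitives) +
 *   numVertexDecls * sizeof(SVGA3dVertexDecl) +
 *   numRanges * sizeof(SVGA3dPrimitiveRange)
 * bytes, which is what the header's size field must carry.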
- */ -} SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAWPRIMITIVES */ - -typedef -struct { - uint32 stage; - SVGA3dTextureStateName name; - union { - uint32 value; - float floatValue; - }; -} SVGA3dTextureState; - -typedef -struct { - uint32 cid; - /* Followed by variable number of SVGA3dTextureState structures */ -} SVGA3dCmdSetTextureState; /* SVGA_3D_CMD_SETTEXTURESTATE */ - -typedef -struct { - uint32 cid; - SVGA3dTransformType type; - float matrix[16]; -} SVGA3dCmdSetTransform; /* SVGA_3D_CMD_SETTRANSFORM */ - -typedef -struct { - float min; - float max; -} SVGA3dZRange; - -typedef -struct { - uint32 cid; - SVGA3dZRange zRange; -} SVGA3dCmdSetZRange; /* SVGA_3D_CMD_SETZRANGE */ - -typedef -struct { - float diffuse[4]; - float ambient[4]; - float specular[4]; - float emissive[4]; - float shininess; -} SVGA3dMaterial; - -typedef -struct { - uint32 cid; - SVGA3dFace face; - SVGA3dMaterial material; -} SVGA3dCmdSetMaterial; /* SVGA_3D_CMD_SETMATERIAL */ - -typedef -struct { - uint32 cid; - uint32 index; - SVGA3dLightData data; -} SVGA3dCmdSetLightData; /* SVGA_3D_CMD_SETLIGHTDATA */ - -typedef -struct { - uint32 cid; - uint32 index; - uint32 enabled; -} SVGA3dCmdSetLightEnabled; /* SVGA_3D_CMD_SETLIGHTENABLED */ - -typedef -struct { - uint32 cid; - SVGA3dRect rect; -} SVGA3dCmdSetViewport; /* SVGA_3D_CMD_SETVIEWPORT */ - -typedef -struct { - uint32 cid; - SVGA3dRect rect; -} SVGA3dCmdSetScissorRect; /* SVGA_3D_CMD_SETSCISSORRECT */ - -typedef -struct { - uint32 cid; - uint32 index; - float plane[4]; -} SVGA3dCmdSetClipPlane; /* SVGA_3D_CMD_SETCLIPPLANE */ - -typedef -struct { - uint32 cid; - uint32 shid; - SVGA3dShaderType type; - /* Followed by variable number of DWORDs for shader bycode */ -} SVGA3dCmdDefineShader; /* SVGA_3D_CMD_SHADER_DEFINE */ - -typedef -struct { - uint32 cid; - uint32 shid; - SVGA3dShaderType type; -} SVGA3dCmdDestroyShader; /* SVGA_3D_CMD_SHADER_DESTROY */ - -typedef -struct { - uint32 cid; - uint32 reg; /* register number */ - SVGA3dShaderType type; - SVGA3dShaderConstType ctype; - uint32 values[4]; -} SVGA3dCmdSetShaderConst; /* SVGA_3D_CMD_SET_SHADER_CONST */ - -typedef -struct { - uint32 cid; - SVGA3dShaderType type; - uint32 shid; -} SVGA3dCmdSetShader; /* SVGA_3D_CMD_SET_SHADER */ - -typedef -struct { - uint32 cid; - SVGA3dQueryType type; -} SVGA3dCmdBeginQuery; /* SVGA_3D_CMD_BEGIN_QUERY */ - -typedef -struct { - uint32 cid; - SVGA3dQueryType type; - SVGAGuestPtr guestResult; /* Points to an SVGA3dQueryResult structure */ -} SVGA3dCmdEndQuery; /* SVGA_3D_CMD_END_QUERY */ - -typedef -struct { - uint32 cid; /* Same parameters passed to END_QUERY */ - SVGA3dQueryType type; - SVGAGuestPtr guestResult; -} SVGA3dCmdWaitForQuery; /* SVGA_3D_CMD_WAIT_FOR_QUERY */ - -typedef -struct { - uint32 totalSize; /* Set by guest before query is ended. */ - SVGA3dQueryState state; /* Set by host or guest. See SVGA3dQueryState. */ - union { /* Set by host on exit from PENDING state */ - uint32 result32; - }; -} SVGA3dQueryResult; - -/* - * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN -- - * - * This is a blit from an SVGA3D surface to a Screen Object. Just - * like GMR-to-screen blits, this blit may be directed at a - * specific screen or to the virtual coordinate space. - * - * The blit copies from a rectangular region of an SVGA3D surface - * image to a rectangular region of a screen or screens. - * - * This command takes an optional variable-length list of clipping - * rectangles after the body of the command. If no rectangles are - * specified, there is no clipping region. 
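The query structures above encode a small handshake: the guest writes SVGA3D_QUERYSTATE_PENDING into guestResult before ending the query, and the host overwrites the state on completion. A polling sketch under that reading:

    static bool svga3d_query_done(const volatile SVGA3dQueryResult *result)
    {
            /* host flips PENDING to SUCCEEDED or FAILED when finished */
            return result->state != SVGA3D_QUERYSTATE_PENDING;
    }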
The entire destRect is - * drawn to. If one or more rectangles are included, they describe - * a clipping region. The clip rectangle coordinates are measured - * relative to the top-left corner of destRect. - * - * This clipping region serves multiple purposes: - * - * - It can be used to perform an irregularly shaped blit more - * efficiently than by issuing many separate blit commands. - * - * - It is equivalent to allowing blits with non-integer - * source coordinates. You could blit just one half-pixel - * of a source, for example, by specifying a larger - * destination rectangle than you need, then removing - * part of it using a clip rectangle. - * - * Availability: - * SVGA_FIFO_CAP_SCREEN_OBJECT - * - * Limitations: - * - * - Currently, no backend supports blits from a mipmap or face - * other than the first one. - */ - -typedef -struct { - SVGA3dSurfaceImageId srcImage; - SVGASignedRect srcRect; - uint32 destScreenId; /* Screen ID or SVGA_ID_INVALID for virt. coords */ - SVGASignedRect destRect; /* Supports scaling if src/rest different size */ - /* Clipping: zero or more SVGASignedRects follow */ -} SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */ - -typedef -struct { - uint32 sid; - SVGA3dTextureFilter filter; -} SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */ - - -/* - * Guest-backed surface definitions. - */ - -typedef uint32 SVGAMobId; - -typedef enum SVGAMobFormat { - SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID, - SVGA3D_MOBFMT_PTDEPTH_0 = 0, - SVGA3D_MOBFMT_PTDEPTH_1 = 1, - SVGA3D_MOBFMT_PTDEPTH_2 = 2, - SVGA3D_MOBFMT_RANGE = 3, - SVGA3D_MOBFMT_PTDEPTH64_0 = 4, - SVGA3D_MOBFMT_PTDEPTH64_1 = 5, - SVGA3D_MOBFMT_PTDEPTH64_2 = 6, - SVGA3D_MOBFMT_MAX, -} SVGAMobFormat; - -/* - * Sizes of opaque types. - */ - -#define SVGA3D_OTABLE_MOB_ENTRY_SIZE 16 -#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE 8 -#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE 64 -#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE 16 -#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE 64 -#define SVGA3D_CONTEXT_DATA_SIZE 16384 - -/* - * SVGA3dCmdSetOTableBase -- - * - * This command allows the guest to specify the base PPN of the - * specified object table. - */ - -typedef enum { - SVGA_OTABLE_MOB = 0, - SVGA_OTABLE_MIN = 0, - SVGA_OTABLE_SURFACE = 1, - SVGA_OTABLE_CONTEXT = 2, - SVGA_OTABLE_SHADER = 3, - SVGA_OTABLE_SCREEN_TARGET = 4, - SVGA_OTABLE_DX9_MAX = 5, - SVGA_OTABLE_MAX = 8 -} SVGAOTableType; - -typedef -struct { - SVGAOTableType type; - PPN baseAddress; - uint32 sizeInBytes; - uint32 validSizeInBytes; - SVGAMobFormat ptDepth; -} __packed -SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */ - -typedef -struct { - SVGAOTableType type; - PPN64 baseAddress; - uint32 sizeInBytes; - uint32 validSizeInBytes; - SVGAMobFormat ptDepth; -} __packed -SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */ - -typedef -struct { - SVGAOTableType type; -} __packed -SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */ - -/* - * Define a memory object (Mob) in the OTable. - */ - -typedef -struct SVGA3dCmdDefineGBMob { - SVGAMobId mobid; - SVGAMobFormat ptDepth; - PPN base; - uint32 sizeInBytes; -} __packed -SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ - - -/* - * Destroys an object in the OTable. - */ - -typedef -struct SVGA3dCmdDestroyGBMob { - SVGAMobId mobid; -} __packed -SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */ - -/* - * Redefine an object in the OTable. 
- */ - -typedef -struct SVGA3dCmdRedefineGBMob { - SVGAMobId mobid; - SVGAMobFormat ptDepth; - PPN base; - uint32 sizeInBytes; -} __packed -SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */ - -/* - * Define a memory object (Mob) in the OTable with a PPN64 base. - */ - -typedef -struct SVGA3dCmdDefineGBMob64 { - SVGAMobId mobid; - SVGAMobFormat ptDepth; - PPN64 base; - uint32 sizeInBytes; -} __packed -SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */ - -/* - * Redefine an object in the OTable with PPN64 base. - */ - -typedef -struct SVGA3dCmdRedefineGBMob64 { - SVGAMobId mobid; - SVGAMobFormat ptDepth; - PPN64 base; - uint32 sizeInBytes; -} __packed -SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ - -/* - * Notification that the page tables have been modified. - */ - -typedef -struct SVGA3dCmdUpdateGBMobMapping { - SVGAMobId mobid; -} __packed -SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */ - -/* - * Define a guest-backed surface. - */ - -typedef -struct SVGA3dCmdDefineGBSurface { - uint32 sid; - SVGA3dSurfaceFlags surfaceFlags; - SVGA3dSurfaceFormat format; - uint32 numMipLevels; - uint32 multisampleCount; - SVGA3dTextureFilter autogenFilter; - SVGA3dSize size; -} __packed -SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ - -/* - * Destroy a guest-backed surface. - */ - -typedef -struct SVGA3dCmdDestroyGBSurface { - uint32 sid; -} __packed -SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ - -/* - * Bind a guest-backed surface to an object. - */ - -typedef -struct SVGA3dCmdBindGBSurface { - uint32 sid; - SVGAMobId mobid; -} __packed -SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ - -/* - * Conditionally bind a mob to a guest backed surface if testMobid - * matches the currently bound mob. Optionally issue a readback on - * the surface while it is still bound to the old mobid if the mobid - * is changed by this command. - */ - -#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0) - -typedef -struct{ - uint32 sid; - SVGAMobId testMobid; - SVGAMobId mobid; - uint32 flags; -} __packed -SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */ - -/* - * Update an image in a guest-backed surface. - * (Inform the device that the guest-contents have been updated.) - */ - -typedef -struct SVGA3dCmdUpdateGBImage { - SVGA3dSurfaceImageId image; - SVGA3dBox box; -} __packed -SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ - -/* - * Update an entire guest-backed surface. - * (Inform the device that the guest-contents have been updated.) - */ - -typedef -struct SVGA3dCmdUpdateGBSurface { - uint32 sid; -} __packed -SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ - -/* - * Readback an image in a guest-backed surface. - * (Request the device to flush the dirty contents into the guest.) - */ - -typedef -struct SVGA3dCmdReadbackGBImage { - SVGA3dSurfaceImageId image; -} __packed -SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/ - -/* - * Readback an entire guest-backed surface. - * (Request the device to flush the dirty contents into the guest.) - */ - -typedef -struct SVGA3dCmdReadbackGBSurface { - uint32 sid; -} __packed -SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ - -/* - * Readback a sub rect of an image in a guest-backed surface. After - * issuing this command the driver is required to issue an update call - * of the same region before issuing any other commands that reference - * this surface or rendering is not guaranteed. 
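Taken together, the define/bind/update commands above form the usual guest-backed surface lifecycle. A sketch of that sequence, assuming the command structs from this header are in scope; submit(id, body, size) is a hypothetical submission helper, and the format and dimensions are arbitrary examples.

void submit(uint32 id, const void *body, uint32 size);  /* hypothetical */

static void example_gb_surface_lifecycle(uint32 sid, SVGAMobId mobid)
{
        SVGA3dCmdDefineGBSurface def = {
                .sid = sid,
                .format = SVGA3D_R8G8B8A8_UNORM,   /* example format */
                .numMipLevels = 1,
                .multisampleCount = 1,
                .size = { .width = 256, .height = 256, .depth = 1 },
        };
        SVGA3dCmdBindGBSurface bind = { .sid = sid, .mobid = mobid };
        SVGA3dCmdUpdateGBSurface upd = { .sid = sid };

        submit(SVGA_3D_CMD_DEFINE_GB_SURFACE, &def, sizeof(def));
        submit(SVGA_3D_CMD_BIND_GB_SURFACE, &bind, sizeof(bind));
        /* ... guest fills the backing mob with pixel data ... */
        submit(SVGA_3D_CMD_UPDATE_GB_SURFACE, &upd, sizeof(upd));
}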
- */ - -typedef -struct SVGA3dCmdReadbackGBImagePartial { - SVGA3dSurfaceImageId image; - SVGA3dBox box; - uint32 invertBox; -} __packed -SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ - -/* - * Invalidate an image in a guest-backed surface. - * (Notify the device that the contents can be lost.) - */ - -typedef -struct SVGA3dCmdInvalidateGBImage { - SVGA3dSurfaceImageId image; -} __packed -SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ - -/* - * Invalidate an entire guest-backed surface. - * (Notify the device that the contents if all images can be lost.) - */ - -typedef -struct SVGA3dCmdInvalidateGBSurface { - uint32 sid; -} __packed -SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ - -/* - * Invalidate a sub rect of an image in a guest-backed surface. After - * issuing this command the driver is required to issue an update call - * of the same region before issuing any other commands that reference - * this surface or rendering is not guaranteed. - */ - -typedef -struct SVGA3dCmdInvalidateGBImagePartial { - SVGA3dSurfaceImageId image; - SVGA3dBox box; - uint32 invertBox; -} __packed -SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ - -/* - * Define a guest-backed context. - */ - -typedef -struct SVGA3dCmdDefineGBContext { - uint32 cid; -} __packed -SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ - -/* - * Destroy a guest-backed context. - */ - -typedef -struct SVGA3dCmdDestroyGBContext { - uint32 cid; -} __packed -SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ - -/* - * Bind a guest-backed context. - * - * validContents should be set to 0 for new contexts, - * and 1 if this is an old context which is getting paged - * back on to the device. - * - * For new contexts, it is recommended that the driver - * issue commands to initialize all interesting state - * prior to rendering. - */ - -typedef -struct SVGA3dCmdBindGBContext { - uint32 cid; - SVGAMobId mobid; - uint32 validContents; -} __packed -SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ - -/* - * Readback a guest-backed context. - * (Request that the device flush the contents back into guest memory.) - */ - -typedef -struct SVGA3dCmdReadbackGBContext { - uint32 cid; -} __packed -SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ - -/* - * Invalidate a guest-backed context. - */ -typedef -struct SVGA3dCmdInvalidateGBContext { - uint32 cid; -} __packed -SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ - -/* - * Define a guest-backed shader. - */ - -typedef -struct SVGA3dCmdDefineGBShader { - uint32 shid; - SVGA3dShaderType type; - uint32 sizeInBytes; -} __packed -SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ - -/* - * Bind a guest-backed shader. - */ - -typedef struct SVGA3dCmdBindGBShader { - uint32 shid; - SVGAMobId mobid; - uint32 offsetInBytes; -} __packed -SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ - -/* - * Destroy a guest-backed shader. - */ - -typedef struct SVGA3dCmdDestroyGBShader { - uint32 shid; -} __packed -SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ - -typedef -struct { - uint32 cid; - uint32 regStart; - SVGA3dShaderType shaderType; - SVGA3dShaderConstType constType; - - /* - * Followed by a variable number of shader constants. - * - * Note that FLOAT and INT constants are 4-dwords in length, while - * BOOL constants are 1-dword in length. 
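Stepping back to the context commands above: the validContents rule for SVGA3dCmdBindGBContext is easy to get wrong, so here is the distinction spelled out as code (a sketch; submit() is again a hypothetical helper).

#include <stdbool.h>

void submit(uint32 id, const void *body, uint32 size);  /* hypothetical */

static void example_bind_gb_context(uint32 cid, SVGAMobId mobid,
                                    bool swapped_back_in)
{
        SVGA3dCmdBindGBContext cmd = {
                .cid = cid,
                .mobid = mobid,
                /* 0: fresh context; 1: paging an old context back in. */
                .validContents = swapped_back_in ? 1 : 0,
        };

        submit(SVGA_3D_CMD_BIND_GB_CONTEXT, &cmd, sizeof(cmd));
        /* For new contexts, follow up with state-initialization commands. */
}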
- */ -} __packed -SVGA3dCmdSetGBShaderConstInline; -/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */ - -typedef -struct { - uint32 cid; - SVGA3dQueryType type; -} __packed -SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ - -typedef -struct { - uint32 cid; - SVGA3dQueryType type; - SVGAMobId mobid; - uint32 offset; -} __packed -SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ - - -/* - * SVGA_3D_CMD_WAIT_FOR_GB_QUERY -- - * - * The semantics of this command are identical to the - * SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written - * to a Mob instead of a GMR. - */ - -typedef -struct { - uint32 cid; - SVGA3dQueryType type; - SVGAMobId mobid; - uint32 offset; -} __packed -SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ - -typedef -struct { - SVGAMobId mobid; - uint32 fbOffset; - uint32 initalized; -} __packed -SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */ - -typedef -struct { - SVGAMobId mobid; - uint32 gartOffset; -} __packed -SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */ - - -typedef -struct { - uint32 gartOffset; - uint32 numPages; -} __packed -SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */ - - -/* - * Screen Targets - */ -#define SVGA_STFLAG_PRIMARY (1 << 0) - -typedef -struct { - uint32 stid; - uint32 width; - uint32 height; - int32 xRoot; - int32 yRoot; - uint32 flags; -} __packed -SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */ - -typedef -struct { - uint32 stid; -} __packed -SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */ - -typedef -struct { - uint32 stid; - SVGA3dSurfaceImageId image; -} __packed -SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */ - -typedef -struct { - uint32 stid; - SVGA3dBox box; -} __packed -SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */ - -/* - * Capability query index. - * - * Notes: - * - * 1. SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of - * fixed-function texture units available. Each of these units - * work in both FFP and Shader modes, and they support texture - * transforms and texture coordinates. The host may have additional - * texture image units that are only usable with shaders. - * - * 2. The BUFFER_FORMAT capabilities are deprecated, and they always - * return TRUE. Even on physical hardware that does not support - * these formats natively, the SVGA3D device will provide an emulation - * which should be invisible to the guest OS. - * - * In general, the SVGA3D device should support any operation on - * any surface format, it just may perform some of these - * operations in software depending on the capabilities of the - * available physical hardware. - * - * XXX: In the future, we will add capabilities that describe in - * detail what formats are supported in hardware for what kinds - * of operations. 
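The dword-count note in the SVGA3dCmdSetGBShaderConstInline comment above translates into a simple payload computation. A sketch, assuming the SVGA3dShaderConstType enumerators from these headers:

static uint32 gb_shader_const_payload(SVGA3dShaderConstType ctype,
                                      uint32 num_consts)
{
        /* FLOAT and INT constants are 4 dwords each; BOOL is 1 dword. */
        uint32 dwords_per_const =
                (ctype == SVGA3D_CONST_TYPE_BOOL) ? 1 : 4;

        return sizeof(SVGA3dCmdSetGBShaderConstInline) +
               num_consts * dwords_per_const * sizeof(uint32);
}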
- */ - -typedef enum { - SVGA3D_DEVCAP_3D = 0, - SVGA3D_DEVCAP_MAX_LIGHTS = 1, - SVGA3D_DEVCAP_MAX_TEXTURES = 2, /* See note (1) */ - SVGA3D_DEVCAP_MAX_CLIP_PLANES = 3, - SVGA3D_DEVCAP_VERTEX_SHADER_VERSION = 4, - SVGA3D_DEVCAP_VERTEX_SHADER = 5, - SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION = 6, - SVGA3D_DEVCAP_FRAGMENT_SHADER = 7, - SVGA3D_DEVCAP_MAX_RENDER_TARGETS = 8, - SVGA3D_DEVCAP_S23E8_TEXTURES = 9, - SVGA3D_DEVCAP_S10E5_TEXTURES = 10, - SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND = 11, - SVGA3D_DEVCAP_D16_BUFFER_FORMAT = 12, /* See note (2) */ - SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT = 13, /* See note (2) */ - SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT = 14, /* See note (2) */ - SVGA3D_DEVCAP_QUERY_TYPES = 15, - SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING = 16, - SVGA3D_DEVCAP_MAX_POINT_SIZE = 17, - SVGA3D_DEVCAP_MAX_SHADER_TEXTURES = 18, - SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH = 19, - SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT = 20, - SVGA3D_DEVCAP_MAX_VOLUME_EXTENT = 21, - SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT = 22, - SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO = 23, - SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY = 24, - SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT = 25, - SVGA3D_DEVCAP_MAX_VERTEX_INDEX = 26, - SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS = 27, - SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28, - SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS = 29, - SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS = 30, - SVGA3D_DEVCAP_TEXTURE_OPS = 31, - SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 = 32, - SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 = 33, - SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 = 34, - SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 = 35, - SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 = 36, - SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 = 37, - SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 = 38, - SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 = 39, - SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 = 40, - SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 = 41, - SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 = 42, - SVGA3D_DEVCAP_SURFACEFMT_Z_D16 = 43, - SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 = 44, - SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 = 45, - SVGA3D_DEVCAP_SURFACEFMT_DXT1 = 46, - SVGA3D_DEVCAP_SURFACEFMT_DXT2 = 47, - SVGA3D_DEVCAP_SURFACEFMT_DXT3 = 48, - SVGA3D_DEVCAP_SURFACEFMT_DXT4 = 49, - SVGA3D_DEVCAP_SURFACEFMT_DXT5 = 50, - SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 = 51, - SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 = 52, - SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 = 53, - SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 = 54, - SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 = 55, - SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 = 56, - SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 = 57, - SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 = 58, - SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 = 59, - SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 = 60, - SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 = 61, - SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES = 63, - - /* - * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color - * render targets. This does no include the depth or stencil targets. - */ - SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS = 64, - - SVGA3D_DEVCAP_SURFACEFMT_V16U16 = 65, - SVGA3D_DEVCAP_SURFACEFMT_G16R16 = 66, - SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67, - SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68, - SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69, - SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES = 70, - SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES = 71, - SVGA3D_DEVCAP_ALPHATOCOVERAGE = 72, - SVGA3D_DEVCAP_SUPERSAMPLE = 73, - SVGA3D_DEVCAP_AUTOGENMIPMAPS = 74, - SVGA3D_DEVCAP_SURFACEFMT_NV12 = 75, - SVGA3D_DEVCAP_SURFACEFMT_AYUV = 76, - - /* - * This is the maximum number of SVGA context IDs that the guest - * can define using SVGA_3D_CMD_CONTEXT_DEFINE. 
- */ - SVGA3D_DEVCAP_MAX_CONTEXT_IDS = 77, - - /* - * This is the maximum number of SVGA surface IDs that the guest - * can define using SVGA_3D_CMD_SURFACE_DEFINE*. - */ - SVGA3D_DEVCAP_MAX_SURFACE_IDS = 78, - - SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 = 79, - SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 = 80, - SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT = 81, - - SVGA3D_DEVCAP_SURFACEFMT_BC4_UNORM = 82, - SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83, - - /* - * Deprecated. - */ - SVGA3D_DEVCAP_VGPU10 = 84, - - /* - * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements - * ored together, one for every type of video decoding supported. - */ - SVGA3D_DEVCAP_VIDEO_DECODE = 85, - - /* - * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements - * ored together, one for every type of video processing supported. - */ - SVGA3D_DEVCAP_VIDEO_PROCESS = 86, - - SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */ - SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */ - SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */ - SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */ - - SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91, - - /* - * Does the host support the SVGA logic ops commands? - */ - SVGA3D_DEVCAP_LOGICOPS = 92, - - /* - * What support does the host have for screen targets? - * - * See the SVGA3D_SCREENTARGET_CAP bits below. - */ - SVGA3D_DEVCAP_SCREENTARGETS = 93, - - SVGA3D_DEVCAP_MAX /* This must be the last index. */ -} SVGA3dDevCapIndex; - -typedef union { - Bool b; - uint32 u; - int32 i; - float f; -} SVGA3dDevCapResult; - -typedef enum { - SVGA3DCAPS_RECORD_UNKNOWN = 0, - SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100, - SVGA3DCAPS_RECORD_DEVCAPS = 0x100, - SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff, -} SVGA3dCapsRecordType; - -typedef -struct SVGA3dCapsRecordHeader { - uint32 length; - SVGA3dCapsRecordType type; -} -SVGA3dCapsRecordHeader; - -typedef -struct SVGA3dCapsRecord { - SVGA3dCapsRecordHeader header; - uint32 data[1]; -} -SVGA3dCapsRecord; - - -typedef uint32 SVGA3dCapPair[2]; - -#endif /* _SVGA3D_REG_H_ */ diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h deleted file mode 100644 index ef3385096145..000000000000 --- a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h +++ /dev/null @@ -1,912 +0,0 @@ -/************************************************************************** - * - * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. 
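The caps record structures above are consumed by walking the buffer record by record. The sketch below rests on two conventions not spelled out in this header, so treat it as illustrative only: header.length is assumed to count dwords including the header itself, a zero-length header is assumed to terminate the list, and DEVCAPS records are assumed to carry SVGA3dCapPair (index, value) entries.

#include <stdio.h>

static void walk_caps_records(const uint32 *buf)
{
        const SVGA3dCapsRecord *rec = (const SVGA3dCapsRecord *)buf;

        while (rec->header.length != 0) {
                if (rec->header.type >= SVGA3DCAPS_RECORD_DEVCAPS_MIN &&
                    rec->header.type <= SVGA3DCAPS_RECORD_DEVCAPS_MAX) {
                        const SVGA3dCapPair *pair =
                                (const SVGA3dCapPair *)rec->data;
                        /* Two header dwords, then two dwords per pair. */
                        uint32 i, n = (rec->header.length - 2) / 2;

                        for (i = 0; i < n; i++)
                                printf("devcap %u = 0x%x\n",
                                       pair[i][0], pair[i][1]);
                }
                rec = (const SVGA3dCapsRecord *)
                        ((const uint32 *)rec + rec->header.length);
        }
}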
- * - **************************************************************************/ - -#ifdef __KERNEL__ - -#include <drm/vmwgfx_drm.h> -#define surf_size_struct struct drm_vmw_size - -#else /* __KERNEL__ */ - -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0])) -#endif /* ARRAY_SIZE */ - -#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y)) -#define max_t(type, x, y) ((x) > (y) ? (x) : (y)) -#define min_t(type, x, y) ((x) < (y) ? (x) : (y)) -#define surf_size_struct SVGA3dSize -#define u32 uint32 -#define u64 uint64_t -#define U32_MAX ((u32)~0U) - -#endif /* __KERNEL__ */ - -#include "svga3d_reg.h" - -/* - * enum svga3d_block_desc describes the active data channels in a block. - * - * There can be at-most four active channels in a block: - * 1. Red, bump W, luminance and depth are stored in the first channel. - * 2. Green, bump V and stencil are stored in the second channel. - * 3. Blue and bump U are stored in the third channel. - * 4. Alpha and bump Q are stored in the fourth channel. - * - * Block channels can be used to store compressed and buffer data: - * 1. For compressed formats, only the data channel is used and its size - * is equal to that of a singular block in the compression scheme. - * 2. For buffer formats, only the data channel is used and its size is - * exactly one byte in length. - * 3. In each case the bit depth represent the size of a singular block. - * - * Note: Compressed and IEEE formats do not use the bitMask structure. - */ - -enum svga3d_block_desc { - SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */ - SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with red channel - data */ - SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel - data */ - SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video - U and V */ - SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel - data */ - SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel - data */ - SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil - channel */ - SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with blue channel - data */ - SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel - data */ - SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel - data */ - SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance - data */ - SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */ - SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha - channel */ - SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel - data */ - SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of - data */ - SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of - data depending on the - compression method used */ - SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE - floating point - representation in - all channels */ - SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store - data. */ - SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */ - SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */ - SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */ - SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */ - SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV, - e.g., NV12. */ - SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate - Y, U, V, e.g., YV12. 
*/ - - SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED | - SVGA3DBLOCKDESC_GREEN, - SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG | - SVGA3DBLOCKDESC_BLUE, - SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB | - SVGA3DBLOCKDESC_SRGB, - SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB | - SVGA3DBLOCKDESC_ALPHA, - SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA | - SVGA3DBLOCKDESC_SRGB, - SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U | - SVGA3DBLOCKDESC_V, - SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV | - SVGA3DBLOCKDESC_LUMINANCE, - SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV | - SVGA3DBLOCKDESC_W, - SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW | - SVGA3DBLOCKDESC_ALPHA, - SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U | - SVGA3DBLOCKDESC_V | - SVGA3DBLOCKDESC_W | - SVGA3DBLOCKDESC_Q, - SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE | - SVGA3DBLOCKDESC_ALPHA, - SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED | - SVGA3DBLOCKDESC_IEEE_FP, - SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP | - SVGA3DBLOCKDESC_GREEN, - SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP | - SVGA3DBLOCKDESC_BLUE, - SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP | - SVGA3DBLOCKDESC_ALPHA, - SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH | - SVGA3DBLOCKDESC_STENCIL, - SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO | - SVGA3DBLOCKDESC_Y, - SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA | - SVGA3DBLOCKDESC_Y | - SVGA3DBLOCKDESC_U_VIDEO | - SVGA3DBLOCKDESC_V_VIDEO, - SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB | - SVGA3DBLOCKDESC_EXP, - SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED | - SVGA3DBLOCKDESC_SRGB, - SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV | - SVGA3DBLOCKDESC_2PLANAR_YUV, - SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV | - SVGA3DBLOCKDESC_3PLANAR_YUV, -}; - -/* - * SVGA3dSurfaceDesc describes the actual pixel data. - * - * This structure provides the following information: - * 1. Block description. - * 2. Dimensions of a block in the surface. - * 3. Size of block in bytes. - * 4. Bit depth of the pixel data. - * 5. Channel bit depths and masks (if applicable). 
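Since the composite values above are plain bitwise ORs of the single-channel flags, channel presence can be tested with a mask. Two trivial helpers as an example; note that several semantic names share a bit (for instance ALPHA and Q are both 1 << 3), so a set bit only says the channel slot is in use, not which interpretation applies.

#include <stdbool.h>

static inline bool block_has_alpha_channel(enum svga3d_block_desc desc)
{
        /* Also true for bump Q data, which shares bit 3. */
        return (desc & SVGA3DBLOCKDESC_ALPHA) != 0;
}

static inline bool block_is_compressed(enum svga3d_block_desc desc)
{
        /* True for DXTn/BCn blocks, with or without the sRGB flag. */
        return (desc & SVGA3DBLOCKDESC_COMPRESSED) != 0;
}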
- */ -#define SVGA3D_CHANNEL_DEF(type) \ - struct { \ - union { \ - type blue; \ - type u; \ - type uv_video; \ - type u_video; \ - }; \ - union { \ - type green; \ - type v; \ - type stencil; \ - type v_video; \ - }; \ - union { \ - type red; \ - type w; \ - type luminance; \ - type y; \ - type depth; \ - type data; \ - }; \ - union { \ - type alpha; \ - type q; \ - type exp; \ - }; \ - } - -struct svga3d_surface_desc { - enum svga3d_block_desc block_desc; - surf_size_struct block_size; - u32 bytes_per_block; - u32 pitch_bytes_per_block; - - struct { - u32 total; - SVGA3D_CHANNEL_DEF(uint8); - } bit_depth; - - struct { - SVGA3D_CHANNEL_DEF(uint8); - } bit_offset; -}; - -static const struct svga3d_surface_desc svga3d_surface_descs[] = { - {SVGA3DBLOCKDESC_NONE, - {1, 1, 1}, 0, 0, {0, {{0}, {0}, {0}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_FORMAT_INVALID */ - - {SVGA3DBLOCKDESC_RGB, - {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } }, - {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_X8R8G8B8 */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_A8R8G8B8 */ - - {SVGA3DBLOCKDESC_RGB, - {1, 1, 1}, 2, 2, {16, {{5}, {6}, {5}, {0} } }, - {{{0}, {5}, {11}, {0} } } }, /* SVGA3D_R5G6B5 */ - - {SVGA3DBLOCKDESC_RGB, - {1, 1, 1}, 2, 2, {15, {{5}, {5}, {5}, {0} } }, - {{{0}, {5}, {10}, {0} } } }, /* SVGA3D_X1R5G5B5 */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 2, 2, {16, {{5}, {5}, {5}, {1} } }, - {{{0}, {5}, {10}, {15} } } }, /* SVGA3D_A1R5G5B5 */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 2, 2, {16, {{4}, {4}, {4}, {4} } }, - {{{0}, {4}, {8}, {12} } } }, /* SVGA3D_A4R4G4B4 */ - - {SVGA3DBLOCKDESC_DEPTH, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D32 */ - - {SVGA3DBLOCKDESC_DEPTH, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D16 */ - - {SVGA3DBLOCKDESC_DS, - {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } }, - {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8 */ - - {SVGA3DBLOCKDESC_DS, - {1, 1, 1}, 2, 2, {16, {{0}, {1}, {15}, {0} } }, - {{{0}, {15}, {0}, {0} } } }, /* SVGA3D_Z_D15S1 */ - - {SVGA3DBLOCKDESC_LUMINANCE, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE8 */ - - {SVGA3DBLOCKDESC_LA, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {4}, {4} } }, - {{{0}, {0}, {0}, {4} } } }, /* SVGA3D_LUMINANCE4_ALPHA4 */ - - {SVGA3DBLOCKDESC_LUMINANCE, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE16 */ - - {SVGA3DBLOCKDESC_LA, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } }, - {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_LUMINANCE8_ALPHA8 */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT1 */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT2 */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT3 */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT4 */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT5 */ - - {SVGA3DBLOCKDESC_UV, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } }, - {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_BUMPU8V8 */ - - {SVGA3DBLOCKDESC_UVL, - {1, 1, 1}, 2, 2, {16, {{5}, 
{5}, {6}, {0} } }, - {{{11}, {6}, {0}, {0} } } }, /* SVGA3D_BUMPL6V5U5 */ - - {SVGA3DBLOCKDESC_UVL, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {0} } }, - {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPX8L8V8U8 */ - - {SVGA3DBLOCKDESC_UVL, - {1, 1, 1}, 3, 3, {24, {{8}, {8}, {8}, {0} } }, - {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPL8V8U8 */ - - {SVGA3DBLOCKDESC_RGBA_FP, - {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } }, - {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_ARGB_S10E5 */ - - {SVGA3DBLOCKDESC_RGBA_FP, - {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } }, - {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_ARGB_S23E8 */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } }, - {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2R10G10B10 */ - - {SVGA3DBLOCKDESC_UV, - {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } }, - {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_V8U8 */ - - {SVGA3DBLOCKDESC_UVWQ, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{24}, {16}, {8}, {0} } } }, /* SVGA3D_Q8W8V8U8 */ - - {SVGA3DBLOCKDESC_UV, - {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } }, - {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_CxV8U8 */ - - {SVGA3DBLOCKDESC_UVL, - {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } }, - {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_X8L8V8U8 */ - - {SVGA3DBLOCKDESC_UVWA, - {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } }, - {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2W10V10U10 */ - - {SVGA3DBLOCKDESC_ALPHA, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {0}, {8} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_ALPHA8 */ - - {SVGA3DBLOCKDESC_R_FP, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S10E5 */ - - {SVGA3DBLOCKDESC_R_FP, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S23E8 */ - - {SVGA3DBLOCKDESC_RG_FP, - {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } }, - {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_RG_S10E5 */ - - {SVGA3DBLOCKDESC_RG_FP, - {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } }, - {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_RG_S23E8 */ - - {SVGA3DBLOCKDESC_BUFFER, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BUFFER */ - - {SVGA3DBLOCKDESC_DEPTH, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } }, - {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24X8 */ - - {SVGA3DBLOCKDESC_UV, - {1, 1, 1}, 4, 4, {32, {{16}, {16}, {0}, {0} } }, - {{{16}, {0}, {0}, {0} } } }, /* SVGA3D_V16U16 */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } }, - {{{0}, {0}, {16}, {0} } } }, /* SVGA3D_G16R16 */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } }, - {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_A16B16G16R16 */ - - {SVGA3DBLOCKDESC_YUV, - {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } }, - {{{0}, {0}, {8}, {0} } } }, /* SVGA3D_UYVY */ - - {SVGA3DBLOCKDESC_YUV, - {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } }, - {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_YUY2 */ - - {SVGA3DBLOCKDESC_NV12, - {2, 2, 1}, 6, 2, {48, {{0}, {0}, {48}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_NV12 */ - - {SVGA3DBLOCKDESC_AYUV, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_AYUV */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } }, - {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_TYPELESS */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } }, - {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_UINT */ - - {SVGA3DBLOCKDESC_UVWQ, - {1, 1, 1}, 16, 16, {128, {{32}, {32}, 
{32}, {32} } }, - {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_SINT */ - - {SVGA3DBLOCKDESC_RGB, - {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } }, - {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_TYPELESS */ - - {SVGA3DBLOCKDESC_RGB_FP, - {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } }, - {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_FLOAT */ - - {SVGA3DBLOCKDESC_RGB, - {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } }, - {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_UINT */ - - {SVGA3DBLOCKDESC_UVW, - {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } }, - {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_SINT */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } }, - {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_TYPELESS */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } }, - {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_UINT */ - - {SVGA3DBLOCKDESC_UVWQ, - {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } }, - {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SNORM */ - - {SVGA3DBLOCKDESC_UVWQ, - {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } }, - {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SINT */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } }, - {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_TYPELESS */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } }, - {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_UINT */ - - {SVGA3DBLOCKDESC_UV, - {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } }, - {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_SINT */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } }, - {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G8X24_TYPELESS */ - - {SVGA3DBLOCKDESC_DS, - {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } }, - {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT_S8X24_UINT */ - - {SVGA3DBLOCKDESC_R_FP, - {1, 1, 1}, 8, 8, {64, {{0}, {0}, {32}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */ - - {SVGA3DBLOCKDESC_GREEN, - {1, 1, 1}, 8, 8, {64, {{0}, {8}, {0}, {0} } }, - {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_X32_TYPELESS_G8X24_UINT */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } }, - {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_TYPELESS */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } }, - {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_UINT */ - - {SVGA3DBLOCKDESC_RGB_FP, - {1, 1, 1}, 4, 4, {32, {{10}, {11}, {11}, {0} } }, - {{{0}, {10}, {21}, {0} } } }, /* SVGA3D_R11G11B10_FLOAT */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_TYPELESS */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM */ - - {SVGA3DBLOCKDESC_RGBA_SRGB, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM_SRGB */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UINT */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_SINT */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } }, - {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_TYPELESS */ - - {SVGA3DBLOCKDESC_RG_FP, - {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } }, - {{{0}, {16}, {0}, 
{0} } } }, /* SVGA3D_R16G16_UINT */ - - {SVGA3DBLOCKDESC_UV, - {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } }, - {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_SINT */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_TYPELESS */ - - {SVGA3DBLOCKDESC_DEPTH, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_UINT */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_SINT */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } }, - {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_R24G8_TYPELESS */ - - {SVGA3DBLOCKDESC_DS, - {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } }, - {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_D24_UNORM_S8_UINT */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R24_UNORM_X8_TYPELESS */ - - {SVGA3DBLOCKDESC_GREEN, - {1, 1, 1}, 4, 4, {32, {{0}, {8}, {0}, {0} } }, - {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_X24_TYPELESS_G8_UINT */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } }, - {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_TYPELESS */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } }, - {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UNORM */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } }, - {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UINT */ - - {SVGA3DBLOCKDESC_UV, - {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } }, - {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_SINT */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_TYPELESS */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UNORM */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UINT */ - - {SVGA3DBLOCKDESC_U, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SNORM */ - - {SVGA3DBLOCKDESC_U, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SINT */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_TYPELESS */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UNORM */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UINT */ - - {SVGA3DBLOCKDESC_U, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SNORM */ - - {SVGA3DBLOCKDESC_U, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SINT */ - - {SVGA3DBLOCKDESC_RED, - {8, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R1_UNORM */ - - {SVGA3DBLOCKDESC_RGBE, - {1, 1, 1}, 4, 4, {32, {{9}, {9}, {9}, {5} } }, - {{{18}, {9}, {0}, {27} } } }, /* SVGA3D_R9G9B9E5_SHAREDEXP */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } }, - {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_B8G8_UNORM */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } }, - {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_G8R8_G8B8_UNORM */ 
- - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_TYPELESS */ - - {SVGA3DBLOCKDESC_COMPRESSED_SRGB, - {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_UNORM_SRGB */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_TYPELESS */ - - {SVGA3DBLOCKDESC_COMPRESSED_SRGB, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_UNORM_SRGB */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_TYPELESS */ - - {SVGA3DBLOCKDESC_COMPRESSED_SRGB, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_UNORM_SRGB */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_TYPELESS */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_UNORM */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_SNORM */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_TYPELESS */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_UNORM */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_SNORM */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } }, - {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_TYPELESS */ - - {SVGA3DBLOCKDESC_RGBA_SRGB, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_UNORM_SRGB */ - - {SVGA3DBLOCKDESC_RGB, - {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } }, - {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_TYPELESS */ - - {SVGA3DBLOCKDESC_RGB_SRGB, - {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } }, - {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_UNORM_SRGB */ - - {SVGA3DBLOCKDESC_DEPTH, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_DF16 */ - - {SVGA3DBLOCKDESC_DS, - {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } }, - {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_DF24 */ - - {SVGA3DBLOCKDESC_DS, - {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } }, - {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8_INT */ -}; - -static inline u32 clamped_umul32(u32 a, u32 b) -{ - u64 tmp = (u64) a*b; - return (tmp > (u64) U32_MAX) ? U32_MAX : tmp; -} - -static inline const struct svga3d_surface_desc * -svga3dsurface_get_desc(SVGA3dSurfaceFormat format) -{ - if (format < ARRAY_SIZE(svga3d_surface_descs)) - return &svga3d_surface_descs[format]; - - return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID]; -} - -/* - *---------------------------------------------------------------------- - * - * svga3dsurface_get_mip_size -- - * - * Given a base level size and the mip level, compute the size of - * the mip level. - * - * Results: - * See above. - * - * Side effects: - * None. 
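clamped_umul32() above saturates at U32_MAX instead of wrapping, which is what keeps the buffer-size helpers below overflow-safe. Two checkable examples:

#include <assert.h>

static void clamped_umul32_examples(void)
{
        assert(clamped_umul32(1000u, 1000u) == 1000000u);       /* fits: exact */
        assert(clamped_umul32(0x10000u, 0x10000u) == U32_MAX);  /* 2^32: clamps */
}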
- * - *---------------------------------------------------------------------- - */ - -static inline surf_size_struct -svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level) -{ - surf_size_struct size; - - size.width = max_t(u32, base_level.width >> mip_level, 1); - size.height = max_t(u32, base_level.height >> mip_level, 1); - size.depth = max_t(u32, base_level.depth >> mip_level, 1); - return size; -} - -static inline void -svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc, - const surf_size_struct *pixel_size, - surf_size_struct *block_size) -{ - block_size->width = DIV_ROUND_UP(pixel_size->width, - desc->block_size.width); - block_size->height = DIV_ROUND_UP(pixel_size->height, - desc->block_size.height); - block_size->depth = DIV_ROUND_UP(pixel_size->depth, - desc->block_size.depth); -} - -static inline bool -svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc) -{ - return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0; -} - -static inline u32 -svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc, - const surf_size_struct *size) -{ - u32 pitch; - surf_size_struct blocks; - - svga3dsurface_get_size_in_blocks(desc, size, &blocks); - - pitch = blocks.width * desc->pitch_bytes_per_block; - - return pitch; -} - -/* - *----------------------------------------------------------------------------- - * - * svga3dsurface_get_image_buffer_size -- - * - * Return the number of bytes of buffer space required to store - * one image of a surface, optionally using the specified pitch. - * - * If pitch is zero, it is assumed that rows are tightly packed. - * - * This function is overflow-safe. If the result would have - * overflowed, instead we return MAX_UINT32. - * - * Results: - * Byte count. - * - * Side effects: - * None. - * - *----------------------------------------------------------------------------- - */ - -static inline u32 -svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc, - const surf_size_struct *size, - u32 pitch) -{ - surf_size_struct image_blocks; - u32 slice_size, total_size; - - svga3dsurface_get_size_in_blocks(desc, size, &image_blocks); - - if (svga3dsurface_is_planar_surface(desc)) { - total_size = clamped_umul32(image_blocks.width, - image_blocks.height); - total_size = clamped_umul32(total_size, image_blocks.depth); - total_size = clamped_umul32(total_size, desc->bytes_per_block); - return total_size; - } - - if (pitch == 0) - pitch = svga3dsurface_calculate_pitch(desc, size); - - slice_size = clamped_umul32(image_blocks.height, pitch); - total_size = clamped_umul32(slice_size, image_blocks.depth); - - return total_size; -} - -static inline u32 -svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format, - surf_size_struct base_level_size, - u32 num_mip_levels, - bool cubemap) -{ - const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format); - u64 total_size = 0; - u32 mip; - - for (mip = 0; mip < num_mip_levels; mip++) { - surf_size_struct size = - svga3dsurface_get_mip_size(base_level_size, mip); - total_size += svga3dsurface_get_image_buffer_size(desc, - &size, 0); - } - - if (cubemap) - total_size *= SVGA3D_MAX_SURFACE_FACES; - - return (u32) min_t(u64, total_size, (u64) U32_MAX); -} - - -/** - * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel - * in an image (or volume). - * - * @width: The image width in pixels. 
- * @height: The image height in pixels - */ -static inline u32 -svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format, - u32 width, u32 height, - u32 x, u32 y, u32 z) -{ - const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format); - const u32 bw = desc->block_size.width, bh = desc->block_size.height; - const u32 bd = desc->block_size.depth; - const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block; - const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride; - const u32 offset = (z / bd * imgstride + - y / bh * rowstride + - x / bw * desc->bytes_per_block); - return offset; -} - - -static inline u32 -svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format, - surf_size_struct baseLevelSize, - u32 numMipLevels, - u32 face, - u32 mip) - -{ - u32 offset; - u32 mipChainBytes; - u32 mipChainBytesToLevel; - u32 i; - const struct svga3d_surface_desc *desc; - surf_size_struct mipSize; - u32 bytes; - - desc = svga3dsurface_get_desc(format); - - mipChainBytes = 0; - mipChainBytesToLevel = 0; - for (i = 0; i < numMipLevels; i++) { - mipSize = svga3dsurface_get_mip_size(baseLevelSize, i); - bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0); - mipChainBytes += bytes; - if (i < mip) - mipChainBytesToLevel += bytes; - } - - offset = mipChainBytes * face + mipChainBytesToLevel; - - return offset; -} diff --git a/drivers/gpu/drm/vmwgfx/svga_types.h b/drivers/gpu/drm/vmwgfx/svga_types.h deleted file mode 100644 index 55836dedcfc2..000000000000 --- a/drivers/gpu/drm/vmwgfx/svga_types.h +++ /dev/null @@ -1,45 +0,0 @@ -/************************************************************************** - * - * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ - -/** - * Silly typedefs for the svga headers. Currently the headers are shared - * between all components that talk to svga. And as such the headers are - * are in a completely different style and use weird defines. - * - * This file lets all the ugly be prefixed with svga*. 
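A worked example of the sizing helpers above: a 64x64 SVGA3D_DXT1 surface with three mip levels uses 4x4 blocks of 8 bytes (per the format table), so mip 0 is 16x16 blocks = 2048 bytes, mip 1 is 8x8 blocks = 512 bytes, and mip 2 is 4x4 blocks = 128 bytes, 2688 bytes in all; a cubemap would multiply that by SVGA3D_MAX_SURFACE_FACES.

#include <assert.h>
#include <stdbool.h>

static void dxt1_mip_chain_example(void)
{
        surf_size_struct base = { .width = 64, .height = 64, .depth = 1 };

        /* 2048 + 512 + 128 */
        assert(svga3dsurface_get_serialized_size(SVGA3D_DXT1, base, 3, false)
               == 2688);
}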
- */
-
-#ifndef _SVGA_TYPES_H_
-#define _SVGA_TYPES_H_
-
-typedef uint16_t uint16;
-typedef uint32_t uint32;
-typedef uint8_t uint8;
-typedef int32_t int32;
-typedef bool Bool;
-
-#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
new file mode 100644
index 000000000000..9c42e96da510
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -0,0 +1,1294 @@
+/**************************************************************************
+ *
+ * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * This file implements the vmwgfx context binding manager.
+ * The sole reason for having to use this code is that vmware guest-backed
+ * contexts can be swapped out to their backing mobs by the device at any
+ * time, and swapped back in at any time. At swapin time, the device
+ * validates the context bindings to make sure they point to valid resources.
+ * It's this outside-of-drawcall validation (which can happen at any time)
+ * that makes this code necessary.
+ *
+ * We therefore need to kill any context bindings pointing to a resource
+ * when the resource is swapped out. Furthermore, if the vmwgfx driver has
+ * swapped out the context we can't swap it in again to kill bindings because
+ * of backing mob reservation lockdep violations, so as part of
+ * context swapout, we also kill all bindings of a context, so that they are
+ * already killed if a resource to which a binding points
+ * needs to be swapped out.
+ *
+ * Note that a resource can be pointed to by bindings from multiple contexts.
+ * Therefore we can't easily protect this data with a per-context mutex
+ * (unless we use deadlock-safe WW mutexes). So we use a global binding_mutex
+ * to protect all binding manager data.
+ *
+ * Finally, any association between a context and a global resource
+ * (surface, shader or even DX query) is conceptually a context binding that
+ * needs to be tracked by this code.
+ */ + +#include "vmwgfx_drv.h" +#include "vmwgfx_binding.h" +#include "device_include/svga3d_reg.h" + +#define VMW_BINDING_RT_BIT 0 +#define VMW_BINDING_PS_BIT 1 +#define VMW_BINDING_SO_BIT 2 +#define VMW_BINDING_VB_BIT 3 +#define VMW_BINDING_NUM_BITS 4 + +#define VMW_BINDING_PS_SR_BIT 0 + +/** + * struct vmw_ctx_binding_state - per context binding state + * + * @dev_priv: Pointer to device private structure. + * @list: linked list of individual active bindings. + * @render_targets: Render target bindings. + * @texture_units: Texture units bindings. + * @ds_view: Depth-stencil view binding. + * @so_targets: StreamOutput target bindings. + * @vertex_buffers: Vertex buffer bindings. + * @index_buffer: Index buffer binding. + * @per_shader: Per shader-type bindings. + * @dirty: Bitmap tracking per binding-type changes that have not yet + * been emitted to the device. + * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that + * have not yet been emitted to the device. + * @bind_cmd_buffer: Scratch space used to construct binding commands. + * @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer + * @bind_first_slot: Used together with @bind_cmd_buffer to indicate the + * device binding slot of the first command data entry in @bind_cmd_buffer. + * + * Note that this structure also provides storage space for the individual + * struct vmw_ctx_binding objects, so that no dynamic allocation is needed + * for individual bindings. + * + */ +struct vmw_ctx_binding_state { + struct vmw_private *dev_priv; + struct list_head list; + struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX]; + struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS]; + struct vmw_ctx_bindinfo_view ds_view; + struct vmw_ctx_bindinfo_so so_targets[SVGA3D_DX_MAX_SOTARGETS]; + struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS]; + struct vmw_ctx_bindinfo_ib index_buffer; + struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE_DX10]; + + unsigned long dirty; + DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS); + + u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS]; + u32 bind_cmd_count; + u32 bind_first_slot; +}; + +static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind); +static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi, + bool rebind); +static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind); +static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind); +static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind); +static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind); +static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind); +static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs); +static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, + bool rebind); +static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind); +static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind); +static void vmw_binding_build_asserts(void) __attribute__ ((unused)); + +typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool); + +/** + * struct vmw_binding_info - Per binding type information for the binding + * manager + * + * @size: The size of the struct binding derived from a struct vmw_ctx_bindinfo. + * @offsets: array[shader_slot] of offsets to the array[slot] + * of struct bindings for the binding type. 
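Stepping back to struct vmw_ctx_binding_state above: the @dirty and @dirty_vb bitmaps let the manager coalesce changes and re-emit only what moved. Purely as an illustration of that pattern (this is not code from this file), consuming such a bitmap with the standard kernel bitmap helpers from <linux/bitmap.h> looks like:

static void example_flush_dirty_vb(struct vmw_ctx_binding_state *cbs)
{
	u32 slot;

	for_each_set_bit(slot, cbs->dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS) {
		/* re-emit the binding for vertex buffer 'slot' here */
	}
	bitmap_zero(cbs->dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);
}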
+ * @scrub_func: Pointer to the scrub function for this binding type. + * + * Holds static information to help optimize the binding manager and avoid + * an excessive amount of switch statements. + */ +struct vmw_binding_info { + size_t size; + const size_t *offsets; + vmw_scrub_func scrub_func; +}; + +/* + * A number of static variables that help determine the scrub func and the + * location of the struct vmw_ctx_bindinfo slots for each binding type. + */ +static const size_t vmw_binding_shader_offsets[] = { + offsetof(struct vmw_ctx_binding_state, per_shader[0].shader), + offsetof(struct vmw_ctx_binding_state, per_shader[1].shader), + offsetof(struct vmw_ctx_binding_state, per_shader[2].shader), +}; +static const size_t vmw_binding_rt_offsets[] = { + offsetof(struct vmw_ctx_binding_state, render_targets), +}; +static const size_t vmw_binding_tex_offsets[] = { + offsetof(struct vmw_ctx_binding_state, texture_units), +}; +static const size_t vmw_binding_cb_offsets[] = { + offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers), + offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers), + offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers), +}; +static const size_t vmw_binding_dx_ds_offsets[] = { + offsetof(struct vmw_ctx_binding_state, ds_view), +}; +static const size_t vmw_binding_sr_offsets[] = { + offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res), + offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res), + offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res), +}; +static const size_t vmw_binding_so_offsets[] = { + offsetof(struct vmw_ctx_binding_state, so_targets), +}; +static const size_t vmw_binding_vb_offsets[] = { + offsetof(struct vmw_ctx_binding_state, vertex_buffers), +}; +static const size_t vmw_binding_ib_offsets[] = { + offsetof(struct vmw_ctx_binding_state, index_buffer), +}; + +static const struct vmw_binding_info vmw_binding_infos[] = { + [vmw_ctx_binding_shader] = { + .size = sizeof(struct vmw_ctx_bindinfo_shader), + .offsets = vmw_binding_shader_offsets, + .scrub_func = vmw_binding_scrub_shader}, + [vmw_ctx_binding_rt] = { + .size = sizeof(struct vmw_ctx_bindinfo_view), + .offsets = vmw_binding_rt_offsets, + .scrub_func = vmw_binding_scrub_render_target}, + [vmw_ctx_binding_tex] = { + .size = sizeof(struct vmw_ctx_bindinfo_tex), + .offsets = vmw_binding_tex_offsets, + .scrub_func = vmw_binding_scrub_texture}, + [vmw_ctx_binding_cb] = { + .size = sizeof(struct vmw_ctx_bindinfo_cb), + .offsets = vmw_binding_cb_offsets, + .scrub_func = vmw_binding_scrub_cb}, + [vmw_ctx_binding_dx_shader] = { + .size = sizeof(struct vmw_ctx_bindinfo_shader), + .offsets = vmw_binding_shader_offsets, + .scrub_func = vmw_binding_scrub_dx_shader}, + [vmw_ctx_binding_dx_rt] = { + .size = sizeof(struct vmw_ctx_bindinfo_view), + .offsets = vmw_binding_rt_offsets, + .scrub_func = vmw_binding_scrub_dx_rt}, + [vmw_ctx_binding_sr] = { + .size = sizeof(struct vmw_ctx_bindinfo_view), + .offsets = vmw_binding_sr_offsets, + .scrub_func = vmw_binding_scrub_sr}, + [vmw_ctx_binding_ds] = { + .size = sizeof(struct vmw_ctx_bindinfo_view), + .offsets = vmw_binding_dx_ds_offsets, + .scrub_func = vmw_binding_scrub_dx_rt}, + [vmw_ctx_binding_so] = { + .size = sizeof(struct vmw_ctx_bindinfo_so), + .offsets = vmw_binding_so_offsets, + .scrub_func = vmw_binding_scrub_so}, + [vmw_ctx_binding_vb] = { + .size = sizeof(struct vmw_ctx_bindinfo_vb), + .offsets = vmw_binding_vb_offsets, + .scrub_func = vmw_binding_scrub_vb}, + [vmw_ctx_binding_ib] = 
{
+ .size = sizeof(struct vmw_ctx_bindinfo_ib),
+ .offsets = vmw_binding_ib_offsets,
+ .scrub_func = vmw_binding_scrub_ib},
+};
+
+/**
+ * vmw_cbs_context - Return a pointer to the context resource of a
+ * context binding state tracker.
+ *
+ * @cbs: The context binding state tracker.
+ *
+ * Provided there are any active bindings, this function will return an
+ * unreferenced pointer to the context resource that owns the context
+ * binding state tracker. If there are no active bindings, this function
+ * will return NULL. Note that the caller must somehow ensure that a reference
+ * is held on the context resource prior to calling this function.
+ */
+static const struct vmw_resource *
+vmw_cbs_context(const struct vmw_ctx_binding_state *cbs)
+{
+ if (list_empty(&cbs->list))
+ return NULL;
+
+ return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo,
+ ctx_list)->ctx;
+}
+
+/**
+ * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location.
+ *
+ * @cbs: Pointer to a struct vmw_ctx_binding_state which holds the slot.
+ * @bt: The binding type.
+ * @shader_slot: The shader slot of the binding. If none, then set to 0.
+ * @slot: The slot of the binding.
+ */
+static struct vmw_ctx_bindinfo *
+vmw_binding_loc(struct vmw_ctx_binding_state *cbs,
+ enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot)
+{
+ const struct vmw_binding_info *b = &vmw_binding_infos[bt];
+ size_t offset = b->offsets[shader_slot] + b->size*slot;
+
+ return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset);
+}
+
+/**
+ * vmw_binding_drop: Stop tracking a context binding
+ *
+ * @bi: Pointer to binding tracker storage.
+ *
+ * Stops tracking a context binding, and re-initializes its storage.
+ * Typically used when the context binding is replaced with a binding to
+ * another (or the same, for that matter) resource.
+ */
+static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi)
+{
+ list_del(&bi->ctx_list);
+ if (!list_empty(&bi->res_list))
+ list_del(&bi->res_list);
+ bi->ctx = NULL;
+}
+
+/**
+ * vmw_binding_add: Start tracking a context binding
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ * @bi: Information about the binding to track.
+ * @shader_slot: The shader slot of the binding. If none, then set to 0.
+ * @slot: The slot of the binding.
+ *
+ * Starts tracking the binding in the context binding
+ * state structure @cbs.
+ */
+void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *bi,
+ u32 shader_slot, u32 slot)
+{
+ struct vmw_ctx_bindinfo *loc =
+ vmw_binding_loc(cbs, bi->bt, shader_slot, slot);
+ const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt];
+
+ if (loc->ctx != NULL)
+ vmw_binding_drop(loc);
+
+ memcpy(loc, bi, b->size);
+ loc->scrubbed = false;
+ list_add(&loc->ctx_list, &cbs->list);
+ INIT_LIST_HEAD(&loc->res_list);
+}
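The vmw_binding_infos table drives a simple offsetof()-based lookup: vmw_binding_loc() resolves a (type, shader slot, slot) triple to the address of a tracking slot with plain pointer arithmetic instead of a switch per binding type. A minimal, self-contained sketch of the same technique follows; all names and types are invented for illustration, this is not vmwgfx code:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical per-type slot arrays embedded in one state struct. */
struct state {
	int shader[3];   /* one slot per shader stage */
	int textures[8]; /* one slot per texture unit */
};

struct type_info {
	size_t size;           /* size of one slot */
	const size_t *offsets; /* per shader-stage start offsets */
};

static const size_t shader_offsets[] = {
	offsetof(struct state, shader[0]),
	offsetof(struct state, shader[1]),
	offsetof(struct state, shader[2]),
};
static const size_t tex_offsets[] = {
	offsetof(struct state, textures),
};

enum type { TYPE_SHADER, TYPE_TEX };

static const struct type_info infos[] = {
	[TYPE_SHADER] = { sizeof(int), shader_offsets },
	[TYPE_TEX]    = { sizeof(int), tex_offsets },
};

/* Table-driven slot lookup: base + per-stage offset + size * slot. */
static void *slot_loc(struct state *s, enum type t, unsigned stage,
		      unsigned slot)
{
	const struct type_info *i = &infos[t];

	return (unsigned char *)s + i->offsets[stage] + i->size * slot;
}

int main(void)
{
	struct state s = {0};

	*(int *)slot_loc(&s, TYPE_TEX, 0, 3) = 42;
	printf("%d\n", s.textures[3]); /* prints 42 */
	return 0;
}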
+
+/**
+ * vmw_binding_transfer: Transfer a context binding tracking entry.
+ *
+ * @cbs: Pointer to the persistent context binding state tracker.
+ * @from: Pointer to the staged context binding state tracker that
+ * @bi belongs to.
+ * @bi: Information about the binding to track.
+ */
+static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_binding_state *from,
+ const struct vmw_ctx_bindinfo *bi)
+{
+ size_t offset = (unsigned long)bi - (unsigned long)from;
+ struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *)
+ ((unsigned long) cbs + offset);
+
+ if (loc->ctx != NULL) {
+ WARN_ON(bi->scrubbed);
+
+ vmw_binding_drop(loc);
+ }
+
+ if (bi->res != NULL) {
+ memcpy(loc, bi, vmw_binding_infos[bi->bt].size);
+ list_add_tail(&loc->ctx_list, &cbs->list);
+ list_add_tail(&loc->res_list, &loc->res->binding_head);
+ }
+}
+
+/**
+ * vmw_binding_state_kill - Kill all bindings associated with a
+ * struct vmw_ctx_binding_state structure, and re-initialize the structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker. Then re-initializes the whole structure.
+ */
+void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_ctx_bindinfo *entry, *next;
+
+ vmw_binding_state_scrub(cbs);
+ list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+ vmw_binding_drop(entry);
+}
+
+/**
+ * vmw_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding_state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_ctx_bindinfo *entry;
+
+ list_for_each_entry(entry, &cbs->list, ctx_list) {
+ if (!entry->scrubbed) {
+ (void) vmw_binding_infos[entry->bt].scrub_func
+ (entry, false);
+ entry->scrubbed = true;
+ }
+ }
+
+ (void) vmw_binding_emit_dirty(cbs);
+}
+
+/**
+ * vmw_binding_res_list_kill - Kill all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Kills all bindings associated with a specific resource. Typically
+ * called before the resource is destroyed.
+ */
+void vmw_binding_res_list_kill(struct list_head *head)
+{
+ struct vmw_ctx_bindinfo *entry, *next;
+
+ vmw_binding_res_list_scrub(head);
+ list_for_each_entry_safe(entry, next, head, res_list)
+ vmw_binding_drop(entry);
+}
+
+/**
+ * vmw_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_binding_res_list_scrub(struct list_head *head)
+{
+ struct vmw_ctx_bindinfo *entry;
+
+ list_for_each_entry(entry, head, res_list) {
+ if (!entry->scrubbed) {
+ (void) vmw_binding_infos[entry->bt].scrub_func
+ (entry, false);
+ entry->scrubbed = true;
+ }
+ }
+
+ list_for_each_entry(entry, head, res_list) {
+ struct vmw_ctx_binding_state *cbs =
+ vmw_context_binding_state(entry->ctx);
+
+ (void) vmw_binding_emit_dirty(cbs);
+ }
+}
+
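vmw_binding_transfer() above relies on the staged and persistent state structures having identical layout: a pointer into one copy is rebased onto the other by reusing its byte offset. A small stand-alone sketch of that pointer-rebasing idiom, with illustrative names only:

#include <stddef.h>
#include <stdio.h>

struct pair { int a; int b; };

/*
 * Rebase a pointer to a member of 'from' so that it points at the
 * corresponding member of 'to'. Only the byte offset within the
 * struct is used, so it works for any member.
 */
static int *rebase(struct pair *to, const struct pair *from,
		   const int *member)
{
	size_t offset = (const char *)member - (const char *)from;

	return (int *)((char *)to + offset);
}

int main(void)
{
	struct pair staged = { .a = 1, .b = 2 };
	struct pair persistent = { 0 };

	*rebase(&persistent, &staged, &staged.b) = staged.b;
	printf("%d\n", persistent.b); /* prints 2 */
	return 0;
}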
+/**
+ * vmw_binding_state_commit - Commit staged binding info
+ *
+ * @to: Pointer to the persistent context binding state tracker to
+ * commit the staged binding info to.
+ * @from: Staged binding info built during execbuf.
+ *
+ * Transfers binding info from a temporary structure
+ * (typically used by execbuf) to the persistent
+ * structure in the context. This can be done once commands have been
+ * submitted to hardware.
+ */
+void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
+ struct vmw_ctx_binding_state *from)
+{
+ struct vmw_ctx_bindinfo *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &from->list, ctx_list) {
+ vmw_binding_transfer(to, from, entry);
+ vmw_binding_drop(entry);
+ }
+}
+
+/**
+ * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_ctx_bindinfo *entry;
+ int ret;
+
+ list_for_each_entry(entry, &cbs->list, ctx_list) {
+ if (likely(!entry->scrubbed))
+ continue;
+
+ if ((entry->res == NULL || entry->res->id ==
+ SVGA3D_INVALID_ID))
+ continue;
+
+ ret = vmw_binding_infos[entry->bt].scrub_func(entry, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ entry->scrubbed = false;
+ }
+
+ return vmw_binding_emit_dirty(cbs);
+}
+
+/**
+ * vmw_binding_scrub_shader - scrub a shader binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_bindinfo_shader *binding =
+ container_of(bi, typeof(*binding), bi);
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetShader body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_SET_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = bi->ctx->id;
+ cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+ cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_binding_scrub_render_target - scrub a render target binding
+ * from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+ bool rebind)
+{
+ struct vmw_ctx_bindinfo_view *binding =
+ container_of(bi, typeof(*binding), bi);
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetRenderTarget body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for render target "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = bi->ctx->id;
+ cmd->body.type = binding->slot;
+ cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ cmd->body.target.face = 0;
+ cmd->body.target.mipmap = 0;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_binding_scrub_texture - scrub a texture binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ *
+ * TODO: Possibly complement this function with a function that takes
+ * a list of texture bindings and combines them to a single command.
+ */ +static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, + bool rebind) +{ + struct vmw_ctx_bindinfo_tex *binding = + container_of(bi, typeof(*binding), bi); + struct vmw_private *dev_priv = bi->ctx->dev_priv; + struct { + SVGA3dCmdHeader header; + struct { + SVGA3dCmdSetTextureState c; + SVGA3dTextureState s1; + } body; + } *cmd; + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for texture " + "unbinding.\n"); + return -ENOMEM; + } + + cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE; + cmd->header.size = sizeof(cmd->body); + cmd->body.c.cid = bi->ctx->id; + cmd->body.s1.stage = binding->texture_stage; + cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; + cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + + return 0; +} + +/** + * vmw_binding_scrub_dx_shader - scrub a dx shader binding from a context. + * + * @bi: single binding information. + * @rebind: Whether to issue a bind instead of scrub command. + */ +static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind) +{ + struct vmw_ctx_bindinfo_shader *binding = + container_of(bi, typeof(*binding), bi); + struct vmw_private *dev_priv = bi->ctx->dev_priv; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXSetShader body; + } *cmd; + + cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for DX shader " + "unbinding.\n"); + return -ENOMEM; + } + cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER; + cmd->header.size = sizeof(cmd->body); + cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN; + cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + + return 0; +} + +/** + * vmw_binding_scrub_cb - scrub a constant buffer binding from a context. + * + * @bi: single binding information. + * @rebind: Whether to issue a bind instead of scrub command. + */ +static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind) +{ + struct vmw_ctx_bindinfo_cb *binding = + container_of(bi, typeof(*binding), bi); + struct vmw_private *dev_priv = bi->ctx->dev_priv; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXSetSingleConstantBuffer body; + } *cmd; + + cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for DX shader " + "unbinding.\n"); + return -ENOMEM; + } + + cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER; + cmd->header.size = sizeof(cmd->body); + cmd->body.slot = binding->slot; + cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN; + if (rebind) { + cmd->body.offsetInBytes = binding->offset; + cmd->body.sizeInBytes = binding->size; + cmd->body.sid = bi->res->id; + } else { + cmd->body.offsetInBytes = 0; + cmd->body.sizeInBytes = 0; + cmd->body.sid = SVGA3D_INVALID_ID; + } + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + + return 0; +} + +/** + * vmw_collect_view_ids - Build view id data for a view binding command + * without checking which bindings actually need to be emitted + * + * @cbs: Pointer to the context's struct vmw_ctx_binding_state + * @bi: Pointer to where the binding info array is stored in @cbs + * @max_num: Maximum number of entries in the @bi array. + * + * Scans the @bi array for bindings and builds a buffer of view id data. + * Stops at the first non-existing binding in the @bi array. 
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be + * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer + * contains the command data. + */ +static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs, + const struct vmw_ctx_bindinfo *bi, + u32 max_num) +{ + const struct vmw_ctx_bindinfo_view *biv = + container_of(bi, struct vmw_ctx_bindinfo_view, bi); + unsigned long i; + + cbs->bind_cmd_count = 0; + cbs->bind_first_slot = 0; + + for (i = 0; i < max_num; ++i, ++biv) { + if (!biv->bi.ctx) + break; + + cbs->bind_cmd_buffer[cbs->bind_cmd_count++] = + ((biv->bi.scrubbed) ? + SVGA3D_INVALID_ID : biv->bi.res->id); + } +} + +/** + * vmw_collect_dirty_view_ids - Build view id data for a view binding command + * + * @cbs: Pointer to the context's struct vmw_ctx_binding_state + * @bi: Pointer to where the binding info array is stored in @cbs + * @dirty: Bitmap indicating which bindings need to be emitted. + * @max_num: Maximum number of entries in the @bi array. + * + * Scans the @bi array for bindings that need to be emitted and + * builds a buffer of view id data. + * On output, @cbs->bind_cmd_count contains the number of bindings to be + * emitted, @cbs->bind_first_slot indicates the index of the first emitted + * binding, and @cbs->bind_cmd_buffer contains the command data. + */ +static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs, + const struct vmw_ctx_bindinfo *bi, + unsigned long *dirty, + u32 max_num) +{ + const struct vmw_ctx_bindinfo_view *biv = + container_of(bi, struct vmw_ctx_bindinfo_view, bi); + unsigned long i, next_bit; + + cbs->bind_cmd_count = 0; + i = find_first_bit(dirty, max_num); + next_bit = i; + cbs->bind_first_slot = i; + + biv += i; + for (; i < max_num; ++i, ++biv) { + cbs->bind_cmd_buffer[cbs->bind_cmd_count++] = + ((!biv->bi.ctx || biv->bi.scrubbed) ? 
+ SVGA3D_INVALID_ID : biv->bi.res->id);
+
+ if (next_bit == i) {
+ next_bit = find_next_bit(dirty, max_num, i + 1);
+ if (next_bit >= max_num)
+ break;
+ }
+ }
+}
+
+/**
+ * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @shader_slot: The shader slot of the bindings to emit.
+ */
+static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
+ int shader_slot)
+{
+ const struct vmw_ctx_bindinfo *loc =
+ &cbs->per_shader[shader_slot].shader_res[0].bi;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetShaderResources body;
+ } *cmd;
+ size_t cmd_size, view_id_size;
+ const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+ vmw_collect_dirty_view_ids(cbs, loc,
+ cbs->per_shader[shader_slot].dirty_sr,
+ SVGA3D_DX_MAX_SRVIEWS);
+ if (cbs->bind_cmd_count == 0)
+ return 0;
+
+ view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+ cmd_size = sizeof(*cmd) + view_id_size;
+ cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for DX shader"
+ " resource binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
+ cmd->header.size = sizeof(cmd->body) + view_id_size;
+ cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN;
+ cmd->body.startView = cbs->bind_first_slot;
+
+ memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+ vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
+ cbs->bind_first_slot, cbs->bind_cmd_count);
+
+ return 0;
+}
+
+/**
+ * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ */
+static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
+{
+ const struct vmw_ctx_bindinfo *loc = &cbs->render_targets[0].bi;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetRenderTargets body;
+ } *cmd;
+ size_t cmd_size, view_id_size;
+ const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+ vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
+ view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+ cmd_size = sizeof(*cmd) + view_id_size;
+ cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for DX render-target"
+ " binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
+ cmd->header.size = sizeof(cmd->body) + view_id_size;
+
+ if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed)
+ cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id;
+ else
+ cmd->body.depthStencilViewId = SVGA3D_INVALID_ID;
+
+ memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+ vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+ return 0;
+
+}
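Both emitters above use the same wire format: a fixed-size command struct followed by a variable-length id array, reserved as a single allocation, with the payload copied to &cmd[1], i.e. the first byte past the header. A generic sketch of this header-plus-trailing-array pattern follows; the names are invented and it heap-allocates instead of reserving FIFO space, so it only illustrates the layout trick:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cmd_header {
	uint32_t id;
	uint32_t size; /* size of the payload following the header */
};

/*
 * Build a command with a trailing variable-length id array in one
 * contiguous allocation; &hdr[1] is the first byte after the header.
 */
static struct cmd_header *build_cmd(uint32_t id, const uint32_t *ids,
				    size_t count)
{
	size_t payload = count * sizeof(uint32_t);
	struct cmd_header *hdr = malloc(sizeof(*hdr) + payload);

	if (!hdr)
		return NULL;

	hdr->id = id;
	hdr->size = (uint32_t)payload;
	memcpy(&hdr[1], ids, payload); /* payload lands right after header */
	return hdr;
}

int main(void)
{
	uint32_t ids[] = { 7, 8, 9 };
	struct cmd_header *cmd = build_cmd(42, ids, 3);

	if (cmd) {
		uint32_t *payload = (uint32_t *)&cmd[1];

		printf("id=%u first=%u\n", cmd->id, payload[0]);
		free(cmd);
	}
	return 0;
}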
+
+/**
+ * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command
+ * without checking which bindings actually need to be emitted
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data.
+ * Stops at the first non-existing binding in the @bi array.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
+ * contains the command data.
+ */
+static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *bi,
+ u32 max_num)
+{
+ const struct vmw_ctx_bindinfo_so *biso =
+ container_of(bi, struct vmw_ctx_bindinfo_so, bi);
+ unsigned long i;
+ SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;
+
+ cbs->bind_cmd_count = 0;
+ cbs->bind_first_slot = 0;
+
+ for (i = 0; i < max_num; ++i, ++biso, ++so_buffer,
+ ++cbs->bind_cmd_count) {
+ if (!biso->bi.ctx)
+ break;
+
+ if (!biso->bi.scrubbed) {
+ so_buffer->sid = biso->bi.res->id;
+ so_buffer->offset = biso->offset;
+ so_buffer->sizeInBytes = biso->size;
+ } else {
+ so_buffer->sid = SVGA3D_INVALID_ID;
+ so_buffer->offset = 0;
+ so_buffer->sizeInBytes = 0;
+ }
+ }
+}
+
+/**
+ * vmw_emit_set_so - Issue delayed streamout binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ */
+static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs)
+{
+ const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetSOTargets body;
+ } *cmd;
+ size_t cmd_size, so_target_size;
+ const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+ vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS);
+ if (cbs->bind_cmd_count == 0)
+ return 0;
+
+ so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
+ cmd_size = sizeof(*cmd) + so_target_size;
+ cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for DX SO target"
+ " binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
+ cmd->header.size = sizeof(cmd->body) + so_target_size;
+ memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);
+
+ vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+ return 0;
+
+}
+
+/**
+ * vmw_binding_emit_dirty_ps - Issue delayed per shader binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ */
+static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0];
+ u32 i;
+ int ret = 0;
+
+ for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) {
+ if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty))
+ continue;
+
+ ret = vmw_emit_set_sr(cbs, i);
+ if (ret)
+ break;
+
+ __clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty);
+ }
+
+ return ret;
+}
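The dirty collectors used above and below emit one contiguous range: from the first dirty slot through the last one, including any clean slots in between, so the device command only needs a start slot and a count. A simplified stand-alone sketch of that range collection over a 32-bit bitmap; illustrative only, the driver uses find_first_bit()/find_next_bit() over bitmaps of arbitrary length:

#include <stdint.h>
#include <stdio.h>

#define MAX_SLOTS 32

/*
 * Collect one contiguous range covering every dirty slot: start at the
 * first dirty bit, stop after the last one, and include clean slots in
 * between so a single "first_slot + count" command can describe it.
 */
static unsigned collect_range(uint32_t dirty, const int *slots,
			      int *out, unsigned *first_slot)
{
	unsigned first = 0, last = 0, count = 0, i;

	if (!dirty)
		return 0;

	while (!(dirty & (1u << first)))
		first++;
	for (i = first; i < MAX_SLOTS; ++i)
		if (dirty & (1u << i))
			last = i;

	*first_slot = first;
	for (i = first; i <= last; ++i)
		out[count++] = slots[i];

	return count;
}

int main(void)
{
	int slots[MAX_SLOTS] = { [2] = 20, [3] = 30, [5] = 50 };
	int out[MAX_SLOTS];
	unsigned first;
	unsigned n = collect_range((1u << 2) | (1u << 5), slots, out, &first);

	printf("first=%u count=%u\n", first, n); /* first=2 count=4 */
	return 0;
}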
+
+/**
+ * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a
+ * SVGA3dCmdDXSetVertexBuffers command
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @dirty: Bitmap indicating which bindings need to be emitted.
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings that need to be emitted and
+ * builds a buffer of SVGA3dVertexBuffer data.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot indicates the index of the first emitted
+ * binding, and @cbs->bind_cmd_buffer contains the command data.
+ */
+static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *bi,
+ unsigned long *dirty,
+ u32 max_num)
+{
+ const struct vmw_ctx_bindinfo_vb *biv =
+ container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
+ unsigned long i, next_bit;
+ SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) &cbs->bind_cmd_buffer;
+
+ cbs->bind_cmd_count = 0;
+ i = find_first_bit(dirty, max_num);
+ next_bit = i;
+ cbs->bind_first_slot = i;
+
+ biv += i;
+ for (; i < max_num; ++i, ++biv, ++vbs) {
+ if (!biv->bi.ctx || biv->bi.scrubbed) {
+ vbs->sid = SVGA3D_INVALID_ID;
+ vbs->stride = 0;
+ vbs->offset = 0;
+ } else {
+ vbs->sid = biv->bi.res->id;
+ vbs->stride = biv->stride;
+ vbs->offset = biv->offset;
+ }
+ cbs->bind_cmd_count++;
+ if (next_bit == i) {
+ next_bit = find_next_bit(dirty, max_num, i + 1);
+ if (next_bit >= max_num)
+ break;
+ }
+ }
+}
+
+/**
+ * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ */
+static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
+{
+ const struct vmw_ctx_bindinfo *loc =
+ &cbs->vertex_buffers[0].bi;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetVertexBuffers body;
+ } *cmd;
+ size_t cmd_size, set_vb_size;
+ const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+ vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb,
+ SVGA3D_DX_MAX_VERTEXBUFFERS);
+ if (cbs->bind_cmd_count == 0)
+ return 0;
+
+ set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
+ cmd_size = sizeof(*cmd) + set_vb_size;
+ cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for DX vertex buffer"
+ " binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
+ cmd->header.size = sizeof(cmd->body) + set_vb_size;
+ cmd->body.startBuffer = cbs->bind_first_slot;
+
+ memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);
+
+ vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ bitmap_clear(cbs->dirty_vb,
+ cbs->bind_first_slot, cbs->bind_cmd_count);
+
+ return 0;
+}
+
+/**
+ * vmw_binding_emit_dirty - Issue delayed binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ * This function issues the delayed binding commands that arise from
+ * previous scrub / unscrub calls. These binding commands are typically
+ * commands that batch a number of bindings and therefore it makes sense
+ * to delay them.
+ */
+static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
+{
+ int ret = 0;
+ unsigned long hit = 0;
+
+ while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit))
+ < VMW_BINDING_NUM_BITS) {
+
+ switch (hit) {
+ case VMW_BINDING_RT_BIT:
+ ret = vmw_emit_set_rt(cbs);
+ break;
+ case VMW_BINDING_PS_BIT:
+ ret = vmw_binding_emit_dirty_ps(cbs);
+ break;
+ case VMW_BINDING_SO_BIT:
+ ret = vmw_emit_set_so(cbs);
+ break;
+ case VMW_BINDING_VB_BIT:
+ ret = vmw_emit_set_vb(cbs);
+ break;
+ default:
+ BUG();
+ }
+ if (ret)
+ return ret;
+
+ __clear_bit(hit, &cbs->dirty);
+ hit++;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_binding_scrub_sr - Schedule a dx shader resource binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */ +static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind) +{ + struct vmw_ctx_bindinfo_view *biv = + container_of(bi, struct vmw_ctx_bindinfo_view, bi); + struct vmw_ctx_binding_state *cbs = + vmw_context_binding_state(bi->ctx); + + __set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr); + __set_bit(VMW_BINDING_PS_SR_BIT, + &cbs->per_shader[biv->shader_slot].dirty); + __set_bit(VMW_BINDING_PS_BIT, &cbs->dirty); + + return 0; +} + +/** + * vmw_binding_scrub_dx_rt - Schedule a dx rendertarget binding + * scrub from a context + * + * @bi: single binding information. + * @rebind: Whether to issue a bind instead of scrub command. + */ +static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind) +{ + struct vmw_ctx_binding_state *cbs = + vmw_context_binding_state(bi->ctx); + + __set_bit(VMW_BINDING_RT_BIT, &cbs->dirty); + + return 0; +} + +/** + * vmw_binding_scrub_so - Schedule a dx streamoutput buffer binding + * scrub from a context + * + * @bi: single binding information. + * @rebind: Whether to issue a bind instead of scrub command. + */ +static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind) +{ + struct vmw_ctx_binding_state *cbs = + vmw_context_binding_state(bi->ctx); + + __set_bit(VMW_BINDING_SO_BIT, &cbs->dirty); + + return 0; +} + +/** + * vmw_binding_scrub_vb - Schedule a dx vertex buffer binding + * scrub from a context + * + * @bi: single binding information. + * @rebind: Whether to issue a bind instead of scrub command. + */ +static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind) +{ + struct vmw_ctx_bindinfo_vb *bivb = + container_of(bi, struct vmw_ctx_bindinfo_vb, bi); + struct vmw_ctx_binding_state *cbs = + vmw_context_binding_state(bi->ctx); + + __set_bit(bivb->slot, cbs->dirty_vb); + __set_bit(VMW_BINDING_VB_BIT, &cbs->dirty); + + return 0; +} + +/** + * vmw_binding_scrub_ib - scrub a dx index buffer binding from a context + * + * @bi: single binding information. + * @rebind: Whether to issue a bind instead of scrub command. + */ +static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind) +{ + struct vmw_ctx_bindinfo_ib *binding = + container_of(bi, typeof(*binding), bi); + struct vmw_private *dev_priv = bi->ctx->dev_priv; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXSetIndexBuffer body; + } *cmd; + + cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for DX index buffer " + "binding.\n"); + return -ENOMEM; + } + cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER; + cmd->header.size = sizeof(cmd->body); + if (rebind) { + cmd->body.sid = bi->res->id; + cmd->body.format = binding->format; + cmd->body.offset = binding->offset; + } else { + cmd->body.sid = SVGA3D_INVALID_ID; + cmd->body.format = 0; + cmd->body.offset = 0; + } + + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + + return 0; +} + +/** + * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with + * memory accounting. + * + * @dev_priv: Pointer to a device private structure. + * + * Returns a pointer to a newly allocated struct or an error pointer on error. 
+ */ +struct vmw_ctx_binding_state * +vmw_binding_state_alloc(struct vmw_private *dev_priv) +{ + struct vmw_ctx_binding_state *cbs; + int ret; + + ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs), + false, false); + if (ret) + return ERR_PTR(ret); + + cbs = vzalloc(sizeof(*cbs)); + if (!cbs) { + ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs)); + return ERR_PTR(-ENOMEM); + } + + cbs->dev_priv = dev_priv; + INIT_LIST_HEAD(&cbs->list); + + return cbs; +} + +/** + * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its + * memory accounting info. + * + * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed. + */ +void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs) +{ + struct vmw_private *dev_priv = cbs->dev_priv; + + vfree(cbs); + ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs)); +} + +/** + * vmw_binding_state_list - Get the binding list of a + * struct vmw_ctx_binding_state + * + * @cbs: Pointer to the struct vmw_ctx_binding_state + * + * Returns the binding list which can be used to traverse through the bindings + * and access the resource information of all bindings. + */ +struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs) +{ + return &cbs->list; +} + +/** + * vmwgfx_binding_state_reset - clear a struct vmw_ctx_binding_state + * + * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared + * + * Drops all bindings registered in @cbs. No device binding actions are + * performed. + */ +void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs) +{ + struct vmw_ctx_bindinfo *entry, *next; + + list_for_each_entry_safe(entry, next, &cbs->list, ctx_list) + vmw_binding_drop(entry); +} + +/* + * This function is unused at run-time, and only used to hold various build + * asserts important for code optimization assumptions. + */ +static void vmw_binding_build_asserts(void) +{ + BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3); + BUILD_BUG_ON(SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS > SVGA3D_RT_MAX); + BUILD_BUG_ON(sizeof(uint32) != sizeof(u32)); + + /* + * struct vmw_ctx_binding_state::bind_cmd_buffer is used for various + * view id arrays. + */ + BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX); + BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS); + BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS); + + /* + * struct vmw_ctx_binding_state::bind_cmd_buffer is used for + * u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers + */ + BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) > + VMW_MAX_VIEW_BINDINGS*sizeof(u32)); + BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) > + VMW_MAX_VIEW_BINDINGS*sizeof(u32)); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h new file mode 100644 index 000000000000..bf2e77ad5a20 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h @@ -0,0 +1,209 @@ +/************************************************************************** + * + * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +#ifndef _VMWGFX_BINDING_H_ +#define _VMWGFX_BINDING_H_ + +#include "device_include/svga3d_reg.h" +#include <linux/list.h> + +#define VMW_MAX_VIEW_BINDINGS 128 + +struct vmw_private; +struct vmw_ctx_binding_state; + +/* + * enum vmw_ctx_binding_type - abstract resource to context binding types + */ +enum vmw_ctx_binding_type { + vmw_ctx_binding_shader, + vmw_ctx_binding_rt, + vmw_ctx_binding_tex, + vmw_ctx_binding_cb, + vmw_ctx_binding_dx_shader, + vmw_ctx_binding_dx_rt, + vmw_ctx_binding_sr, + vmw_ctx_binding_ds, + vmw_ctx_binding_so, + vmw_ctx_binding_vb, + vmw_ctx_binding_ib, + vmw_ctx_binding_max +}; + +/** + * struct vmw_ctx_bindinfo - single binding metadata + * + * @ctx_list: List head for the context's list of bindings. + * @res_list: List head for a resource's list of bindings. + * @ctx: Non-refcounted pointer to the context that owns the binding. NULL + * indicates no binding present. + * @res: Non-refcounted pointer to the resource the binding points to. This + * is typically a surface or a view. + * @bt: Binding type. + * @scrubbed: Whether the binding has been scrubbed from the context. + */ +struct vmw_ctx_bindinfo { + struct list_head ctx_list; + struct list_head res_list; + struct vmw_resource *ctx; + struct vmw_resource *res; + enum vmw_ctx_binding_type bt; + bool scrubbed; +}; + +/** + * struct vmw_ctx_bindinfo_tex - texture stage binding metadata + * + * @bi: struct vmw_ctx_bindinfo we derive from. + * @texture_stage: Device data used to reconstruct binding command. + */ +struct vmw_ctx_bindinfo_tex { + struct vmw_ctx_bindinfo bi; + uint32 texture_stage; +}; + +/** + * struct vmw_ctx_bindinfo_shader - Shader binding metadata + * + * @bi: struct vmw_ctx_bindinfo we derive from. + * @shader_slot: Device data used to reconstruct binding command. + */ +struct vmw_ctx_bindinfo_shader { + struct vmw_ctx_bindinfo bi; + SVGA3dShaderType shader_slot; +}; + +/** + * struct vmw_ctx_bindinfo_cb - Constant buffer binding metadata + * + * @bi: struct vmw_ctx_bindinfo we derive from. + * @shader_slot: Device data used to reconstruct binding command. + * @offset: Device data used to reconstruct binding command. + * @size: Device data used to reconstruct binding command. + * @slot: Device data used to reconstruct binding command. 
+ */
+struct vmw_ctx_bindinfo_cb {
+ struct vmw_ctx_bindinfo bi;
+ SVGA3dShaderType shader_slot;
+ uint32 offset;
+ uint32 size;
+ uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_view - View binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_view {
+ struct vmw_ctx_bindinfo bi;
+ SVGA3dShaderType shader_slot;
+ uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_so - StreamOutput binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @size: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_so {
+ struct vmw_ctx_bindinfo bi;
+ uint32 offset;
+ uint32 size;
+ uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_vb - Vertex buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @stride: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_vb {
+ struct vmw_ctx_bindinfo bi;
+ uint32 offset;
+ uint32 stride;
+ uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_ib - Index buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @format: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_ib {
+ struct vmw_ctx_bindinfo bi;
+ uint32 offset;
+ uint32 format;
+};
+
+/**
+ * struct vmw_dx_shader_bindings - per shader type context binding state
+ *
+ * @shader: The shader binding for this shader type
+ * @const_buffers: Const buffer bindings for this shader type.
+ * @shader_res: Shader resource view bindings for this shader type.
+ * @dirty_sr: Bitmap tracking individual shader resource binding changes
+ * that have not yet been emitted to the device.
+ * @dirty: Bitmap tracking per-binding type binding changes that have not
+ * yet been emitted to the device.
+ */ +struct vmw_dx_shader_bindings { + struct vmw_ctx_bindinfo_shader shader; + struct vmw_ctx_bindinfo_cb const_buffers[SVGA3D_DX_MAX_CONSTBUFFERS]; + struct vmw_ctx_bindinfo_view shader_res[SVGA3D_DX_MAX_SRVIEWS]; + DECLARE_BITMAP(dirty_sr, SVGA3D_DX_MAX_SRVIEWS); + unsigned long dirty; +}; + +extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs, + const struct vmw_ctx_bindinfo *ci, + u32 shader_slot, u32 slot); +extern void +vmw_binding_state_commit(struct vmw_ctx_binding_state *to, + struct vmw_ctx_binding_state *from); +extern void vmw_binding_res_list_kill(struct list_head *head); +extern void vmw_binding_res_list_scrub(struct list_head *head); +extern int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs); +extern void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs); +extern void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs); +extern struct vmw_ctx_binding_state * +vmw_binding_state_alloc(struct vmw_private *dev_priv); +extern void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs); +extern struct list_head * +vmw_binding_state_list(struct vmw_ctx_binding_state *cbs); +extern void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs); + +#endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index cff2bf9db9d2..3329f623c8bf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -72,6 +72,12 @@ static struct ttm_place mob_placement_flags = { .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED }; +static struct ttm_place mob_ne_placement_flags = { + .fpfn = 0, + .lpfn = 0, + .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT +}; + struct ttm_placement vmw_vram_placement = { .num_placement = 1, .placement = &vram_placement_flags, @@ -200,6 +206,13 @@ struct ttm_placement vmw_mob_placement = { .busy_placement = &mob_placement_flags }; +struct ttm_placement vmw_mob_ne_placement = { + .num_placement = 1, + .num_busy_placement = 1, + .placement = &mob_ne_placement_flags, + .busy_placement = &mob_ne_placement_flags +}; + struct vmw_ttm_tt { struct ttm_dma_tt dma_ttm; struct vmw_private *dev_priv; @@ -804,9 +817,9 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) /** * vmw_move_notify - TTM move_notify_callback * - * @bo: The TTM buffer object about to move. - * @mem: The truct ttm_mem_reg indicating to what memory - * region the move is taking place. + * @bo: The TTM buffer object about to move. + * @mem: The struct ttm_mem_reg indicating to what memory + * region the move is taking place. * * Calls move_notify for all subsystems needing it. * (currently only resources). @@ -815,13 +828,14 @@ static void vmw_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) { vmw_resource_move_notify(bo, mem); + vmw_query_move_notify(bo, mem); } /** * vmw_swap_notify - TTM move_notify_callback * - * @bo: The TTM buffer object about to be swapped out. + * @bo: The TTM buffer object about to be swapped out. 
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
new file mode 100644
index 000000000000..5ae8f921da2a
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -0,0 +1,1303 @@
+/**************************************************************************
+ *
+ * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "ttm/ttm_bo_api.h"
+
+/*
+ * Size of inline command buffers. Try to make sure that a page size is a
+ * multiple of the DMA pool allocation size.
+ */
+#define VMW_CMDBUF_INLINE_ALIGN 64
+#define VMW_CMDBUF_INLINE_SIZE \
+ (1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
+
+/**
+ * struct vmw_cmdbuf_context - Command buffer context queues
+ *
+ * @submitted: List of command buffers that have been submitted to the
+ * manager but not yet submitted to hardware.
+ * @hw_submitted: List of command buffers submitted to hardware.
+ * @preempted: List of preempted command buffers.
+ * @num_hw_submitted: Number of buffers currently being processed by hardware.
+ */
+struct vmw_cmdbuf_context {
+ struct list_head submitted;
+ struct list_head hw_submitted;
+ struct list_head preempted;
+ unsigned num_hw_submitted;
+};
+
+/**
+ * struct vmw_cmdbuf_man - Command buffer manager
+ *
+ * @cur_mutex: Mutex protecting the command buffer used for incremental small
+ * kernel command submissions, @cur.
+ * @space_mutex: Mutex to protect against starvation when we allocate
+ * main pool buffer space.
+ * @work: A struct work_struct implementing command buffer error handling.
+ * Immutable.
+ * @dev_priv: Pointer to the device private struct. Immutable.
+ * @ctx: Array of command buffer context queues. The queues and the context
+ * data are protected by @lock.
+ * @error: List of command buffers that have caused device errors.
+ * Protected by @lock.
+ * @mm: Range manager for the command buffer space. Manager allocations and
+ * frees are protected by @lock.
+ * @cmd_space: Buffer object for the command buffer space, unless we were
+ * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
+ * @map_obj: Mapping state for @cmd_space. Immutable.
+ * @map: Pointer to command buffer space.
May be a mapped buffer object or
+ * a contiguous coherent DMA memory allocation. Immutable.
+ * @cur: Command buffer for small kernel command submissions. Protected by
+ * the @cur_mutex.
+ * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
+ * @default_size: Default size for the @cur command buffer. Immutable.
+ * @max_hw_submitted: Max number of in-flight command buffers the device can
+ * handle. Immutable.
+ * @lock: Spinlock protecting command submission queues.
+ * @headers: Pool of DMA memory for device command buffer headers.
+ * Internal protection.
+ * @dheaders: Pool of DMA memory for device command buffer headers with trailing
+ * space for inline data. Internal protection.
+ * @tasklet: Tasklet struct for irq processing. Immutable.
+ * @alloc_queue: Wait queue for processes waiting to allocate command buffer
+ * space.
+ * @idle_queue: Wait queue for processes waiting for command buffer idle.
+ * @irq_on: Whether the process function has requested irq to be turned on.
+ * Protected by @lock.
+ * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
+ * allocation. Immutable.
+ * @has_pool: Has a large pool of DMA memory which allows larger allocations.
+ * Typically this is false only during bootstrap.
+ * @handle: DMA address handle for the command buffer space if @using_mob is
+ * false. Immutable.
+ * @size: The size of the command buffer space. Immutable.
+ */
+struct vmw_cmdbuf_man {
+ struct mutex cur_mutex;
+ struct mutex space_mutex;
+ struct work_struct work;
+ struct vmw_private *dev_priv;
+ struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
+ struct list_head error;
+ struct drm_mm mm;
+ struct ttm_buffer_object *cmd_space;
+ struct ttm_bo_kmap_obj map_obj;
+ u8 *map;
+ struct vmw_cmdbuf_header *cur;
+ size_t cur_pos;
+ size_t default_size;
+ unsigned max_hw_submitted;
+ spinlock_t lock;
+ struct dma_pool *headers;
+ struct dma_pool *dheaders;
+ struct tasklet_struct tasklet;
+ wait_queue_head_t alloc_queue;
+ wait_queue_head_t idle_queue;
+ bool irq_on;
+ bool using_mob;
+ bool has_pool;
+ dma_addr_t handle;
+ size_t size;
+};
+
+/**
+ * struct vmw_cmdbuf_header - Command buffer metadata
+ *
+ * @man: The command buffer manager.
+ * @cb_header: Device command buffer header, allocated from a DMA pool.
+ * @cb_context: The device command buffer context.
+ * @list: List head for attaching to the manager lists.
+ * @node: The range manager node.
+ * @handle: The DMA address of @cb_header. Handed to the device on command
+ * buffer submission.
+ * @cmd: Pointer to the command buffer space of this buffer.
+ * @size: Size of the command buffer space of this buffer.
+ * @reserved: Reserved space of this buffer.
+ * @inline_space: Whether inline command buffer space is used.
+ */
+struct vmw_cmdbuf_header {
+ struct vmw_cmdbuf_man *man;
+ SVGACBHeader *cb_header;
+ SVGACBContext cb_context;
+ struct list_head list;
+ struct drm_mm_node node;
+ dma_addr_t handle;
+ u8 *cmd;
+ size_t size;
+ size_t reserved;
+ bool inline_space;
+};
+
+/**
+ * struct vmw_cmdbuf_dheader - Device command buffer header with inline
+ * command buffer space.
+ *
+ * @cb_header: Device command buffer header.
+ * @cmd: Inline command buffer space.
+ */
+struct vmw_cmdbuf_dheader {
+ SVGACBHeader cb_header;
+ u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
+};
+
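struct vmw_cmdbuf_dheader above is sized so that the device header plus the inline command space pack to exactly 1 KiB, which divides the 4 KiB page size evenly, so DMA pool pages are used without waste. A sketch of the arithmetic as compile-time checks; the 24-byte header size is an assumption for illustration, not the real sizeof(SVGACBHeader):

#include <assert.h>

#define INLINE_ALIGN 64
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define HEADER_SIZE 24 /* assumed header size; illustrative only */
#define INLINE_SIZE (1024 - ALIGN_UP(HEADER_SIZE, INLINE_ALIGN))

/* Header plus inline space pack to exactly 1 KiB... */
static_assert(ALIGN_UP(HEADER_SIZE, INLINE_ALIGN) + INLINE_SIZE == 1024,
	      "dheader packs to exactly 1 KiB");
/* ...so a whole number of dheaders fits a 4 KiB page. */
static_assert(4096 % 1024 == 0, "page holds a whole number of dheaders");

int main(void) { return 0; }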
+/**
+ * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
+ *
+ * @page_size: Size of requested command buffer space in pages.
+ * @node: Pointer to the range manager node.
+ * @done: True if this allocation has succeeded.
+ */
+struct vmw_cmdbuf_alloc_info {
+ size_t page_size;
+ struct drm_mm_node *node;
+ bool done;
+};
+
+/* Loop over each context in the command buffer manager. */
+#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
+ for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
+ ++(_i), ++(_ctx))
+
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);
+
+
+/**
+ * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
+ *
+ * @man: The command buffer manager.
+ * @interruptible: Whether to wait interruptible when locking.
+ */
+static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
+{
+ if (interruptible) {
+ if (mutex_lock_interruptible(&man->cur_mutex))
+ return -ERESTARTSYS;
+ } else {
+ mutex_lock(&man->cur_mutex);
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
+ *
+ * @man: The command buffer manager.
+ */
+static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
+{
+ mutex_unlock(&man->cur_mutex);
+}
+
+/**
+ * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
+ * been used for the device context with inline command buffers.
+ * Need not be called locked.
+ *
+ * @header: Pointer to the header to free.
+ */
+static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
+{
+ struct vmw_cmdbuf_dheader *dheader;
+
+ if (WARN_ON_ONCE(!header->inline_space))
+ return;
+
+ dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
+ cb_header);
+ dma_pool_free(header->man->dheaders, dheader, header->handle);
+ kfree(header);
+}
+
+/**
+ * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
+ * associated structures.
+ *
+ * @header: Pointer to the header to free.
+ *
+ * For internal use. Must be called with man::lock held.
+ */
+static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
+{
+ struct vmw_cmdbuf_man *man = header->man;
+
+ BUG_ON(!spin_is_locked(&man->lock));
+
+ if (header->inline_space) {
+ vmw_cmdbuf_header_inline_free(header);
+ return;
+ }
+
+ drm_mm_remove_node(&header->node);
+ wake_up_all(&man->alloc_queue);
+ if (header->cb_header)
+ dma_pool_free(man->headers, header->cb_header,
+ header->handle);
+ kfree(header);
+}
+
+/**
+ * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
+ * associated structures.
+ *
+ * @header: Pointer to the header to free.
+ */
+void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
+{
+ struct vmw_cmdbuf_man *man = header->man;
+
+ /* Avoid locking if inline_space */
+ if (header->inline_space) {
+ vmw_cmdbuf_header_inline_free(header);
+ return;
+ }
+ spin_lock_bh(&man->lock);
+ __vmw_cmdbuf_header_free(header);
+ spin_unlock_bh(&man->lock);
+}
+
+
+/**
+ * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
+ *
+ * @header: The header of the buffer to submit.
+ */
+static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
+{
+ struct vmw_cmdbuf_man *man = header->man;
+ u32 val;
+
+ if (sizeof(header->handle) > 4)
+ val = (header->handle >> 32);
+ else
+ val = 0;
+ vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
+
+ val = (header->handle & 0xFFFFFFFFULL);
+ val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
+ vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
+
+ return header->cb_header->status;
+}
+
+/**
+ * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
+ *
+ * @ctx: The command buffer context to initialize
+ */
+static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
+{
+ INIT_LIST_HEAD(&ctx->hw_submitted);
+ INIT_LIST_HEAD(&ctx->submitted);
+ INIT_LIST_HEAD(&ctx->preempted);
+ ctx->num_hw_submitted = 0;
+}
+
+/**
+ * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
+ * context.
+ *
+ * @man: The command buffer manager.
+ * @ctx: The command buffer context.
+ *
+ * Submits command buffers to hardware until there are no more command
+ * buffers to submit or the hardware can't handle more command buffers.
+ */
+static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
+ struct vmw_cmdbuf_context *ctx)
+{
+ while (ctx->num_hw_submitted < man->max_hw_submitted &&
+ !list_empty(&ctx->submitted)) {
+ struct vmw_cmdbuf_header *entry;
+ SVGACBStatus status;
+
+ entry = list_first_entry(&ctx->submitted,
+ struct vmw_cmdbuf_header,
+ list);
+
+ status = vmw_cmdbuf_header_submit(entry);
+
+ /* This should never happen */
+ if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
+ entry->cb_header->status = SVGA_CB_STATUS_NONE;
+ break;
+ }
+
+ list_del(&entry->list);
+ list_add_tail(&entry->list, &ctx->hw_submitted);
+ ctx->num_hw_submitted++;
+ }
+
+}
+
+/**
+ * vmw_cmdbuf_ctx_process: Process a command buffer context.
+ *
+ * @man: The command buffer manager.
+ * @ctx: The command buffer context.
+ * @notempty: Incremented if the context still has command buffers left
+ * to submit after processing.
+ *
+ * Submit command buffers to hardware if possible, and process finished
+ * buffers. Typically this means freeing them, but on preemption or error,
+ * appropriate action is taken. Wake up waiters if appropriate.
+ */
+static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
+ struct vmw_cmdbuf_context *ctx,
+ int *notempty)
+{
+ struct vmw_cmdbuf_header *entry, *next;
+
+ vmw_cmdbuf_ctx_submit(man, ctx);
+
+ list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
+ SVGACBStatus status = entry->cb_header->status;
+
+ if (status == SVGA_CB_STATUS_NONE)
+ break;
+
+ list_del(&entry->list);
+ wake_up_all(&man->idle_queue);
+ ctx->num_hw_submitted--;
+ switch (status) {
+ case SVGA_CB_STATUS_COMPLETED:
+ __vmw_cmdbuf_header_free(entry);
+ break;
+ case SVGA_CB_STATUS_COMMAND_ERROR:
+ case SVGA_CB_STATUS_CB_HEADER_ERROR:
+ list_add_tail(&entry->list, &man->error);
+ schedule_work(&man->work);
+ break;
+ case SVGA_CB_STATUS_PREEMPTED:
+ list_add(&entry->list, &ctx->preempted);
+ break;
+ default:
+ WARN_ONCE(true, "Undefined command buffer status.\n");
+ __vmw_cmdbuf_header_free(entry);
+ break;
+ }
+ }
+
+ vmw_cmdbuf_ctx_submit(man, ctx);
+ if (!list_empty(&ctx->submitted))
+ (*notempty)++;
+}
+
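vmw_cmdbuf_man_process() below returns -EAGAIN when it has just turned interrupt delivery on, because a completion could have raced in before the IRQ was armed; its callers simply invoke it a second time. A stand-alone sketch of this arm-then-repoll idiom, with stubbed, invented helpers:

#include <stdbool.h>
#include <stdio.h>

#define EAGAIN 11

/* Invented stand-ins for device state and irq control. */
static bool pending = true;

static bool work_pending_hw(void)
{
	return pending;
}

static void irq_enable(void)
{
	/* In the driver this would arm the completion interrupt. */
}

static int process(bool *irq_on)
{
	if (!work_pending_hw())
		return 0;

	if (!*irq_on) {
		irq_enable();
		*irq_on = true;
		/* A completion may have landed before the irq was armed;
		 * ask the caller to poll once more. */
		return -EAGAIN;
	}
	return 0;
}

int main(void)
{
	bool irq_on = false;

	if (process(&irq_on) == -EAGAIN)
		(void)process(&irq_on); /* rerun exactly once */
	printf("irq_on=%d\n", irq_on);
	return 0;
}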
+/**
+ * vmw_cmdbuf_man_process - Process all command buffer contexts and
+ * switch on and off irqs as appropriate.
+ *
+ * @man: The command buffer manager.
+ *
+ * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
+ * command buffers left that are not submitted to hardware, make sure
+ * IRQ handling is turned on. Otherwise, make sure it's turned off. This
+ * function may return -EAGAIN to indicate it should be rerun due to
+ * possibly missed IRQs if IRQs have just been turned on.
+ */
+static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
+{
+ int notempty = 0;
+ struct vmw_cmdbuf_context *ctx;
+ int i;
+
+ for_each_cmdbuf_ctx(man, i, ctx)
+ vmw_cmdbuf_ctx_process(man, ctx, &notempty);
+
+ if (man->irq_on && !notempty) {
+ vmw_generic_waiter_remove(man->dev_priv,
+ SVGA_IRQFLAG_COMMAND_BUFFER,
+ &man->dev_priv->cmdbuf_waiters);
+ man->irq_on = false;
+ } else if (!man->irq_on && notempty) {
+ vmw_generic_waiter_add(man->dev_priv,
+ SVGA_IRQFLAG_COMMAND_BUFFER,
+ &man->dev_priv->cmdbuf_waiters);
+ man->irq_on = true;
+
+ /* Rerun in case we just missed an irq. */
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
+ * command buffer context
+ *
+ * @man: The command buffer manager.
+ * @header: The header of the buffer to submit.
+ * @cb_context: The command buffer context to use.
+ *
+ * This function adds @header to the "submitted" queue of the command
+ * buffer context identified by @cb_context. It then calls the command buffer
+ * manager processing to potentially submit the buffer to hardware.
+ * @man->lock needs to be held when calling this function.
+ */
+static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
+ struct vmw_cmdbuf_header *header,
+ SVGACBContext cb_context)
+{
+ if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
+ header->cb_header->dxContext = 0;
+ header->cb_context = cb_context;
+ list_add_tail(&header->list, &man->ctx[cb_context].submitted);
+
+ if (vmw_cmdbuf_man_process(man) == -EAGAIN)
+ vmw_cmdbuf_man_process(man);
+}
+
+/**
+ * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
+ * handler implemented as a tasklet.
+ *
+ * @data: Tasklet closure. A pointer to the command buffer manager cast to
+ * an unsigned long.
+ *
+ * The bottom half (tasklet) of the interrupt handler simply calls into the
+ * command buffer processor to free finished buffers and submit any
+ * queued buffers to hardware.
+ */
+static void vmw_cmdbuf_man_tasklet(unsigned long data)
+{
+ struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
+
+ spin_lock(&man->lock);
+ if (vmw_cmdbuf_man_process(man) == -EAGAIN)
+ (void) vmw_cmdbuf_man_process(man);
+ spin_unlock(&man->lock);
+}
+
+/**
+ * vmw_cmdbuf_work_func - The deferred work function that handles
+ * command buffer errors.
+ *
+ * @work: The work func closure argument.
+ *
+ * Restarting the command buffer context after an error requires process
+ * context, so it is deferred to this work function.
+ */
+static void vmw_cmdbuf_work_func(struct work_struct *work)
+{
+ struct vmw_cmdbuf_man *man =
+ container_of(work, struct vmw_cmdbuf_man, work);
+ struct vmw_cmdbuf_header *entry, *next;
+ bool restart = false;
+
+ spin_lock_bh(&man->lock);
+ list_for_each_entry_safe(entry, next, &man->error, list) {
+ restart = true;
+ DRM_ERROR("Command buffer error.\n");
+
+ list_del(&entry->list);
+ __vmw_cmdbuf_header_free(entry);
+ wake_up_all(&man->idle_queue);
+ }
+ spin_unlock_bh(&man->lock);
+
+ if (restart && vmw_cmdbuf_startstop(man, true))
+ DRM_ERROR("Failed restarting command buffer context 0.\n");
+
+}
+
+/**
+ * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
+ *
+ * @man: The command buffer manager.
+ * @check_preempted: Check also the preempted queue for pending command buffers.
+ * + */ +static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man, + bool check_preempted) +{ + struct vmw_cmdbuf_context *ctx; + bool idle = false; + int i; + + spin_lock_bh(&man->lock); + vmw_cmdbuf_man_process(man); + for_each_cmdbuf_ctx(man, i, ctx) { + if (!list_empty(&ctx->submitted) || + !list_empty(&ctx->hw_submitted) || + (check_preempted && !list_empty(&ctx->preempted))) + goto out_unlock; + } + + idle = list_empty(&man->error); + +out_unlock: + spin_unlock_bh(&man->lock); + + return idle; +} + +/** + * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel + * command submissions + * + * @man: The command buffer manager. + * + * Flushes the current command buffer without allocating a new one. A new one + * is automatically allocated when needed. Call with @man->cur_mutex held. + */ +static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man) +{ + struct vmw_cmdbuf_header *cur = man->cur; + + WARN_ON(!mutex_is_locked(&man->cur_mutex)); + + if (!cur) + return; + + spin_lock_bh(&man->lock); + if (man->cur_pos == 0) { + __vmw_cmdbuf_header_free(cur); + goto out_unlock; + } + + man->cur->cb_header->length = man->cur_pos; + vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0); +out_unlock: + spin_unlock_bh(&man->lock); + man->cur = NULL; + man->cur_pos = 0; +} + +/** + * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel + * command submissions + * + * @man: The command buffer manager. + * @interruptible: Whether to sleep interruptible when sleeping. + * + * Flushes the current command buffer without allocating a new one. A new one + * is automatically allocated when needed. + */ +int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man, + bool interruptible) +{ + int ret = vmw_cmdbuf_cur_lock(man, interruptible); + + if (ret) + return ret; + + __vmw_cmdbuf_cur_flush(man); + vmw_cmdbuf_cur_unlock(man); + + return 0; +} + +/** + * vmw_cmdbuf_idle - Wait for command buffer manager idle. + * + * @man: The command buffer manager. + * @interruptible: Sleep interruptible while waiting. + * @timeout: Time out after this many ticks. + * + * Wait until the command buffer manager has processed all command buffers, + * or until a timeout occurs. If a timeout occurs, the function will return + * -EBUSY. + */ +int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible, + unsigned long timeout) +{ + int ret; + + ret = vmw_cmdbuf_cur_flush(man, interruptible); + vmw_generic_waiter_add(man->dev_priv, + SVGA_IRQFLAG_COMMAND_BUFFER, + &man->dev_priv->cmdbuf_waiters); + + if (interruptible) { + ret = wait_event_interruptible_timeout + (man->idle_queue, vmw_cmdbuf_man_idle(man, true), + timeout); + } else { + ret = wait_event_timeout + (man->idle_queue, vmw_cmdbuf_man_idle(man, true), + timeout); + } + vmw_generic_waiter_remove(man->dev_priv, + SVGA_IRQFLAG_COMMAND_BUFFER, + &man->dev_priv->cmdbuf_waiters); + if (ret == 0) { + if (!vmw_cmdbuf_man_idle(man, true)) + ret = -EBUSY; + else + ret = 0; + } + if (ret > 0) + ret = 0; + + return ret; +} + +/** + * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool. + * + * @man: The command buffer manager. + * @info: Allocation info. Will hold the size on entry and allocated mm node + * on successful return. + * + * Try to allocate buffer space from the main pool. Returns true if succeeded. + * If a fatal error was hit, the error code is returned in @info->ret. 
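+ *
+ * This makes the function usable directly as a wait_event() condition,
+ * which is how vmw_cmdbuf_alloc_space() below uses it, roughly:
+ *
+ *     wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));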
+ */ +static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man, + struct vmw_cmdbuf_alloc_info *info) +{ + int ret; + + if (info->done) + return true; + + memset(info->node, 0, sizeof(*info->node)); + spin_lock_bh(&man->lock); + ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size, + 0, 0, + DRM_MM_SEARCH_DEFAULT, + DRM_MM_CREATE_DEFAULT); + spin_unlock_bh(&man->lock); + info->done = !ret; + + return info->done; +} + +/** + * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool. + * + * @man: The command buffer manager. + * @node: Pointer to pre-allocated range-manager node. + * @size: The size of the allocation. + * @interruptible: Whether to sleep interruptible while waiting for space. + * + * This function allocates buffer space from the main pool, and if there is + * no space available ATM, it turns on IRQ handling and sleeps waiting for it to + * become available. + */ +static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man, + struct drm_mm_node *node, + size_t size, + bool interruptible) +{ + struct vmw_cmdbuf_alloc_info info; + + info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT; + info.node = node; + info.done = false; + + /* + * To prevent starvation of large requests, only one allocating call + * at a time waiting for space. + */ + if (interruptible) { + if (mutex_lock_interruptible(&man->space_mutex)) + return -ERESTARTSYS; + } else { + mutex_lock(&man->space_mutex); + } + + /* Try to allocate space without waiting. */ + if (vmw_cmdbuf_try_alloc(man, &info)) + goto out_unlock; + + vmw_generic_waiter_add(man->dev_priv, + SVGA_IRQFLAG_COMMAND_BUFFER, + &man->dev_priv->cmdbuf_waiters); + + if (interruptible) { + int ret; + + ret = wait_event_interruptible + (man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info)); + if (ret) { + vmw_generic_waiter_remove + (man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER, + &man->dev_priv->cmdbuf_waiters); + mutex_unlock(&man->space_mutex); + return ret; + } + } else { + wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info)); + } + vmw_generic_waiter_remove(man->dev_priv, + SVGA_IRQFLAG_COMMAND_BUFFER, + &man->dev_priv->cmdbuf_waiters); + +out_unlock: + mutex_unlock(&man->space_mutex); + + return 0; +} + +/** + * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer + * space from the main pool. + * + * @man: The command buffer manager. + * @header: Pointer to the header to set up. + * @size: The requested size of the buffer space. + * @interruptible: Whether to sleep interruptible while waiting for space. 
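+ *
+ * Returns 0 on success or a negative error code on failure.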
+ */ +static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man, + struct vmw_cmdbuf_header *header, + size_t size, + bool interruptible) +{ + SVGACBHeader *cb_hdr; + size_t offset; + int ret; + + if (!man->has_pool) + return -ENOMEM; + + ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible); + + if (ret) + return ret; + + header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL, + &header->handle); + if (!header->cb_header) { + ret = -ENOMEM; + goto out_no_cb_header; + } + + header->size = header->node.size << PAGE_SHIFT; + cb_hdr = header->cb_header; + offset = header->node.start << PAGE_SHIFT; + header->cmd = man->map + offset; + memset(cb_hdr, 0, sizeof(*cb_hdr)); + if (man->using_mob) { + cb_hdr->flags = SVGA_CB_FLAG_MOB; + cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start; + cb_hdr->ptr.mob.mobOffset = offset; + } else { + cb_hdr->ptr.pa = (u64)man->handle + (u64)offset; + } + + return 0; + +out_no_cb_header: + spin_lock_bh(&man->lock); + drm_mm_remove_node(&header->node); + spin_unlock_bh(&man->lock); + + return ret; +} + +/** + * vmw_cmdbuf_space_inline - Set up a command buffer header with + * inline command buffer space. + * + * @man: The command buffer manager. + * @header: Pointer to the header to set up. + * @size: The requested size of the buffer space. + */ +static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man, + struct vmw_cmdbuf_header *header, + int size) +{ + struct vmw_cmdbuf_dheader *dheader; + SVGACBHeader *cb_hdr; + + if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE)) + return -ENOMEM; + + dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL, + &header->handle); + if (!dheader) + return -ENOMEM; + + header->inline_space = true; + header->size = VMW_CMDBUF_INLINE_SIZE; + cb_hdr = &dheader->cb_header; + header->cb_header = cb_hdr; + header->cmd = dheader->cmd; + memset(dheader, 0, sizeof(*dheader)); + cb_hdr->status = SVGA_CB_STATUS_NONE; + cb_hdr->flags = SVGA_CB_FLAG_NONE; + cb_hdr->ptr.pa = (u64)header->handle + + (u64)offsetof(struct vmw_cmdbuf_dheader, cmd); + + return 0; +} + +/** + * vmw_cmdbuf_alloc - Allocate a command buffer header complete with + * command buffer space. + * + * @man: The command buffer manager. + * @size: The requested size of the buffer space. + * @interruptible: Whether to sleep interruptible while waiting for space. + * @p_header: points to a header pointer to populate on successful return. + * + * Returns a pointer to command buffer space if successful. Otherwise + * returns an error pointer. The header pointer returned in @p_header should + * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit(). + */ +void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man, + size_t size, bool interruptible, + struct vmw_cmdbuf_header **p_header) +{ + struct vmw_cmdbuf_header *header; + int ret = 0; + + *p_header = NULL; + + header = kzalloc(sizeof(*header), GFP_KERNEL); + if (!header) + return ERR_PTR(-ENOMEM); + + if (size <= VMW_CMDBUF_INLINE_SIZE) + ret = vmw_cmdbuf_space_inline(man, header, size); + else + ret = vmw_cmdbuf_space_pool(man, header, size, interruptible); + + if (ret) { + kfree(header); + return ERR_PTR(ret); + } + + header->man = man; + INIT_LIST_HEAD(&header->list); + header->cb_header->status = SVGA_CB_STATUS_NONE; + *p_header = header; + + return header->cmd; +} + +/** + * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current + * command buffer. + * + * @man: The command buffer manager. + * @size: The requested size of the commands. + * @ctx_id: The context id if any. 
Otherwise set to SVGA3D_INVALID_ID.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ *
+ * Returns a pointer to command buffer space if successful. Otherwise
+ * returns an error pointer.
+ */
+static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
+ size_t size,
+ int ctx_id,
+ bool interruptible)
+{
+ struct vmw_cmdbuf_header *cur;
+ void *ret;
+
+ if (vmw_cmdbuf_cur_lock(man, interruptible))
+ return ERR_PTR(-ERESTARTSYS);
+
+ cur = man->cur;
+ if (cur && (size + man->cur_pos > cur->size ||
+ ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
+ ctx_id != cur->cb_header->dxContext)))
+ __vmw_cmdbuf_cur_flush(man);
+
+ if (!man->cur) {
+ ret = vmw_cmdbuf_alloc(man,
+ max_t(size_t, size, man->default_size),
+ interruptible, &man->cur);
+ if (IS_ERR(ret)) {
+ vmw_cmdbuf_cur_unlock(man);
+ return ret;
+ }
+
+ cur = man->cur;
+ }
+
+ if (ctx_id != SVGA3D_INVALID_ID) {
+ cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
+ cur->cb_header->dxContext = ctx_id;
+ }
+
+ cur->reserved = size;
+
+ return (void *) (man->cur->cmd + man->cur_pos);
+}
+
+/**
+ * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The size of the commands actually written.
+ * @flush: Whether to flush the command buffer immediately.
+ */
+static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
+ size_t size, bool flush)
+{
+ struct vmw_cmdbuf_header *cur = man->cur;
+
+ WARN_ON(!mutex_is_locked(&man->cur_mutex));
+
+ WARN_ON(size > cur->reserved);
+ man->cur_pos += size;
+ if (!size)
+ cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
+ if (flush)
+ __vmw_cmdbuf_cur_flush(man);
+ vmw_cmdbuf_cur_unlock(man);
+}
+
+/**
+ * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The requested size of the commands.
+ * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ * @header: Header of the command buffer. NULL if the current command buffer
+ * should be used.
+ *
+ * Returns a pointer to command buffer space if successful. Otherwise
+ * returns an error pointer.
+ */
+void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
+ int ctx_id, bool interruptible,
+ struct vmw_cmdbuf_header *header)
+{
+ if (!header)
+ return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
+
+ if (size > header->size)
+ return ERR_PTR(-EINVAL);
+
+ if (ctx_id != SVGA3D_INVALID_ID) {
+ header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
+ header->cb_header->dxContext = ctx_id;
+ }
+
+ header->reserved = size;
+ return header->cmd;
+}
+
+/**
+ * vmw_cmdbuf_commit - Commit commands in a command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The size of the commands actually written.
+ * @header: Header of the command buffer. NULL if the current command buffer
+ * should be used.
+ * @flush: Whether to flush the command buffer immediately.
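+ *
+ * A sketch of the reserve/commit round trip as a caller would issue it;
+ * "cmd_size" and the command payload are the caller's:
+ *
+ *     cmd = vmw_cmdbuf_reserve(man, cmd_size, SVGA3D_INVALID_ID, true,
+ *                              header);
+ *     if (IS_ERR(cmd))
+ *             return PTR_ERR(cmd);
+ *     memcpy(cmd, commands, cmd_size);
+ *     vmw_cmdbuf_commit(man, cmd_size, header, false);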
+ */ +void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size, + struct vmw_cmdbuf_header *header, bool flush) +{ + if (!header) { + vmw_cmdbuf_commit_cur(man, size, flush); + return; + } + + (void) vmw_cmdbuf_cur_lock(man, false); + __vmw_cmdbuf_cur_flush(man); + WARN_ON(size > header->reserved); + man->cur = header; + man->cur_pos = size; + if (!size) + header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT; + if (flush) + __vmw_cmdbuf_cur_flush(man); + vmw_cmdbuf_cur_unlock(man); +} + +/** + * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half. + * + * @man: The command buffer manager. + */ +void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man) +{ + if (!man) + return; + + tasklet_schedule(&man->tasklet); +} + +/** + * vmw_cmdbuf_send_device_command - Send a command through the device context. + * + * @man: The command buffer manager. + * @command: Pointer to the command to send. + * @size: Size of the command. + * + * Synchronously sends a device context command. + */ +static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man, + const void *command, + size_t size) +{ + struct vmw_cmdbuf_header *header; + int status; + void *cmd = vmw_cmdbuf_alloc(man, size, false, &header); + + if (IS_ERR(cmd)) + return PTR_ERR(cmd); + + memcpy(cmd, command, size); + header->cb_header->length = size; + header->cb_context = SVGA_CB_CONTEXT_DEVICE; + spin_lock_bh(&man->lock); + status = vmw_cmdbuf_header_submit(header); + spin_unlock_bh(&man->lock); + vmw_cmdbuf_header_free(header); + + if (status != SVGA_CB_STATUS_COMPLETED) { + DRM_ERROR("Device context command failed with status %d\n", + status); + return -EINVAL; + } + + return 0; +} + +/** + * vmw_cmdbuf_startstop - Send a start / stop command through the device + * context. + * + * @man: The command buffer manager. + * @enable: Whether to enable or disable the context. + * + * Synchronously sends a device start / stop context command. + */ +static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, + bool enable) +{ + struct { + uint32 id; + SVGADCCmdStartStop body; + } __packed cmd; + + cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT; + cmd.body.enable = (enable) ? 1 : 0; + cmd.body.context = SVGA_CB_CONTEXT_0; + + return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd)); +} + +/** + * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes + * + * @man: The command buffer manager. + * @size: The size of the main space pool. + * @default_size: The default size of the command buffer for small kernel + * submissions. + * + * Set the size and allocate the main command buffer space pool, + * as well as the default size of the command buffer for + * small kernel submissions. If successful, this enables large command + * submissions. Note that this function requires that rudimentary command + * submission is already available and that the MOB memory manager is alive. + * Returns 0 on success. Negative error code on failure. + */ +int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, + size_t size, size_t default_size) +{ + struct vmw_private *dev_priv = man->dev_priv; + bool dummy; + int ret; + + if (man->has_pool) + return -EINVAL; + + /* First, try to allocate a huge chunk of DMA memory */ + size = PAGE_ALIGN(size); + man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size, + &man->handle, GFP_KERNEL); + if (man->map) { + man->using_mob = false; + } else { + /* + * DMA memory failed. If we can have command buffers in a + * MOB, try to use that instead. 
Note that this will
+ * actually call into the already enabled manager when
+ * binding the MOB.
+ */
+ if (!(dev_priv->capabilities & SVGA_CAP_DX))
+ return -ENOMEM;
+
+ ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
+ &vmw_mob_ne_placement, 0, false, NULL,
+ &man->cmd_space);
+ if (ret)
+ return ret;
+
+ man->using_mob = true;
+ ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
+ &man->map_obj);
+ if (ret)
+ goto out_no_map;
+
+ man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
+ }
+
+ man->size = size;
+ drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
+
+ man->has_pool = true;
+ man->default_size = default_size;
+ DRM_INFO("Using command buffers with %s pool.\n",
+ (man->using_mob) ? "MOB" : "DMA");
+
+ return 0;
+
+out_no_map:
+ if (man->using_mob)
+ ttm_bo_unref(&man->cmd_space);
+
+ return ret;
+}
+
+/**
+ * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
+ * inline command buffer submissions only.
+ *
+ * @dev_priv: Pointer to device private structure.
+ *
+ * Returns a pointer to a command buffer manager on success or an error
+ * pointer on failure. The command buffer manager will be enabled for
+ * submissions of size VMW_CMDBUF_INLINE_SIZE only.
+ */
+struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
+{
+ struct vmw_cmdbuf_man *man;
+ struct vmw_cmdbuf_context *ctx;
+ int i;
+ int ret;
+
+ if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
+ return ERR_PTR(-ENOSYS);
+
+ man = kzalloc(sizeof(*man), GFP_KERNEL);
+ if (!man)
+ return ERR_PTR(-ENOMEM);
+
+ man->headers = dma_pool_create("vmwgfx cmdbuf",
+ &dev_priv->dev->pdev->dev,
+ sizeof(SVGACBHeader),
+ 64, PAGE_SIZE);
+ if (!man->headers) {
+ ret = -ENOMEM;
+ goto out_no_pool;
+ }
+
+ man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
+ &dev_priv->dev->pdev->dev,
+ sizeof(struct vmw_cmdbuf_dheader),
+ 64, PAGE_SIZE);
+ if (!man->dheaders) {
+ ret = -ENOMEM;
+ goto out_no_dpool;
+ }
+
+ for_each_cmdbuf_ctx(man, i, ctx)
+ vmw_cmdbuf_ctx_init(ctx);
+
+ INIT_LIST_HEAD(&man->error);
+ spin_lock_init(&man->lock);
+ mutex_init(&man->cur_mutex);
+ mutex_init(&man->space_mutex);
+ tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
+ (unsigned long) man);
+ man->default_size = VMW_CMDBUF_INLINE_SIZE;
+ init_waitqueue_head(&man->alloc_queue);
+ init_waitqueue_head(&man->idle_queue);
+ man->dev_priv = dev_priv;
+ man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
+ INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
+ vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
+ &dev_priv->error_waiters);
+ ret = vmw_cmdbuf_startstop(man, true);
+ if (ret) {
+ DRM_ERROR("Failed starting command buffer context 0.\n");
+ vmw_cmdbuf_man_destroy(man);
+ return ERR_PTR(ret);
+ }
+
+ return man;
+
+out_no_dpool:
+ dma_pool_destroy(man->headers);
+out_no_pool:
+ kfree(man);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
+ *
+ * @man: Pointer to a command buffer manager.
+ *
+ * This function removes the main buffer space pool, and should be called
+ * before MOB memory management is removed. When this function has been called,
+ * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
+ * less are allowed, and the default size of the command buffer for small kernel
+ * submissions is also set to this size.
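+ *
+ * Note that vmw_cmdbuf_man_destroy() below warns if the pool is still
+ * set up, so this function must be called before the manager is destroyed.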
+ */ +void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man) +{ + if (!man->has_pool) + return; + + man->has_pool = false; + man->default_size = VMW_CMDBUF_INLINE_SIZE; + (void) vmw_cmdbuf_idle(man, false, 10*HZ); + if (man->using_mob) { + (void) ttm_bo_kunmap(&man->map_obj); + ttm_bo_unref(&man->cmd_space); + } else { + dma_free_coherent(&man->dev_priv->dev->pdev->dev, + man->size, man->map, man->handle); + } +} + +/** + * vmw_cmdbuf_man_destroy - Take down a command buffer manager. + * + * @man: Pointer to a command buffer manager. + * + * This function idles and then destroys a command buffer manager. + */ +void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man) +{ + WARN_ON_ONCE(man->has_pool); + (void) vmw_cmdbuf_idle(man, false, 10*HZ); + if (vmw_cmdbuf_startstop(man, false)) + DRM_ERROR("Failed stopping command buffer context 0.\n"); + + vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR, + &man->dev_priv->error_waiters); + tasklet_kill(&man->tasklet); + (void) cancel_work_sync(&man->work); + dma_pool_destroy(man->dheaders); + dma_pool_destroy(man->headers); + mutex_destroy(&man->cur_mutex); + mutex_destroy(&man->space_mutex); + kfree(man); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index 21e9b7f8dad0..13db8a2851ed 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -26,15 +26,10 @@ **************************************************************************/ #include "vmwgfx_drv.h" +#include "vmwgfx_resource_priv.h" #define VMW_CMDBUF_RES_MAN_HT_ORDER 12 -enum vmw_cmdbuf_res_state { - VMW_CMDBUF_RES_COMMITED, - VMW_CMDBUF_RES_ADD, - VMW_CMDBUF_RES_DEL -}; - /** * struct vmw_cmdbuf_res - Command buffer managed resource entry. * @@ -132,9 +127,12 @@ void vmw_cmdbuf_res_commit(struct list_head *list) list_for_each_entry_safe(entry, next, list, head) { list_del(&entry->head); + if (entry->res->func->commit_notify) + entry->res->func->commit_notify(entry->res, + entry->state); switch (entry->state) { case VMW_CMDBUF_RES_ADD: - entry->state = VMW_CMDBUF_RES_COMMITED; + entry->state = VMW_CMDBUF_RES_COMMITTED; list_add_tail(&entry->head, &entry->man->list); break; case VMW_CMDBUF_RES_DEL: @@ -175,7 +173,7 @@ void vmw_cmdbuf_res_revert(struct list_head *list) &entry->hash); list_del(&entry->head); list_add_tail(&entry->head, &entry->man->list); - entry->state = VMW_CMDBUF_RES_COMMITED; + entry->state = VMW_CMDBUF_RES_COMMITTED; break; default: BUG(); @@ -231,6 +229,9 @@ out_invalid_key: * @res_type: The resource type. * @user_key: The user-space id of the resource. * @list: The staging list. + * @res_p: If the resource is in an already committed state, points to the + * struct vmw_resource on successful return. The pointer will be + * non ref-counted. * * This function looks up the struct vmw_cmdbuf_res entry from the manager * hash table and, if it exists, removes it. 
Depending on its current staging @@ -240,7 +241,8 @@ out_invalid_key: int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, enum vmw_cmdbuf_res_type res_type, u32 user_key, - struct list_head *list) + struct list_head *list, + struct vmw_resource **res_p) { struct vmw_cmdbuf_res *entry; struct drm_hash_item *hash; @@ -256,12 +258,14 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, switch (entry->state) { case VMW_CMDBUF_RES_ADD: vmw_cmdbuf_res_free(man, entry); + *res_p = NULL; break; - case VMW_CMDBUF_RES_COMMITED: + case VMW_CMDBUF_RES_COMMITTED: (void) drm_ht_remove_item(&man->resources, &entry->hash); list_del(&entry->head); entry->state = VMW_CMDBUF_RES_DEL; list_add_tail(&entry->head, list); + *res_p = entry->res; break; default: BUG(); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index 5ac92874404d..443d1ed00de7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -27,19 +27,19 @@ #include "vmwgfx_drv.h" #include "vmwgfx_resource_priv.h" +#include "vmwgfx_binding.h" #include "ttm/ttm_placement.h" struct vmw_user_context { struct ttm_base_object base; struct vmw_resource res; - struct vmw_ctx_binding_state cbs; + struct vmw_ctx_binding_state *cbs; struct vmw_cmdbuf_res_manager *man; + struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX]; + spinlock_t cotable_lock; + struct vmw_dma_buffer *dx_query_mob; }; - - -typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool); - static void vmw_user_context_free(struct vmw_resource *res); static struct vmw_resource * vmw_user_context_base_to_res(struct ttm_base_object *base); @@ -51,12 +51,14 @@ static int vmw_gb_context_unbind(struct vmw_resource *res, bool readback, struct ttm_validate_buffer *val_buf); static int vmw_gb_context_destroy(struct vmw_resource *res); -static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind); -static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi, - bool rebind); -static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind); -static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs); -static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs); +static int vmw_dx_context_create(struct vmw_resource *res); +static int vmw_dx_context_bind(struct vmw_resource *res, + struct ttm_validate_buffer *val_buf); +static int vmw_dx_context_unbind(struct vmw_resource *res, + bool readback, + struct ttm_validate_buffer *val_buf); +static int vmw_dx_context_destroy(struct vmw_resource *res); + static uint64_t vmw_user_context_size; static const struct vmw_user_resource_conv user_context_conv = { @@ -93,15 +95,38 @@ static const struct vmw_res_func vmw_gb_context_func = { .unbind = vmw_gb_context_unbind }; -static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = { - [vmw_ctx_binding_shader] = vmw_context_scrub_shader, - [vmw_ctx_binding_rt] = vmw_context_scrub_render_target, - [vmw_ctx_binding_tex] = vmw_context_scrub_texture }; +static const struct vmw_res_func vmw_dx_context_func = { + .res_type = vmw_res_dx_context, + .needs_backup = true, + .may_evict = true, + .type_name = "dx contexts", 
+ .backup_placement = &vmw_mob_placement, + .create = vmw_dx_context_create, + .destroy = vmw_dx_context_destroy, + .bind = vmw_dx_context_bind, + .unbind = vmw_dx_context_unbind +}; /** * Context management: */ +static void vmw_context_cotables_unref(struct vmw_user_context *uctx) +{ + struct vmw_resource *res; + int i; + + for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { + spin_lock(&uctx->cotable_lock); + res = uctx->cotables[i]; + uctx->cotables[i] = NULL; + spin_unlock(&uctx->cotable_lock); + + if (res) + vmw_resource_unreference(&res); + } +} + static void vmw_hw_context_destroy(struct vmw_resource *res) { struct vmw_user_context *uctx = @@ -113,17 +138,19 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) } *cmd; - if (res->func->destroy == vmw_gb_context_destroy) { + if (res->func->destroy == vmw_gb_context_destroy || + res->func->destroy == vmw_dx_context_destroy) { mutex_lock(&dev_priv->cmdbuf_mutex); vmw_cmdbuf_res_man_destroy(uctx->man); mutex_lock(&dev_priv->binding_mutex); - (void) vmw_context_binding_state_kill(&uctx->cbs); - (void) vmw_gb_context_destroy(res); + vmw_binding_state_kill(uctx->cbs); + (void) res->func->destroy(res); mutex_unlock(&dev_priv->binding_mutex); if (dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid) __vmw_execbuf_release_pinned_bo(dev_priv, NULL); mutex_unlock(&dev_priv->cmdbuf_mutex); + vmw_context_cotables_unref(uctx); return; } @@ -135,43 +162,67 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) return; } - cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY); - cmd->header.size = cpu_to_le32(sizeof(cmd->body)); - cmd->body.cid = cpu_to_le32(res->id); + cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY; + cmd->header.size = sizeof(cmd->body); + cmd->body.cid = res->id; vmw_fifo_commit(dev_priv, sizeof(*cmd)); - vmw_3d_resource_dec(dev_priv, false); + vmw_fifo_resource_dec(dev_priv); } static int vmw_gb_context_init(struct vmw_private *dev_priv, + bool dx, struct vmw_resource *res, - void (*res_free) (struct vmw_resource *res)) + void (*res_free)(struct vmw_resource *res)) { - int ret; + int ret, i; struct vmw_user_context *uctx = container_of(res, struct vmw_user_context, res); + res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) : + SVGA3D_CONTEXT_DATA_SIZE); ret = vmw_resource_init(dev_priv, res, true, - res_free, &vmw_gb_context_func); - res->backup_size = SVGA3D_CONTEXT_DATA_SIZE; + res_free, + dx ? 
&vmw_dx_context_func : + &vmw_gb_context_func); if (unlikely(ret != 0)) goto out_err; if (dev_priv->has_mob) { uctx->man = vmw_cmdbuf_res_man_create(dev_priv); - if (unlikely(IS_ERR(uctx->man))) { + if (IS_ERR(uctx->man)) { ret = PTR_ERR(uctx->man); uctx->man = NULL; goto out_err; } } - memset(&uctx->cbs, 0, sizeof(uctx->cbs)); - INIT_LIST_HEAD(&uctx->cbs.list); + uctx->cbs = vmw_binding_state_alloc(dev_priv); + if (IS_ERR(uctx->cbs)) { + ret = PTR_ERR(uctx->cbs); + goto out_err; + } + + spin_lock_init(&uctx->cotable_lock); + + if (dx) { + for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { + uctx->cotables[i] = vmw_cotable_alloc(dev_priv, + &uctx->res, i); + if (unlikely(uctx->cotables[i] == NULL)) { + ret = -ENOMEM; + goto out_cotables; + } + } + } + + vmw_resource_activate(res, vmw_hw_context_destroy); return 0; +out_cotables: + vmw_context_cotables_unref(uctx); out_err: if (res_free) res_free(res); @@ -182,7 +233,8 @@ out_err: static int vmw_context_init(struct vmw_private *dev_priv, struct vmw_resource *res, - void (*res_free) (struct vmw_resource *res)) + void (*res_free)(struct vmw_resource *res), + bool dx) { int ret; @@ -192,7 +244,7 @@ static int vmw_context_init(struct vmw_private *dev_priv, } *cmd; if (dev_priv->has_mob) - return vmw_gb_context_init(dev_priv, res, res_free); + return vmw_gb_context_init(dev_priv, dx, res, res_free); ret = vmw_resource_init(dev_priv, res, false, res_free, &vmw_legacy_context_func); @@ -215,12 +267,12 @@ static int vmw_context_init(struct vmw_private *dev_priv, return -ENOMEM; } - cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE); - cmd->header.size = cpu_to_le32(sizeof(cmd->body)); - cmd->body.cid = cpu_to_le32(res->id); + cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE; + cmd->header.size = sizeof(cmd->body); + cmd->body.cid = res->id; vmw_fifo_commit(dev_priv, sizeof(*cmd)); - (void) vmw_3d_resource_inc(dev_priv, false); + vmw_fifo_resource_inc(dev_priv); vmw_resource_activate(res, vmw_hw_context_destroy); return 0; @@ -232,19 +284,10 @@ out_early: return ret; } -struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv) -{ - struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL); - int ret; - - if (unlikely(res == NULL)) - return NULL; - - ret = vmw_context_init(dev_priv, res, NULL); - - return (ret == 0) ? res : NULL; -} +/* + * GB context. + */ static int vmw_gb_context_create(struct vmw_resource *res) { @@ -281,7 +324,7 @@ static int vmw_gb_context_create(struct vmw_resource *res) cmd->header.size = sizeof(cmd->body); cmd->body.cid = res->id; vmw_fifo_commit(dev_priv, sizeof(*cmd)); - (void) vmw_3d_resource_inc(dev_priv, false); + vmw_fifo_resource_inc(dev_priv); return 0; @@ -309,7 +352,6 @@ static int vmw_gb_context_bind(struct vmw_resource *res, "binding.\n"); return -ENOMEM; } - cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; cmd->header.size = sizeof(cmd->body); cmd->body.cid = res->id; @@ -346,7 +388,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res, BUG_ON(bo->mem.mem_type != VMW_PL_MOB); mutex_lock(&dev_priv->binding_mutex); - vmw_context_binding_state_scrub(&uctx->cbs); + vmw_binding_state_scrub(uctx->cbs); submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); @@ -414,7 +456,231 @@ static int vmw_gb_context_destroy(struct vmw_resource *res) if (dev_priv->query_cid == res->id) dev_priv->query_cid_valid = false; vmw_resource_release_id(res); - vmw_3d_resource_dec(dev_priv, false); + vmw_fifo_resource_dec(dev_priv); + + return 0; +} + +/* + * DX context. 
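+ *
+ * The create/bind/unbind/destroy callbacks below mirror the guest-backed
+ * context callbacks above, with cotable scrubbing added on unbind.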
+ */
+
+static int vmw_dx_context_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ int ret;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDefineContext body;
+ } *cmd;
+
+ if (likely(res->id != -1))
+ return 0;
+
+ ret = vmw_resource_alloc_id(res);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a context id.\n");
+ goto out_no_id;
+ }
+
+ if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
+ ret = -EBUSY;
+ goto out_no_fifo;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "creation.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_fifo_resource_inc(dev_priv);
+
+ return 0;
+
+out_no_fifo:
+ vmw_resource_release_id(res);
+out_no_id:
+ return ret;
+}
+
+static int vmw_dx_context_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindContext body;
+ } *cmd;
+ struct ttm_buffer_object *bo = val_buf->bo;
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ cmd->body.mobid = bo->mem.start;
+ cmd->body.validContents = res->backup_dirty;
+ res->backup_dirty = false;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_dx_context_scrub_cotables - Scrub all bindings and
+ * cotables from a context
+ *
+ * @ctx: Pointer to the context resource
+ * @readback: Whether to save the cotable contents on scrubbing.
+ *
+ * Cotables must be unbound before their context, but unbinding requires
+ * the backup buffer being reserved, whereas scrubbing does not.
+ * This function scrubs all cotables of a context, potentially reading back
+ * the contents into their backup buffers. However, scrubbing cotables
+ * also makes the device context invalid, so scrub all bindings first so
+ * that this doesn't have to be done later with an invalid context.
+ */
+void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+ bool readback)
+{
+ struct vmw_user_context *uctx =
+ container_of(ctx, struct vmw_user_context, res);
+ int i;
+
+ vmw_binding_state_scrub(uctx->cbs);
+ for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ struct vmw_resource *res;
+
+ /* Avoid racing with ongoing cotable destruction.
*/ + spin_lock(&uctx->cotable_lock); + res = uctx->cotables[vmw_cotable_scrub_order[i]]; + if (res) + res = vmw_resource_reference_unless_doomed(res); + spin_unlock(&uctx->cotable_lock); + if (!res) + continue; + + WARN_ON(vmw_cotable_scrub(res, readback)); + vmw_resource_unreference(&res); + } +} + +static int vmw_dx_context_unbind(struct vmw_resource *res, + bool readback, + struct ttm_validate_buffer *val_buf) +{ + struct vmw_private *dev_priv = res->dev_priv; + struct ttm_buffer_object *bo = val_buf->bo; + struct vmw_fence_obj *fence; + struct vmw_user_context *uctx = + container_of(res, struct vmw_user_context, res); + + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXReadbackContext body; + } *cmd1; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXBindContext body; + } *cmd2; + uint32_t submit_size; + uint8_t *cmd; + + + BUG_ON(bo->mem.mem_type != VMW_PL_MOB); + + mutex_lock(&dev_priv->binding_mutex); + vmw_dx_context_scrub_cotables(res, readback); + + if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx && + readback) { + WARN_ON(uctx->dx_query_mob->dx_query_ctx != res); + if (vmw_query_readback_all(uctx->dx_query_mob)) + DRM_ERROR("Failed to read back query states\n"); + } + + submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); + + cmd = vmw_fifo_reserve(dev_priv, submit_size); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for context " + "unbinding.\n"); + mutex_unlock(&dev_priv->binding_mutex); + return -ENOMEM; + } + + cmd2 = (void *) cmd; + if (readback) { + cmd1 = (void *) cmd; + cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT; + cmd1->header.size = sizeof(cmd1->body); + cmd1->body.cid = res->id; + cmd2 = (void *) (&cmd1[1]); + } + cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT; + cmd2->header.size = sizeof(cmd2->body); + cmd2->body.cid = res->id; + cmd2->body.mobid = SVGA3D_INVALID_ID; + + vmw_fifo_commit(dev_priv, submit_size); + mutex_unlock(&dev_priv->binding_mutex); + + /* + * Create a fence object and fence the backup buffer. 
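+ * The fence may be NULL if fence creation failed; hence the
+ * NULL check before the unreference below.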
+ */ + + (void) vmw_execbuf_fence_commands(NULL, dev_priv, + &fence, NULL); + + vmw_fence_single_bo(bo, fence); + + if (likely(fence != NULL)) + vmw_fence_obj_unreference(&fence); + + return 0; +} + +static int vmw_dx_context_destroy(struct vmw_resource *res) +{ + struct vmw_private *dev_priv = res->dev_priv; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXDestroyContext body; + } *cmd; + + if (likely(res->id == -1)) + return 0; + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for context " + "destruction.\n"); + return -ENOMEM; + } + + cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT; + cmd->header.size = sizeof(cmd->body); + cmd->body.cid = res->id; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + if (dev_priv->query_cid == res->id) + dev_priv->query_cid_valid = false; + vmw_resource_release_id(res); + vmw_fifo_resource_dec(dev_priv); return 0; } @@ -435,6 +701,11 @@ static void vmw_user_context_free(struct vmw_resource *res) container_of(res, struct vmw_user_context, res); struct vmw_private *dev_priv = res->dev_priv; + if (ctx->cbs) + vmw_binding_state_free(ctx->cbs); + + (void) vmw_context_bind_dx_query(res, NULL); + ttm_base_object_kfree(ctx, base); ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_user_context_size); @@ -465,8 +736,8 @@ int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE); } -int vmw_context_define_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) +static int vmw_context_define(struct drm_device *dev, void *data, + struct drm_file *file_priv, bool dx) { struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_user_context *ctx; @@ -476,6 +747,10 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data, struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; int ret; + if (!dev_priv->has_dx && dx) { + DRM_ERROR("DX contexts not supported by device.\n"); + return -EINVAL; + } /* * Approximate idr memory usage with 128 bytes. It will be limited @@ -516,7 +791,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data, * From here on, the destructor takes over resource freeing. */ - ret = vmw_context_init(dev_priv, res, vmw_user_context_free); + ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx); if (unlikely(ret != 0)) goto out_unlock; @@ -535,387 +810,128 @@ out_err: out_unlock: ttm_read_unlock(&dev_priv->reservation_sem); return ret; - -} - -/** - * vmw_context_scrub_shader - scrub a shader binding from a context. - * - * @bi: single binding information. - * @rebind: Whether to issue a bind instead of scrub command. - */ -static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind) -{ - struct vmw_private *dev_priv = bi->ctx->dev_priv; - struct { - SVGA3dCmdHeader header; - SVGA3dCmdSetShader body; - } *cmd; - - cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); - if (unlikely(cmd == NULL)) { - DRM_ERROR("Failed reserving FIFO space for shader " - "unbinding.\n"); - return -ENOMEM; - } - - cmd->header.id = SVGA_3D_CMD_SET_SHADER; - cmd->header.size = sizeof(cmd->body); - cmd->body.cid = bi->ctx->id; - cmd->body.type = bi->i1.shader_type; - cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); - vmw_fifo_commit(dev_priv, sizeof(*cmd)); - - return 0; -} - -/** - * vmw_context_scrub_render_target - scrub a render target binding - * from a context. - * - * @bi: single binding information. 
- * @rebind: Whether to issue a bind instead of scrub command. - */ -static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi, - bool rebind) -{ - struct vmw_private *dev_priv = bi->ctx->dev_priv; - struct { - SVGA3dCmdHeader header; - SVGA3dCmdSetRenderTarget body; - } *cmd; - - cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); - if (unlikely(cmd == NULL)) { - DRM_ERROR("Failed reserving FIFO space for render target " - "unbinding.\n"); - return -ENOMEM; - } - - cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET; - cmd->header.size = sizeof(cmd->body); - cmd->body.cid = bi->ctx->id; - cmd->body.type = bi->i1.rt_type; - cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); - cmd->body.target.face = 0; - cmd->body.target.mipmap = 0; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); - - return 0; } -/** - * vmw_context_scrub_texture - scrub a texture binding from a context. - * - * @bi: single binding information. - * @rebind: Whether to issue a bind instead of scrub command. - * - * TODO: Possibly complement this function with a function that takes - * a list of texture bindings and combines them to a single command. - */ -static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, - bool rebind) -{ - struct vmw_private *dev_priv = bi->ctx->dev_priv; - struct { - SVGA3dCmdHeader header; - struct { - SVGA3dCmdSetTextureState c; - SVGA3dTextureState s1; - } body; - } *cmd; - - cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); - if (unlikely(cmd == NULL)) { - DRM_ERROR("Failed reserving FIFO space for texture " - "unbinding.\n"); - return -ENOMEM; - } - - - cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE; - cmd->header.size = sizeof(cmd->body); - cmd->body.c.cid = bi->ctx->id; - cmd->body.s1.stage = bi->i1.texture_stage; - cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; - cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); - vmw_fifo_commit(dev_priv, sizeof(*cmd)); - - return 0; -} - -/** - * vmw_context_binding_drop: Stop tracking a context binding - * - * @cb: Pointer to binding tracker storage. - * - * Stops tracking a context binding, and re-initializes its storage. - * Typically used when the context binding is replaced with a binding to - * another (or the same, for that matter) resource. - */ -static void vmw_context_binding_drop(struct vmw_ctx_binding *cb) +int vmw_context_define_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - list_del(&cb->ctx_list); - if (!list_empty(&cb->res_list)) - list_del(&cb->res_list); - cb->bi.ctx = NULL; + return vmw_context_define(dev, data, file_priv, false); } -/** - * vmw_context_binding_add: Start tracking a context binding - * - * @cbs: Pointer to the context binding state tracker. - * @bi: Information about the binding to track. - * - * Performs basic checks on the binding to make sure arguments are within - * bounds and then starts tracking the binding in the context binding - * state structure @cbs. 
- */ -int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, - const struct vmw_ctx_bindinfo *bi) +int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct vmw_ctx_binding *loc; - - switch (bi->bt) { - case vmw_ctx_binding_rt: - if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) { - DRM_ERROR("Illegal render target type %u.\n", - (unsigned) bi->i1.rt_type); - return -EINVAL; - } - loc = &cbs->render_targets[bi->i1.rt_type]; - break; - case vmw_ctx_binding_tex: - if (unlikely((unsigned)bi->i1.texture_stage >= - SVGA3D_NUM_TEXTURE_UNITS)) { - DRM_ERROR("Illegal texture/sampler unit %u.\n", - (unsigned) bi->i1.texture_stage); - return -EINVAL; - } - loc = &cbs->texture_units[bi->i1.texture_stage]; - break; - case vmw_ctx_binding_shader: - if (unlikely((unsigned)bi->i1.shader_type >= - SVGA3D_SHADERTYPE_MAX)) { - DRM_ERROR("Illegal shader type %u.\n", - (unsigned) bi->i1.shader_type); - return -EINVAL; - } - loc = &cbs->shaders[bi->i1.shader_type]; - break; + union drm_vmw_extended_context_arg *arg = (typeof(arg)) data; + struct drm_vmw_context_arg *rep = &arg->rep; + + switch (arg->req) { + case drm_vmw_context_legacy: + return vmw_context_define(dev, rep, file_priv, false); + case drm_vmw_context_dx: + return vmw_context_define(dev, rep, file_priv, true); default: - BUG(); - } - - if (loc->bi.ctx != NULL) - vmw_context_binding_drop(loc); - - loc->bi = *bi; - loc->bi.scrubbed = false; - list_add_tail(&loc->ctx_list, &cbs->list); - INIT_LIST_HEAD(&loc->res_list); - - return 0; -} - -/** - * vmw_context_binding_transfer: Transfer a context binding tracking entry. - * - * @cbs: Pointer to the persistent context binding state tracker. - * @bi: Information about the binding to track. - * - */ -static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs, - const struct vmw_ctx_bindinfo *bi) -{ - struct vmw_ctx_binding *loc; - - switch (bi->bt) { - case vmw_ctx_binding_rt: - loc = &cbs->render_targets[bi->i1.rt_type]; break; - case vmw_ctx_binding_tex: - loc = &cbs->texture_units[bi->i1.texture_stage]; - break; - case vmw_ctx_binding_shader: - loc = &cbs->shaders[bi->i1.shader_type]; - break; - default: - BUG(); - } - - if (loc->bi.ctx != NULL) - vmw_context_binding_drop(loc); - - if (bi->res != NULL) { - loc->bi = *bi; - list_add_tail(&loc->ctx_list, &cbs->list); - list_add_tail(&loc->res_list, &bi->res->binding_head); } + return -EINVAL; } /** - * vmw_context_binding_kill - Kill a binding on the device - * and stop tracking it. - * - * @cb: Pointer to binding tracker storage. - * - * Emits FIFO commands to scrub a binding represented by @cb. - * Then stops tracking the binding and re-initializes its storage. - */ -static void vmw_context_binding_kill(struct vmw_ctx_binding *cb) -{ - if (!cb->bi.scrubbed) { - (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false); - cb->bi.scrubbed = true; - } - vmw_context_binding_drop(cb); -} - -/** - * vmw_context_binding_state_kill - Kill all bindings associated with a - * struct vmw_ctx_binding state structure, and re-initialize the structure. + * vmw_context_binding_list - Return a list of context bindings * - * @cbs: Pointer to the context binding state tracker. + * @ctx: The context resource * - * Emits commands to scrub all bindings associated with the - * context binding state tracker. Then re-initializes the whole structure. + * Returns the current list of bindings of the given context. 
Note that + * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked. */ -static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs) +struct list_head *vmw_context_binding_list(struct vmw_resource *ctx) { - struct vmw_ctx_binding *entry, *next; + struct vmw_user_context *uctx = + container_of(ctx, struct vmw_user_context, res); - list_for_each_entry_safe(entry, next, &cbs->list, ctx_list) - vmw_context_binding_kill(entry); + return vmw_binding_state_list(uctx->cbs); } -/** - * vmw_context_binding_state_scrub - Scrub all bindings associated with a - * struct vmw_ctx_binding state structure. - * - * @cbs: Pointer to the context binding state tracker. - * - * Emits commands to scrub all bindings associated with the - * context binding state tracker. - */ -static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs) +struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx) { - struct vmw_ctx_binding *entry; - - list_for_each_entry(entry, &cbs->list, ctx_list) { - if (!entry->bi.scrubbed) { - (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false); - entry->bi.scrubbed = true; - } - } + return container_of(ctx, struct vmw_user_context, res)->man; } -/** - * vmw_context_binding_res_list_kill - Kill all bindings on a - * resource binding list - * - * @head: list head of resource binding list - * - * Kills all bindings associated with a specific resource. Typically - * called before the resource is destroyed. - */ -void vmw_context_binding_res_list_kill(struct list_head *head) +struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx, + SVGACOTableType cotable_type) { - struct vmw_ctx_binding *entry, *next; + if (cotable_type >= SVGA_COTABLE_DX10_MAX) + return ERR_PTR(-EINVAL); - list_for_each_entry_safe(entry, next, head, res_list) - vmw_context_binding_kill(entry); + return vmw_resource_reference + (container_of(ctx, struct vmw_user_context, res)-> + cotables[cotable_type]); } /** - * vmw_context_binding_res_list_scrub - Scrub all bindings on a - * resource binding list + * vmw_context_binding_state - + * Return a pointer to a context binding state structure * - * @head: list head of resource binding list + * @ctx: The context resource * - * Scrub all bindings associated with a specific resource. Typically - * called before the resource is evicted. + * Returns the current state of bindings of the given context. Note that + * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked. */ -void vmw_context_binding_res_list_scrub(struct list_head *head) +struct vmw_ctx_binding_state * +vmw_context_binding_state(struct vmw_resource *ctx) { - struct vmw_ctx_binding *entry; - - list_for_each_entry(entry, head, res_list) { - if (!entry->bi.scrubbed) { - (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false); - entry->bi.scrubbed = true; - } - } + return container_of(ctx, struct vmw_user_context, res)->cbs; } /** - * vmw_context_binding_state_transfer - Commit staged binding info + * vmw_context_bind_dx_query - + * Sets query MOB for the context. If @mob is NULL, then this function will + * remove the association between the MOB and the context. This function + * assumes the binding_mutex is held. * - * @ctx: Pointer to context to commit the staged binding info to. - * @from: Staged binding info built during execbuf. + * @ctx_res: The context resource + * @mob: a reference to the query MOB * - * Transfers binding info from a temporary structure to the persistent - * structure in the context. 
This can be done once commands + * Returns -EINVAL if a MOB has already been set and does not match the one + * specified in the parameter. 0 otherwise. */ -void vmw_context_binding_state_transfer(struct vmw_resource *ctx, - struct vmw_ctx_binding_state *from) +int vmw_context_bind_dx_query(struct vmw_resource *ctx_res, + struct vmw_dma_buffer *mob) { struct vmw_user_context *uctx = - container_of(ctx, struct vmw_user_context, res); - struct vmw_ctx_binding *entry, *next; - - list_for_each_entry_safe(entry, next, &from->list, ctx_list) - vmw_context_binding_transfer(&uctx->cbs, &entry->bi); -} + container_of(ctx_res, struct vmw_user_context, res); -/** - * vmw_context_rebind_all - Rebind all scrubbed bindings of a context - * - * @ctx: The context resource - * - * Walks through the context binding list and rebinds all scrubbed - * resources. - */ -int vmw_context_rebind_all(struct vmw_resource *ctx) -{ - struct vmw_ctx_binding *entry; - struct vmw_user_context *uctx = - container_of(ctx, struct vmw_user_context, res); - struct vmw_ctx_binding_state *cbs = &uctx->cbs; - int ret; + if (mob == NULL) { + if (uctx->dx_query_mob) { + uctx->dx_query_mob->dx_query_ctx = NULL; + vmw_dmabuf_unreference(&uctx->dx_query_mob); + uctx->dx_query_mob = NULL; + } - list_for_each_entry(entry, &cbs->list, ctx_list) { - if (likely(!entry->bi.scrubbed)) - continue; + return 0; + } - if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id == - SVGA3D_INVALID_ID)) - continue; + /* Can only have one MOB per context for queries */ + if (uctx->dx_query_mob && uctx->dx_query_mob != mob) + return -EINVAL; - ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true); - if (unlikely(ret != 0)) - return ret; + mob->dx_query_ctx = ctx_res; - entry->bi.scrubbed = false; - } + if (!uctx->dx_query_mob) + uctx->dx_query_mob = vmw_dmabuf_reference(mob); return 0; } /** - * vmw_context_binding_list - Return a list of context bindings - * - * @ctx: The context resource + * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob * - * Returns the current list of bindings of the given context. Note that - * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked. + * @ctx_res: The context resource */ -struct list_head *vmw_context_binding_list(struct vmw_resource *ctx) +struct vmw_dma_buffer * +vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res) { - return &(container_of(ctx, struct vmw_user_context, res)->cbs.list); -} + struct vmw_user_context *uctx = + container_of(ctx_res, struct vmw_user_context, res); -struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx) -{ - return container_of(ctx, struct vmw_user_context, res)->man; + return uctx->dx_query_mob; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c new file mode 100644 index 000000000000..ce659a125f2b --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -0,0 +1,662 @@ +/************************************************************************** + * + * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Treat context OTables as resources to make use of the resource
+ * backing MOB eviction mechanism that is used to read back the COTable
+ * whenever the backing MOB is evicted.
+ */
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include <ttm/ttm_placement.h>
+#include "vmwgfx_so.h"
+
+/**
+ * struct vmw_cotable - Context Object Table resource
+ *
+ * @res: struct vmw_resource we are deriving from.
+ * @ctx: non-refcounted pointer to the owning context.
+ * @size_read_back: Size of data read back during eviction.
+ * @seen_entries: Seen entries in command stream for this cotable.
+ * @type: The cotable type.
+ * @scrubbed: Whether the cotable has been scrubbed.
+ * @resource_list: List of resources in the cotable.
+ */
+struct vmw_cotable {
+ struct vmw_resource res;
+ struct vmw_resource *ctx;
+ size_t size_read_back;
+ int seen_entries;
+ u32 type;
+ bool scrubbed;
+ struct list_head resource_list;
+};
+
+/**
+ * struct vmw_cotable_info - Static info about cotable types
+ *
+ * @min_initial_entries: Minimum number of initial entries at cotable
+ * allocation for this cotable type.
+ * @size: Size of each entry.
+ * @unbind_func: Function called to scrub or destroy the entries of this
+ * cotable type on unbind, if any.
+ */
+struct vmw_cotable_info {
+ u32 min_initial_entries;
+ u32 size;
+ void (*unbind_func)(struct vmw_private *, struct list_head *,
+ bool);
+};
+
+static const struct vmw_cotable_info co_info[] = {
+ {1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
+ {1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
+ {1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
+ {1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
+ {1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
+ {1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
+ {1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
+ {1, sizeof(SVGACOTableDXSamplerEntry), NULL},
+ {1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
+ {1, sizeof(SVGACOTableDXQueryEntry), NULL},
+ {1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}
+};
+
+/*
+ * Cotables with bindings that we remove must be scrubbed first;
+ * otherwise the device will swap in an invalid context if bindings are
+ * removed before their cotable is scrubbed.
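+ * Hence the view and shader cotables, whose entries carry bindings,
+ * come first in vmw_cotable_scrub_order[] below, and
+ * vmw_dx_context_scrub_cotables() walks that order rather than the raw
+ * SVGACOTableType order.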
+ */
+const SVGACOTableType vmw_cotable_scrub_order[] = {
+	SVGA_COTABLE_RTVIEW,
+	SVGA_COTABLE_DSVIEW,
+	SVGA_COTABLE_SRVIEW,
+	SVGA_COTABLE_DXSHADER,
+	SVGA_COTABLE_ELEMENTLAYOUT,
+	SVGA_COTABLE_BLENDSTATE,
+	SVGA_COTABLE_DEPTHSTENCIL,
+	SVGA_COTABLE_RASTERIZERSTATE,
+	SVGA_COTABLE_SAMPLER,
+	SVGA_COTABLE_STREAMOUTPUT,
+	SVGA_COTABLE_DXQUERY,
+};
+
+static int vmw_cotable_bind(struct vmw_resource *res,
+			    struct ttm_validate_buffer *val_buf);
+static int vmw_cotable_unbind(struct vmw_resource *res,
+			      bool readback,
+			      struct ttm_validate_buffer *val_buf);
+static int vmw_cotable_create(struct vmw_resource *res);
+static int vmw_cotable_destroy(struct vmw_resource *res);
+
+static const struct vmw_res_func vmw_cotable_func = {
+	.res_type = vmw_res_cotable,
+	.needs_backup = true,
+	.may_evict = true,
+	.type_name = "context guest backed object tables",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_cotable_create,
+	.destroy = vmw_cotable_destroy,
+	.bind = vmw_cotable_bind,
+	.unbind = vmw_cotable_unbind,
+};
+
+/**
+ * vmw_cotable - Convert a struct vmw_resource pointer to a struct
+ * vmw_cotable pointer
+ *
+ * @res: Pointer to the resource.
+ */
+static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_cotable, res);
+}
+
+/**
+ * vmw_cotable_destroy - Cotable resource destroy callback
+ *
+ * @res: Pointer to the cotable resource.
+ *
+ * There is no device cotable destroy command, so this function only
+ * makes sure that the resource id is set to invalid.
+ */
+static int vmw_cotable_destroy(struct vmw_resource *res)
+{
+	res->id = -1;
+	return 0;
+}
+
+/**
+ * vmw_cotable_unscrub - Undo a cotable scrub operation
+ *
+ * @res: Pointer to the cotable resource
+ *
+ * This function issues commands to (re)bind the cotable to
+ * its backing mob, which needs to be validated and reserved at this point.
+ * This is identical to bind() except the function interface looks different.
+ */
+static int vmw_cotable_unscrub(struct vmw_resource *res)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = &res->backup->base;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetCOTable body;
+	} *cmd;
+
+	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+	lockdep_assert_held(&bo->resv->lock.base);
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), SVGA3D_INVALID_ID);
+	if (!cmd) {
+		DRM_ERROR("Failed reserving FIFO space for cotable "
+			  "binding.\n");
+		return -ENOMEM;
+	}
+
+	WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
+	WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
+	cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = vcotbl->ctx->id;
+	cmd->body.type = vcotbl->type;
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.validSizeInBytes = vcotbl->size_read_back;
+
+	vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
+	vcotbl->scrubbed = false;
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_bind - Cotable resource bind callback
+ *
+ * @res: Pointer to the cotable resource
+ * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
+ * for convenience / fencing.
+ *
+ * This function issues commands to (re)bind the cotable to
+ * its backing mob, which needs to be validated and reserved at this point.
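+ * The device command emitted (via vmw_cotable_unscrub()) is
+ * SVGA_3D_CMD_DX_SET_COTABLE, which hands the mob id and the valid
+ * (read-back) size to the device for this context / cotable type pair.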
+ */
+static int vmw_cotable_bind(struct vmw_resource *res,
+			    struct ttm_validate_buffer *val_buf)
+{
+	/*
+	 * The create() callback may have changed @res->backup without
+	 * the caller noticing, and with val_buf->bo still pointing to
+	 * the old backup buffer. Although hackish, and not used currently,
+	 * take the opportunity to correct the value here so that it's not
+	 * misused in the future.
+	 */
+	val_buf->bo = &res->backup->base;
+
+	return vmw_cotable_unscrub(res);
+}
+
+/**
+ * vmw_cotable_scrub - Scrub the cotable from the device.
+ *
+ * @res: Pointer to the cotable resource.
+ * @readback: Whether to initiate a readback of the cotable data to the
+ * backup buffer.
+ *
+ * In some situations (context swapouts) it might be desirable to make the
+ * device forget about the cotable without performing a full unbind. A full
+ * unbind requires reserved backup buffers and it might not be possible to
+ * reserve them due to locking order violation issues. The vmw_cotable_scrub
+ * function implements a partial unbind() without that requirement but with the
+ * following restrictions.
+ * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
+ *    be called.
+ * 2) Before the cotable backing buffer is used by the CPU, or during the
+ *    resource destruction, vmw_cotable_unbind() must be called.
+ */
+int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	size_t submit_size;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackCOTable body;
+	} *cmd0;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetCOTable body;
+	} *cmd1;
+
+	if (vcotbl->scrubbed)
+		return 0;
+
+	if (co_info[vcotbl->type].unbind_func)
+		co_info[vcotbl->type].unbind_func(dev_priv,
+						  &vcotbl->resource_list,
+						  readback);
+	submit_size = sizeof(*cmd1);
+	if (readback)
+		submit_size += sizeof(*cmd0);
+
+	cmd1 = vmw_fifo_reserve_dx(dev_priv, submit_size, SVGA3D_INVALID_ID);
+	if (!cmd1) {
+		DRM_ERROR("Failed reserving FIFO space for cotable "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	vcotbl->size_read_back = 0;
+	if (readback) {
+		cmd0 = (void *) cmd1;
+		cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
+		cmd0->header.size = sizeof(cmd0->body);
+		cmd0->body.cid = vcotbl->ctx->id;
+		cmd0->body.type = vcotbl->type;
+		cmd1 = (void *) &cmd0[1];
+		vcotbl->size_read_back = res->backup_size;
+	}
+	cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
+	cmd1->header.size = sizeof(cmd1->body);
+	cmd1->body.cid = vcotbl->ctx->id;
+	cmd1->body.type = vcotbl->type;
+	cmd1->body.mobid = SVGA3D_INVALID_ID;
+	cmd1->body.validSizeInBytes = 0;
+	vmw_fifo_commit_flush(dev_priv, submit_size);
+	vcotbl->scrubbed = true;
+
+	/* Trigger a create() on next validate. */
+	res->id = -1;
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_unbind - Cotable resource unbind callback
+ *
+ * @res: Pointer to the cotable resource.
+ * @readback: Whether to read back cotable data to the backup buffer.
+ * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
+ * for convenience / fencing.
+ *
+ * Unbinds the cotable from the device and fences the backup buffer.
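+ * Note: Rather than scrubbing only this cotable, all cotables of the
+ * owning context are scrubbed here, under the binding mutex; see
+ * vmw_dx_context_scrub_cotables().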
+ */
+static int vmw_cotable_unbind(struct vmw_resource *res,
+			      bool readback,
+			      struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+	struct vmw_fence_obj *fence;
+
+	if (list_empty(&res->mob_head))
+		return 0;
+
+	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+	lockdep_assert_held(&bo->resv->lock.base);
+
+	mutex_lock(&dev_priv->binding_mutex);
+	if (!vcotbl->scrubbed)
+		vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
+	mutex_unlock(&dev_priv->binding_mutex);
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+	vmw_fence_single_bo(bo, fence);
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_readback - Read back a cotable without unbinding.
+ *
+ * @res: The cotable resource.
+ *
+ * Reads back a cotable to its backing mob without scrubbing the MOB from
+ * the cotable. The MOB is fenced for subsequent CPU access.
+ */
+static int vmw_cotable_readback(struct vmw_resource *res)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackCOTable body;
+	} *cmd;
+	struct vmw_fence_obj *fence;
+
+	if (!vcotbl->scrubbed) {
+		cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
+					  SVGA3D_INVALID_ID);
+		if (!cmd) {
+			DRM_ERROR("Failed reserving FIFO space for cotable "
+				  "readback.\n");
+			return -ENOMEM;
+		}
+		cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
+		cmd->header.size = sizeof(cmd->body);
+		cmd->body.cid = vcotbl->ctx->id;
+		cmd->body.type = vcotbl->type;
+		vcotbl->size_read_back = res->backup_size;
+		vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	}
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+	vmw_fence_single_bo(&res->backup->base, fence);
+	vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_resize - Resize a cotable.
+ *
+ * @res: The cotable resource.
+ * @new_size: The new size.
+ *
+ * Resizes a cotable and binds the new backup buffer.
+ * On failure the cotable is left intact.
+ * Important! This function may not fail once the MOB switch has been
+ * committed to hardware. That would put the device context in an
+ * invalid state which we can't currently recover from.
+ */
+static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_dma_buffer *buf, *old_buf = res->backup;
+	struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
+	size_t old_size = res->backup_size;
+	size_t old_size_read_back = vcotbl->size_read_back;
+	size_t cur_size_read_back;
+	struct ttm_bo_kmap_obj old_map, new_map;
+	int ret;
+	size_t i;
+
+	ret = vmw_cotable_readback(res);
+	if (ret)
+		return ret;
+
+	cur_size_read_back = vcotbl->size_read_back;
+	vcotbl->size_read_back = old_size_read_back;
+
+	/*
+	 * While the device is processing, allocate and reserve a buffer
+	 * object for the new COTable. Initially pin the buffer object to
+	 * make sure we can use tryreserve without failure.
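+	 * Pinning (the no-evict vmw_mob_ne_placement) also keeps TTM from
+	 * evicting the new buffer while its contents are being copied over.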
+	 */
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
+			      true, vmw_dmabuf_bo_free);
+	if (ret) {
+		DRM_ERROR("Failed initializing new cotable MOB.\n");
+		return ret;
+	}
+
+	bo = &buf->base;
+	WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, false, NULL));
+
+	ret = ttm_bo_wait(old_bo, false, false, false);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed waiting for cotable unbind.\n");
+		goto out_wait;
+	}
+
+	/*
+	 * Do a page by page copy of COTables. This eliminates slow vmap()s.
+	 * This should really be a TTM utility.
+	 */
+	for (i = 0; i < old_bo->num_pages; ++i) {
+		bool dummy;
+
+		ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed mapping old COTable on resize.\n");
+			goto out_wait;
+		}
+		ret = ttm_bo_kmap(bo, i, 1, &new_map);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed mapping new COTable on resize.\n");
+			goto out_map_new;
+		}
+		memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
+		       ttm_kmap_obj_virtual(&old_map, &dummy),
+		       PAGE_SIZE);
+		ttm_bo_kunmap(&new_map);
+		ttm_bo_kunmap(&old_map);
+	}
+
+	/* Unpin new buffer, and switch backup buffers. */
+	ret = ttm_bo_validate(bo, &vmw_mob_placement, false, false);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed validating new COTable backup buffer.\n");
+		goto out_wait;
+	}
+
+	res->backup = buf;
+	res->backup_size = new_size;
+	vcotbl->size_read_back = cur_size_read_back;
+
+	/*
+	 * Now tell the device to switch. If this fails, then we need to
+	 * revert the full resize.
+	 */
+	ret = vmw_cotable_unscrub(res);
+	if (ret) {
+		DRM_ERROR("Failed switching COTable backup buffer.\n");
+		res->backup = old_buf;
+		res->backup_size = old_size;
+		vcotbl->size_read_back = old_size_read_back;
+		goto out_wait;
+	}
+
+	/* Let go of the old mob. */
+	list_del(&res->mob_head);
+	list_add_tail(&res->mob_head, &buf->res_list);
+	vmw_dmabuf_unreference(&old_buf);
+	res->id = vcotbl->type;
+
+	return 0;
+
+out_map_new:
+	ttm_bo_kunmap(&old_map);
+out_wait:
+	ttm_bo_unreserve(bo);
+	vmw_dmabuf_unreference(&buf);
+
+	return ret;
+}
+
+/**
+ * vmw_cotable_create - Cotable resource create callback
+ *
+ * @res: Pointer to a cotable resource.
+ *
+ * There is no separate create command for cotables, so this callback, which
+ * is called before bind() in the validation sequence, is instead used for
+ * two things.
+ * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
+ *    buffer, that is, if @res->mob_head is non-empty.
+ * 2) Resize the cotable if needed.
+ */
+static int vmw_cotable_create(struct vmw_resource *res)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	size_t new_size = res->backup_size;
+	size_t needed_size;
+	int ret;
+
+	/* Check whether we need to resize the cotable */
+	needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
+	while (needed_size > new_size)
+		new_size *= 2;
+
+	if (likely(new_size <= res->backup_size)) {
+		if (vcotbl->scrubbed && !list_empty(&res->mob_head)) {
+			ret = vmw_cotable_unscrub(res);
+			if (ret)
+				return ret;
+		}
+		res->id = vcotbl->type;
+		return 0;
+	}
+
+	return vmw_cotable_resize(res, new_size);
+}
+
+/**
+ * vmw_hw_cotable_destroy - Cotable hw_destroy callback
+ *
+ * @res: Pointer to a cotable resource.
+ *
+ * The final destroy callback, called as part of resource destruction.
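+ * No device command is emitted here; vmw_cotable_destroy() merely
+ * invalidates the resource id.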
+ */
+static void vmw_hw_cotable_destroy(struct vmw_resource *res)
+{
+	(void) vmw_cotable_destroy(res);
+}
+
+static size_t cotable_acc_size;
+
+/**
+ * vmw_cotable_free - Cotable resource destructor
+ *
+ * @res: Pointer to a cotable resource.
+ */
+static void vmw_cotable_free(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	kfree(res);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
+}
+
+/**
+ * vmw_cotable_alloc - Create a cotable resource
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @ctx: Pointer to the context resource. The cotable resource does not
+ * take a refcount on the context.
+ * @type: The cotable type.
+ */
+struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
+				       struct vmw_resource *ctx,
+				       u32 type)
+{
+	struct vmw_cotable *vcotbl;
+	int ret;
+	u32 num_entries;
+
+	if (unlikely(cotable_acc_size == 0))
+		cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   cotable_acc_size, false, true);
+	if (unlikely(ret))
+		return ERR_PTR(ret);
+
+	vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
+	if (unlikely(vcotbl == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_alloc;
+	}
+
+	ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
+				vmw_cotable_free, &vmw_cotable_func);
+	if (unlikely(ret != 0))
+		goto out_no_init;
+
+	INIT_LIST_HEAD(&vcotbl->resource_list);
+	vcotbl->res.id = type;
+	vcotbl->res.backup_size = PAGE_SIZE;
+	num_entries = PAGE_SIZE / co_info[type].size;
+	if (num_entries < co_info[type].min_initial_entries) {
+		vcotbl->res.backup_size = co_info[type].min_initial_entries *
+			co_info[type].size;
+		vcotbl->res.backup_size =
+			(vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+	}
+
+	vcotbl->scrubbed = true;
+	vcotbl->seen_entries = -1;
+	vcotbl->type = type;
+	vcotbl->ctx = ctx;
+
+	vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy);
+
+	return &vcotbl->res;
+
+out_no_init:
+	kfree(vcotbl);
+out_no_alloc:
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
+	return ERR_PTR(ret);
+}
+
+/**
+ * vmw_cotable_notify - Notify the cotable about an item creation
+ *
+ * @res: Pointer to a cotable resource.
+ * @id: Item id.
+ */
+int vmw_cotable_notify(struct vmw_resource *res, int id)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+
+	if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
+		DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
+			  (unsigned) vcotbl->type, id);
+		return -EINVAL;
+	}
+
+	if (vcotbl->seen_entries < id) {
+		/* Trigger a call to create() on next validate */
+		res->id = -1;
+		vcotbl->seen_entries = id;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_add_resource - add a resource to the cotable's list of
+ * active resources.
+ *
+ * @res: pointer to the struct vmw_resource representing the cotable.
+ * @head: pointer to the struct list_head member of the resource, dedicated
+ * to the cotable active resource list.
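+ * The list collects the views and shaders whose entries live in this
+ * cotable, so that they can be unbound or destroyed through the co_info
+ * unbind_func callbacks when the cotable is scrubbed.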
+ */ +void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head) +{ + struct vmw_cotable *vcotbl = + container_of(res, struct vmw_cotable, res); + + list_add_tail(head, &vcotbl->resource_list); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c index 914b375763dc..299925a1f6c6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -32,25 +32,20 @@ /** - * vmw_dmabuf_to_placement - Validate a buffer to placement. + * vmw_dmabuf_pin_in_placement - Validate a buffer to placement. * * @dev_priv: Driver private. * @buf: DMA buffer to move. - * @pin: Pin buffer if true. + * @placement: The placement to pin it. * @interruptible: Use interruptible wait. * - * May only be called by the current master since it assumes that the - * master lock is the current master's lock. - * This function takes the master's lock in write mode. - * Flushes and unpins the query bo to avoid failures. - * * Returns * -ERESTARTSYS if interrupted by a signal. */ -int vmw_dmabuf_to_placement(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buf, - struct ttm_placement *placement, - bool interruptible) +int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv, + struct vmw_dma_buffer *buf, + struct ttm_placement *placement, + bool interruptible) { struct ttm_buffer_object *bo = &buf->base; int ret; @@ -66,6 +61,8 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv, goto err; ret = ttm_bo_validate(bo, placement, interruptible, false); + if (!ret) + vmw_bo_pin_reserved(buf, true); ttm_bo_unreserve(bo); @@ -75,12 +72,10 @@ err: } /** - * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr. + * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr. * - * May only be called by the current master since it assumes that the - * master lock is the current master's lock. - * This function takes the master's lock in write mode. - * Flushes and unpins the query bo if @pin == true to avoid failures. + * This function takes the reservation_sem in write mode. + * Flushes and unpins the query bo to avoid failures. * * @dev_priv: Driver private. * @buf: DMA buffer to move. @@ -90,55 +85,34 @@ err: * Returns * -ERESTARTSYS if interrupted by a signal. */ -int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buf, - bool pin, bool interruptible) +int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv, + struct vmw_dma_buffer *buf, + bool interruptible) { struct ttm_buffer_object *bo = &buf->base; - struct ttm_placement *placement; int ret; ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible); if (unlikely(ret != 0)) return ret; - if (pin) - vmw_execbuf_release_pinned_bo(dev_priv); + vmw_execbuf_release_pinned_bo(dev_priv); ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); if (unlikely(ret != 0)) goto err; - /** - * Put BO in VRAM if there is space, otherwise as a GMR. - * If there is no space in VRAM and GMR ids are all used up, - * start evicting GMRs to make room. If the DMA buffer can't be - * used as a GMR, this will return -ENOMEM. 
- */ - - if (pin) - placement = &vmw_vram_gmr_ne_placement; - else - placement = &vmw_vram_gmr_placement; - - ret = ttm_bo_validate(bo, placement, interruptible, false); + ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible, + false); if (likely(ret == 0) || ret == -ERESTARTSYS) - goto err_unreserve; - + goto out_unreserve; - /** - * If that failed, try VRAM again, this time evicting - * previous contents. - */ - - if (pin) - placement = &vmw_vram_ne_placement; - else - placement = &vmw_vram_placement; + ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false); - ret = ttm_bo_validate(bo, placement, interruptible, false); +out_unreserve: + if (!ret) + vmw_bo_pin_reserved(buf, true); -err_unreserve: ttm_bo_unreserve(bo); err: ttm_write_unlock(&dev_priv->reservation_sem); @@ -146,67 +120,50 @@ err: } /** - * vmw_dmabuf_to_vram - Move a buffer to vram. + * vmw_dmabuf_pin_in_vram - Move a buffer to vram. * - * May only be called by the current master since it assumes that the - * master lock is the current master's lock. - * This function takes the master's lock in write mode. + * This function takes the reservation_sem in write mode. + * Flushes and unpins the query bo to avoid failures. * * @dev_priv: Driver private. * @buf: DMA buffer to move. - * @pin: Pin buffer in vram if true. * @interruptible: Use interruptible wait. * * Returns * -ERESTARTSYS if interrupted by a signal. */ -int vmw_dmabuf_to_vram(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buf, - bool pin, bool interruptible) +int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv, + struct vmw_dma_buffer *buf, + bool interruptible) { - struct ttm_placement *placement; - - if (pin) - placement = &vmw_vram_ne_placement; - else - placement = &vmw_vram_placement; - - return vmw_dmabuf_to_placement(dev_priv, buf, - placement, - interruptible); + return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement, + interruptible); } /** - * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram. + * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram. * - * May only be called by the current master since it assumes that the - * master lock is the current master's lock. - * This function takes the master's lock in write mode. - * Flushes and unpins the query bo if @pin == true to avoid failures. + * This function takes the reservation_sem in write mode. + * Flushes and unpins the query bo to avoid failures. * * @dev_priv: Driver private. - * @buf: DMA buffer to move. - * @pin: Pin buffer in vram if true. + * @buf: DMA buffer to pin. * @interruptible: Use interruptible wait. * * Returns * -ERESTARTSYS if interrupted by a signal. 
 */
-int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
-				struct vmw_dma_buffer *buf,
-				bool pin, bool interruptible)
+int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
+				    struct vmw_dma_buffer *buf,
+				    bool interruptible)
 {
 	struct ttm_buffer_object *bo = &buf->base;
 	struct ttm_placement placement;
 	struct ttm_place place;
 	int ret = 0;
 
-	if (pin)
-		place = vmw_vram_ne_placement.placement[0];
-	else
-		place = vmw_vram_placement.placement[0];
+	place = vmw_vram_placement.placement[0];
 	place.lpfn = bo->num_pages;
-
 	placement.num_placement = 1;
 	placement.placement = &place;
 	placement.num_busy_placement = 1;
@@ -216,13 +173,16 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	if (pin)
-		vmw_execbuf_release_pinned_bo(dev_priv);
+	vmw_execbuf_release_pinned_bo(dev_priv);
 	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
 	if (unlikely(ret != 0))
 		goto err_unlock;
 
-	/* Is this buffer already in vram but not at the start of it? */
+	/*
+	 * Is this buffer already in vram but not at the start of it?
+	 * In that case, evict it first because TTM isn't good at handling
+	 * that situation.
+	 */
 	if (bo->mem.mem_type == TTM_PL_VRAM &&
 	    bo->mem.start < bo->num_pages &&
 	    bo->mem.start > 0)
@@ -230,8 +190,10 @@
 
 	ret = ttm_bo_validate(bo, &placement, interruptible, false);
 
-	/* For some reason we didn't up at the start of vram */
+	/* For some reason we didn't end up at the start of vram */
 	WARN_ON(ret == 0 && bo->offset != 0);
+	if (!ret)
+		vmw_bo_pin_reserved(buf, true);
 
 	ttm_bo_unreserve(bo);
 err_unlock:
@@ -240,13 +202,10 @@ err_unlock:
 	return ret;
 }
 
-
 /**
- * vmw_dmabuf_upin - Unpin the buffer given buffer, does not move the buffer.
+ * vmw_dmabuf_unpin - Unpin the given buffer without moving it.
 *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
+ * This function takes the reservation_sem in write mode.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to unpin.
@@ -259,16 +218,25 @@ int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
 		     struct vmw_dma_buffer *buf,
 		     bool interruptible)
 {
-	/*
-	 * We could in theory early out if the buffer is
-	 * unpinned but we need to lock and reserve the buffer
-	 * anyways so we don't gain much by that.
-	 */
-	return vmw_dmabuf_to_placement(dev_priv, buf,
-				       &vmw_evictable_placement,
-				       interruptible);
-}
+	struct ttm_buffer_object *bo = &buf->base;
+	int ret;
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
+	if (unlikely(ret != 0))
+		goto err;
+
+	vmw_bo_pin_reserved(buf, false);
+
+	ttm_bo_unreserve(bo);
+
+err:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
 
 /**
  * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
@@ -291,21 +259,31 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
 
 /**
- * vmw_bo_pin - Pin or unpin a buffer object without moving it.
+ * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
- * @bo: The buffer object. Must be reserved.
+ * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
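+ * Pinning is reference-counted: only the first pin and the matching last
+ * unpin actually change the buffer placement; intermediate calls just
+ * adjust @vbo->pin_count.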
* */ -void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin) +void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin) { struct ttm_place pl; struct ttm_placement placement; + struct ttm_buffer_object *bo = &vbo->base; uint32_t old_mem_type = bo->mem.mem_type; int ret; lockdep_assert_held(&bo->resv->lock.base); + if (pin) { + if (vbo->pin_count++ > 0) + return; + } else { + WARN_ON(vbo->pin_count <= 0); + if (--vbo->pin_count > 0) + return; + } + pl.fpfn = 0; pl.lpfn = 0; pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 620bb5cf617c..f97ec5686cbc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -28,6 +28,7 @@ #include <drm/drmP.h> #include "vmwgfx_drv.h" +#include "vmwgfx_binding.h" #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_object.h> @@ -127,6 +128,9 @@ #define DRM_IOCTL_VMW_SYNCCPU \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ struct drm_vmw_synccpu_arg) +#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \ + struct drm_vmw_context_arg) /** * The core DRM version of this macro doesn't account for @@ -168,8 +172,8 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { DRM_UNLOCKED | DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), + VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | DRM_UNLOCKED | + DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, DRM_UNLOCKED | DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, @@ -206,6 +210,9 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { VMW_IOCTL_DEF(VMW_SYNCCPU, vmw_user_dmabuf_synccpu_ioctl, DRM_UNLOCKED | DRM_RENDER_ALLOW), + VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT, + vmw_extended_context_define_ioctl, + DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), }; static struct pci_device_id vmw_pci_id_list[] = { @@ -278,6 +285,8 @@ static void vmw_print_capabilities(uint32_t capabilities) DRM_INFO(" Command Buffers 2.\n"); if (capabilities & SVGA_CAP_GBOBJECTS) DRM_INFO(" Guest Backed Resources.\n"); + if (capabilities & SVGA_CAP_DX) + DRM_INFO(" DX Features.\n"); } /** @@ -296,30 +305,31 @@ static void vmw_print_capabilities(uint32_t capabilities) static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) { int ret; - struct ttm_buffer_object *bo; + struct vmw_dma_buffer *vbo; struct ttm_bo_kmap_obj map; volatile SVGA3dQueryResult *result; bool dummy; /* - * Create the bo as pinned, so that a tryreserve will + * Create the vbo as pinned, so that a tryreserve will * immediately succeed. This is because we're the only * user of the bo currently. 
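+	 * The query result is preset to SVGA3D_QUERYSTATE_PENDING, and the
+	 * buffer serves as a dummy target when the pinned query bo has to
+	 * be released; see vmw_fifo_emit_dummy_query().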
*/ - ret = ttm_bo_create(&dev_priv->bdev, - PAGE_SIZE, - ttm_bo_type_device, - &vmw_sys_ne_placement, - 0, false, NULL, - &bo); + vbo = kzalloc(sizeof(*vbo), GFP_KERNEL); + if (!vbo) + return -ENOMEM; + ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE, + &vmw_sys_ne_placement, false, + &vmw_dmabuf_bo_free); if (unlikely(ret != 0)) return ret; - ret = ttm_bo_reserve(bo, false, true, false, NULL); + ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL); BUG_ON(ret != 0); + vmw_bo_pin_reserved(vbo, true); - ret = ttm_bo_kmap(bo, 0, 1, &map); + ret = ttm_bo_kmap(&vbo->base, 0, 1, &map); if (likely(ret == 0)) { result = ttm_kmap_obj_virtual(&map, &dummy); result->totalSize = sizeof(*result); @@ -327,18 +337,55 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) result->result32 = 0xff; ttm_bo_kunmap(&map); } - vmw_bo_pin(bo, false); - ttm_bo_unreserve(bo); + vmw_bo_pin_reserved(vbo, false); + ttm_bo_unreserve(&vbo->base); if (unlikely(ret != 0)) { DRM_ERROR("Dummy query buffer map failed.\n"); - ttm_bo_unref(&bo); + vmw_dmabuf_unreference(&vbo); } else - dev_priv->dummy_query_bo = bo; + dev_priv->dummy_query_bo = vbo; return ret; } +/** + * vmw_request_device_late - Perform late device setup + * + * @dev_priv: Pointer to device private. + * + * This function performs setup of otables and enables large command + * buffer submission. These tasks are split out to a separate function + * because it reverts vmw_release_device_early and is intended to be used + * by an error path in the hibernation code. + */ +static int vmw_request_device_late(struct vmw_private *dev_priv) +{ + int ret; + + if (dev_priv->has_mob) { + ret = vmw_otables_setup(dev_priv); + if (unlikely(ret != 0)) { + DRM_ERROR("Unable to initialize " + "guest Memory OBjects.\n"); + return ret; + } + } + + if (dev_priv->cman) { + ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, + 256*4096, 2*4096); + if (ret) { + struct vmw_cmdbuf_man *man = dev_priv->cman; + + dev_priv->cman = NULL; + vmw_cmdbuf_man_destroy(man); + } + } + + return 0; +} + static int vmw_request_device(struct vmw_private *dev_priv) { int ret; @@ -349,14 +396,16 @@ static int vmw_request_device(struct vmw_private *dev_priv) return ret; } vmw_fence_fifo_up(dev_priv->fman); - if (dev_priv->has_mob) { - ret = vmw_otables_setup(dev_priv); - if (unlikely(ret != 0)) { - DRM_ERROR("Unable to initialize " - "guest Memory OBjects.\n"); - goto out_no_mob; - } + dev_priv->cman = vmw_cmdbuf_man_create(dev_priv); + if (IS_ERR(dev_priv->cman)) { + dev_priv->cman = NULL; + dev_priv->has_dx = false; } + + ret = vmw_request_device_late(dev_priv); + if (ret) + goto out_no_mob; + ret = vmw_dummy_query_bo_create(dev_priv); if (unlikely(ret != 0)) goto out_no_query_bo; @@ -364,15 +413,29 @@ static int vmw_request_device(struct vmw_private *dev_priv) return 0; out_no_query_bo: - if (dev_priv->has_mob) + if (dev_priv->cman) + vmw_cmdbuf_remove_pool(dev_priv->cman); + if (dev_priv->has_mob) { + (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB); vmw_otables_takedown(dev_priv); + } + if (dev_priv->cman) + vmw_cmdbuf_man_destroy(dev_priv->cman); out_no_mob: vmw_fence_fifo_down(dev_priv->fman); vmw_fifo_release(dev_priv, &dev_priv->fifo); return ret; } -static void vmw_release_device(struct vmw_private *dev_priv) +/** + * vmw_release_device_early - Early part of fifo takedown. + * + * @dev_priv: Pointer to device private struct. + * + * This is the first part of command submission takedown, to be called before + * buffer management is taken down. 
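+ * It unpins and releases the dummy query bo, removes the command buffer
+ * pool, and evicts the MOB memory type before taking down the otables.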
+ */ +static void vmw_release_device_early(struct vmw_private *dev_priv) { /* * Previous destructions should've released @@ -381,65 +444,31 @@ static void vmw_release_device(struct vmw_private *dev_priv) BUG_ON(dev_priv->pinned_bo != NULL); - ttm_bo_unref(&dev_priv->dummy_query_bo); - if (dev_priv->has_mob) - vmw_otables_takedown(dev_priv); - vmw_fence_fifo_down(dev_priv->fman); - vmw_fifo_release(dev_priv, &dev_priv->fifo); -} - - -/** - * Increase the 3d resource refcount. - * If the count was prevously zero, initialize the fifo, switching to svga - * mode. Note that the master holds a ref as well, and may request an - * explicit switch to svga mode if fb is not running, using @unhide_svga. - */ -int vmw_3d_resource_inc(struct vmw_private *dev_priv, - bool unhide_svga) -{ - int ret = 0; + vmw_dmabuf_unreference(&dev_priv->dummy_query_bo); + if (dev_priv->cman) + vmw_cmdbuf_remove_pool(dev_priv->cman); - mutex_lock(&dev_priv->release_mutex); - if (unlikely(dev_priv->num_3d_resources++ == 0)) { - ret = vmw_request_device(dev_priv); - if (unlikely(ret != 0)) - --dev_priv->num_3d_resources; - } else if (unhide_svga) { - vmw_write(dev_priv, SVGA_REG_ENABLE, - vmw_read(dev_priv, SVGA_REG_ENABLE) & - ~SVGA_REG_ENABLE_HIDE); + if (dev_priv->has_mob) { + ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB); + vmw_otables_takedown(dev_priv); } - - mutex_unlock(&dev_priv->release_mutex); - return ret; } /** - * Decrease the 3d resource refcount. - * If the count reaches zero, disable the fifo, switching to vga mode. - * Note that the master holds a refcount as well, and may request an - * explicit switch to vga mode when it releases its refcount to account - * for the situation of an X server vt switch to VGA with 3d resources - * active. + * vmw_release_device_late - Late part of fifo takedown. + * + * @dev_priv: Pointer to device private struct. + * + * This is the last part of the command submission takedown, to be called when + * command submission is no longer needed. It may wait on pending fences. 
*/ -void vmw_3d_resource_dec(struct vmw_private *dev_priv, - bool hide_svga) +static void vmw_release_device_late(struct vmw_private *dev_priv) { - int32_t n3d; - - mutex_lock(&dev_priv->release_mutex); - if (unlikely(--dev_priv->num_3d_resources == 0)) - vmw_release_device(dev_priv); - else if (hide_svga) - vmw_write(dev_priv, SVGA_REG_ENABLE, - vmw_read(dev_priv, SVGA_REG_ENABLE) | - SVGA_REG_ENABLE_HIDE); - - n3d = (int32_t) dev_priv->num_3d_resources; - mutex_unlock(&dev_priv->release_mutex); + vmw_fence_fifo_down(dev_priv->fman); + if (dev_priv->cman) + vmw_cmdbuf_man_destroy(dev_priv->cman); - BUG_ON(n3d < 0); + vmw_fifo_release(dev_priv, &dev_priv->fifo); } /** @@ -603,6 +632,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) spin_lock_init(&dev_priv->hw_lock); spin_lock_init(&dev_priv->waiter_lock); spin_lock_init(&dev_priv->cap_lock); + spin_lock_init(&dev_priv->svga_lock); for (i = vmw_res_context; i < vmw_res_max; ++i) { idr_init(&dev_priv->res_idr[i]); @@ -673,22 +703,31 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); dev_priv->max_mob_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); - } else + dev_priv->stdu_max_width = + vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH); + dev_priv->stdu_max_height = + vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT); + + vmw_write(dev_priv, SVGA_REG_DEV_CAP, + SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH); + dev_priv->texture_max_width = vmw_read(dev_priv, + SVGA_REG_DEV_CAP); + vmw_write(dev_priv, SVGA_REG_DEV_CAP, + SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT); + dev_priv->texture_max_height = vmw_read(dev_priv, + SVGA_REG_DEV_CAP); + } else { + dev_priv->texture_max_width = 8192; + dev_priv->texture_max_height = 8192; dev_priv->prim_bb_mem = dev_priv->vram_size; + } + + vmw_print_capabilities(dev_priv->capabilities); ret = vmw_dma_masks(dev_priv); if (unlikely(ret != 0)) goto out_err0; - /* - * Limit back buffer size to VRAM size. Remove this once - * screen targets are implemented. - */ - if (dev_priv->prim_bb_mem > dev_priv->vram_size) - dev_priv->prim_bb_mem = dev_priv->vram_size; - - vmw_print_capabilities(dev_priv->capabilities); - if (dev_priv->capabilities & SVGA_CAP_GMR2) { DRM_INFO("Max GMR ids is %u\n", (unsigned)dev_priv->max_gmr_ids); @@ -714,17 +753,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->active_master = &dev_priv->fbdev_master; - ret = ttm_bo_device_init(&dev_priv->bdev, - dev_priv->bo_global_ref.ref.object, - &vmw_bo_driver, - dev->anon_inode->i_mapping, - VMWGFX_FILE_PAGE_OFFSET, - false); - if (unlikely(ret != 0)) { - DRM_ERROR("Failed initializing TTM buffer object driver.\n"); - goto out_err1; - } - dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, dev_priv->mmio_size); @@ -787,13 +815,28 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) goto out_no_fman; } + ret = ttm_bo_device_init(&dev_priv->bdev, + dev_priv->bo_global_ref.ref.object, + &vmw_bo_driver, + dev->anon_inode->i_mapping, + VMWGFX_FILE_PAGE_OFFSET, + false); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed initializing TTM buffer object driver.\n"); + goto out_no_bdev; + } + /* + * Enable VRAM, but initially don't use it until SVGA is enabled and + * unhidden. 
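+ * The TTM VRAM manager's use_type flag stays false until
+ * __vmw_svga_enable() switches the device to SVGA mode.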
+ */ ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, (dev_priv->vram_size >> PAGE_SHIFT)); if (unlikely(ret != 0)) { DRM_ERROR("Failed initializing memory manager for VRAM.\n"); goto out_no_vram; } + dev_priv->bdev.man[TTM_PL_VRAM].use_type = false; dev_priv->has_gmr = true; if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || @@ -814,18 +857,28 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) } } - vmw_kms_save_vga(dev_priv); + if (dev_priv->has_mob) { + spin_lock(&dev_priv->cap_lock); + vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX); + dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP); + spin_unlock(&dev_priv->cap_lock); + } + - /* Start kms and overlay systems, needs fifo. */ ret = vmw_kms_init(dev_priv); if (unlikely(ret != 0)) goto out_no_kms; vmw_overlay_init(dev_priv); + ret = vmw_request_device(dev_priv); + if (ret) + goto out_no_fifo; + + DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no."); + if (dev_priv->enable_fb) { - ret = vmw_3d_resource_inc(dev_priv, true); - if (unlikely(ret != 0)) - goto out_no_fifo; + vmw_fifo_resource_inc(dev_priv); + vmw_svga_enable(dev_priv); vmw_fb_init(dev_priv); } @@ -838,13 +891,14 @@ out_no_fifo: vmw_overlay_close(dev_priv); vmw_kms_close(dev_priv); out_no_kms: - vmw_kms_restore_vga(dev_priv); if (dev_priv->has_mob) (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); if (dev_priv->has_gmr) (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); out_no_vram: + (void)ttm_bo_device_release(&dev_priv->bdev); +out_no_bdev: vmw_fence_manager_takedown(dev_priv->fman); out_no_fman: if (dev_priv->capabilities & SVGA_CAP_IRQMASK) @@ -860,13 +914,13 @@ out_err4: iounmap(dev_priv->mmio_virt); out_err3: arch_phys_wc_del(dev_priv->mmio_mtrr); - (void)ttm_bo_device_release(&dev_priv->bdev); -out_err1: vmw_ttm_global_release(dev_priv); out_err0: for (i = vmw_res_context; i < vmw_res_max; ++i) idr_destroy(&dev_priv->res_idr[i]); + if (dev_priv->ctx.staged_bindings) + vmw_binding_state_free(dev_priv->ctx.staged_bindings); kfree(dev_priv); return ret; } @@ -882,19 +936,24 @@ static int vmw_driver_unload(struct drm_device *dev) drm_ht_remove(&dev_priv->ctx.res_ht); vfree(dev_priv->ctx.cmd_bounce); if (dev_priv->enable_fb) { + vmw_fb_off(dev_priv); vmw_fb_close(dev_priv); - vmw_kms_restore_vga(dev_priv); - vmw_3d_resource_dec(dev_priv, false); + vmw_fifo_resource_dec(dev_priv); + vmw_svga_disable(dev_priv); } + vmw_kms_close(dev_priv); vmw_overlay_close(dev_priv); - if (dev_priv->has_mob) - (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); if (dev_priv->has_gmr) (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); + vmw_release_device_early(dev_priv); + if (dev_priv->has_mob) + (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); + (void) ttm_bo_device_release(&dev_priv->bdev); + vmw_release_device_late(dev_priv); vmw_fence_manager_takedown(dev_priv->fman); if (dev_priv->capabilities & SVGA_CAP_IRQMASK) drm_irq_uninstall(dev_priv->dev); @@ -907,6 +966,8 @@ static int vmw_driver_unload(struct drm_device *dev) iounmap(dev_priv->mmio_virt); arch_phys_wc_del(dev_priv->mmio_mtrr); (void)ttm_bo_device_release(&dev_priv->bdev); + if (dev_priv->ctx.staged_bindings) + vmw_binding_state_free(dev_priv->ctx.staged_bindings); vmw_ttm_global_release(dev_priv); for (i = vmw_res_context; i < vmw_res_max; ++i) @@ -1044,17 +1105,27 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, const 
struct drm_ioctl_desc *ioctl = &vmw_ioctls[nr - DRM_COMMAND_BASE]; - if (unlikely(ioctl->cmd != cmd)) { - DRM_ERROR("Invalid command format, ioctl %d\n", - nr - DRM_COMMAND_BASE); - return -EINVAL; + if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) { + ret = (long) drm_ioctl_permit(ioctl->flags, file_priv); + if (unlikely(ret != 0)) + return ret; + + if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN)) + goto out_io_encoding; + + return (long) vmw_execbuf_ioctl(dev, arg, file_priv, + _IOC_SIZE(cmd)); } + + if (unlikely(ioctl->cmd != cmd)) + goto out_io_encoding; + flags = ioctl->flags; } else if (!drm_ioctl_flags(nr, &flags)) return -EINVAL; vmaster = vmw_master_check(dev, file_priv, flags); - if (unlikely(IS_ERR(vmaster))) { + if (IS_ERR(vmaster)) { ret = PTR_ERR(vmaster); if (ret != -ERESTARTSYS) @@ -1068,6 +1139,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, ttm_read_unlock(&vmaster->lock); return ret; + +out_io_encoding: + DRM_ERROR("Invalid command format, ioctl %d\n", + nr - DRM_COMMAND_BASE); + + return -EINVAL; } static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, @@ -1086,30 +1163,11 @@ static long vmw_compat_ioctl(struct file *filp, unsigned int cmd, static void vmw_lastclose(struct drm_device *dev) { - struct drm_crtc *crtc; - struct drm_mode_set set; - int ret; - - set.x = 0; - set.y = 0; - set.fb = NULL; - set.mode = NULL; - set.connectors = NULL; - set.num_connectors = 0; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - set.crtc = crtc; - ret = drm_mode_set_config_internal(&set); - WARN_ON(ret != 0); - } - } static void vmw_master_init(struct vmw_master *vmaster) { ttm_lock_init(&vmaster->lock); - INIT_LIST_HEAD(&vmaster->fb_surf); - mutex_init(&vmaster->fb_surf_mutex); } static int vmw_master_create(struct drm_device *dev, @@ -1137,7 +1195,6 @@ static void vmw_master_destroy(struct drm_device *dev, kfree(vmaster); } - static int vmw_master_set(struct drm_device *dev, struct drm_file *file_priv, bool from_open) @@ -1148,27 +1205,13 @@ static int vmw_master_set(struct drm_device *dev, struct vmw_master *vmaster = vmw_master(file_priv->master); int ret = 0; - if (!dev_priv->enable_fb) { - ret = vmw_3d_resource_inc(dev_priv, true); - if (unlikely(ret != 0)) - return ret; - vmw_kms_save_vga(dev_priv); - vmw_write(dev_priv, SVGA_REG_TRACES, 0); - } - if (active) { BUG_ON(active != &dev_priv->fbdev_master); ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); if (unlikely(ret != 0)) - goto out_no_active_lock; + return ret; ttm_lock_set_kill(&active->lock, true, SIGTERM); - ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); - if (unlikely(ret != 0)) { - DRM_ERROR("Unable to clean VRAM on " - "master drop.\n"); - } - dev_priv->active_master = NULL; } @@ -1182,14 +1225,6 @@ static int vmw_master_set(struct drm_device *dev, dev_priv->active_master = vmaster; return 0; - -out_no_active_lock: - if (!dev_priv->enable_fb) { - vmw_kms_restore_vga(dev_priv); - vmw_3d_resource_dec(dev_priv, true); - vmw_write(dev_priv, SVGA_REG_TRACES, 1); - } - return ret; } static void vmw_master_drop(struct drm_device *dev, @@ -1214,16 +1249,9 @@ static void vmw_master_drop(struct drm_device *dev, } ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); - vmw_execbuf_release_pinned_bo(dev_priv); - if (!dev_priv->enable_fb) { - ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); - if (unlikely(ret != 0)) - DRM_ERROR("Unable to clean VRAM on master drop.\n"); - vmw_kms_restore_vga(dev_priv); - vmw_3d_resource_dec(dev_priv, true); - 
vmw_write(dev_priv, SVGA_REG_TRACES, 1); - } + if (!dev_priv->enable_fb) + vmw_svga_disable(dev_priv); dev_priv->active_master = &dev_priv->fbdev_master; ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); @@ -1233,6 +1261,76 @@ static void vmw_master_drop(struct drm_device *dev, vmw_fb_on(dev_priv); } +/** + * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM. + * + * @dev_priv: Pointer to device private struct. + * Needs the reservation sem to be held in non-exclusive mode. + */ +static void __vmw_svga_enable(struct vmw_private *dev_priv) +{ + spin_lock(&dev_priv->svga_lock); + if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) { + vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE); + dev_priv->bdev.man[TTM_PL_VRAM].use_type = true; + } + spin_unlock(&dev_priv->svga_lock); +} + +/** + * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM. + * + * @dev_priv: Pointer to device private struct. + */ +void vmw_svga_enable(struct vmw_private *dev_priv) +{ + ttm_read_lock(&dev_priv->reservation_sem, false); + __vmw_svga_enable(dev_priv); + ttm_read_unlock(&dev_priv->reservation_sem); +} + +/** + * __vmw_svga_disable - Disable SVGA mode and use of VRAM. + * + * @dev_priv: Pointer to device private struct. + * Needs the reservation sem to be held in exclusive mode. + * Will not empty VRAM. VRAM must be emptied by caller. + */ +static void __vmw_svga_disable(struct vmw_private *dev_priv) +{ + spin_lock(&dev_priv->svga_lock); + if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) { + dev_priv->bdev.man[TTM_PL_VRAM].use_type = false; + vmw_write(dev_priv, SVGA_REG_ENABLE, + SVGA_REG_ENABLE_HIDE | + SVGA_REG_ENABLE_ENABLE); + } + spin_unlock(&dev_priv->svga_lock); +} + +/** + * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo + * running. + * + * @dev_priv: Pointer to device private struct. + * Will empty VRAM. + */ +void vmw_svga_disable(struct vmw_private *dev_priv) +{ + ttm_write_lock(&dev_priv->reservation_sem, false); + spin_lock(&dev_priv->svga_lock); + if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) { + dev_priv->bdev.man[TTM_PL_VRAM].use_type = false; + spin_unlock(&dev_priv->svga_lock); + if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM)) + DRM_ERROR("Failed evicting VRAM buffers.\n"); + vmw_write(dev_priv, SVGA_REG_ENABLE, + SVGA_REG_ENABLE_HIDE | + SVGA_REG_ENABLE_ENABLE); + } else + spin_unlock(&dev_priv->svga_lock); + ttm_write_unlock(&dev_priv->reservation_sem); +} static void vmw_remove(struct pci_dev *pdev) { @@ -1250,23 +1348,26 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, switch (val) { case PM_HIBERNATION_PREPARE: - case PM_SUSPEND_PREPARE: + if (dev_priv->enable_fb) + vmw_fb_off(dev_priv); ttm_suspend_lock(&dev_priv->reservation_sem); - /** + /* * This empties VRAM and unbinds all GMR bindings. * Buffer contents is moved to swappable memory. */ vmw_execbuf_release_pinned_bo(dev_priv); vmw_resource_evict_all(dev_priv); + vmw_release_device_early(dev_priv); ttm_bo_swapout_all(&dev_priv->bdev); - + vmw_fence_fifo_down(dev_priv->fman); break; case PM_POST_HIBERNATION: - case PM_POST_SUSPEND: case PM_POST_RESTORE: + vmw_fence_fifo_up(dev_priv->fman); ttm_suspend_unlock(&dev_priv->reservation_sem); - + if (dev_priv->enable_fb) + vmw_fb_on(dev_priv); break; case PM_RESTORE_PREPARE: break; @@ -1276,20 +1377,13 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, return 0; } -/** - * These might not be needed with the virtual SVGA device. 
- */ - static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) { struct drm_device *dev = pci_get_drvdata(pdev); struct vmw_private *dev_priv = vmw_priv(dev); - if (dev_priv->num_3d_resources != 0) { - DRM_INFO("Can't suspend or hibernate " - "while 3D resources are active.\n"); + if (dev_priv->refuse_hibernation) return -EBUSY; - } pci_save_state(pdev); pci_disable_device(pdev); @@ -1321,56 +1415,62 @@ static int vmw_pm_resume(struct device *kdev) return vmw_pci_resume(pdev); } -static int vmw_pm_prepare(struct device *kdev) +static int vmw_pm_freeze(struct device *kdev) { struct pci_dev *pdev = to_pci_dev(kdev); struct drm_device *dev = pci_get_drvdata(pdev); struct vmw_private *dev_priv = vmw_priv(dev); - /** - * Release 3d reference held by fbdev and potentially - * stop fifo. - */ dev_priv->suspended = true; if (dev_priv->enable_fb) - vmw_3d_resource_dec(dev_priv, true); - - if (dev_priv->num_3d_resources != 0) { - - DRM_INFO("Can't suspend or hibernate " - "while 3D resources are active.\n"); + vmw_fifo_resource_dec(dev_priv); + if (atomic_read(&dev_priv->num_fifo_resources) != 0) { + DRM_ERROR("Can't hibernate while 3D resources are active.\n"); if (dev_priv->enable_fb) - vmw_3d_resource_inc(dev_priv, true); + vmw_fifo_resource_inc(dev_priv); + WARN_ON(vmw_request_device_late(dev_priv)); dev_priv->suspended = false; return -EBUSY; } + if (dev_priv->enable_fb) + __vmw_svga_disable(dev_priv); + + vmw_release_device_late(dev_priv); + return 0; } -static void vmw_pm_complete(struct device *kdev) +static int vmw_pm_restore(struct device *kdev) { struct pci_dev *pdev = to_pci_dev(kdev); struct drm_device *dev = pci_get_drvdata(pdev); struct vmw_private *dev_priv = vmw_priv(dev); + int ret; vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); (void) vmw_read(dev_priv, SVGA_REG_ID); - /** - * Reclaim 3d reference held by fbdev and potentially - * start fifo. - */ if (dev_priv->enable_fb) - vmw_3d_resource_inc(dev_priv, false); + vmw_fifo_resource_inc(dev_priv); + + ret = vmw_request_device(dev_priv); + if (ret) + return ret; + + if (dev_priv->enable_fb) + __vmw_svga_enable(dev_priv); dev_priv->suspended = false; + + return 0; } static const struct dev_pm_ops vmw_pm_ops = { - .prepare = vmw_pm_prepare, - .complete = vmw_pm_complete, + .freeze = vmw_pm_freeze, + .thaw = vmw_pm_restore, + .restore = vmw_pm_restore, .suspend = vmw_pm_suspend, .resume = vmw_pm_resume, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index d26a6daa9719..8f40692cf48a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a @@ -40,17 +40,17 @@ #include <drm/ttm/ttm_module.h> #include "vmwgfx_fence.h" -#define VMWGFX_DRIVER_DATE "20140704" +#define VMWGFX_DRIVER_DATE "20150810" #define VMWGFX_DRIVER_MAJOR 2 -#define VMWGFX_DRIVER_MINOR 6 -#define VMWGFX_DRIVER_PATCHLEVEL 1 +#define VMWGFX_DRIVER_MINOR 9 +#define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) #define VMWGFX_MAX_RELOCATIONS 2048 #define VMWGFX_MAX_VALIDATIONS 2048 #define VMWGFX_MAX_DISPLAYS 16 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768 -#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0 +#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1 /* * Perhaps we should have sysfs entries for these. @@ -59,6 +59,8 @@ #define VMWGFX_NUM_GB_SHADER 20000 #define VMWGFX_NUM_GB_SURFACE 32768 #define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS +#define VMWGFX_NUM_DXCONTEXT 256 +#define VMWGFX_NUM_DXQUERY 512 #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\ VMWGFX_NUM_GB_SHADER +\ VMWGFX_NUM_GB_SURFACE +\ @@ -85,6 +87,9 @@ struct vmw_fpriv { struct vmw_dma_buffer { struct ttm_buffer_object base; struct list_head res_list; + s32 pin_count; + /* Not ref-counted. Protected by binding_mutex */ + struct vmw_resource *dx_query_ctx; }; /** @@ -113,6 +118,7 @@ struct vmw_resource { bool backup_dirty; /* Protected by backup buffer reserved */ struct vmw_dma_buffer *backup; unsigned long backup_offset; + unsigned long pin_count; /* Protected by resource reserved */ const struct vmw_res_func *func; struct list_head lru_head; /* Protected by the resource lock */ struct list_head mob_head; /* Protected by @backup reserved */ @@ -130,6 +136,9 @@ enum vmw_res_type { vmw_res_surface, vmw_res_stream, vmw_res_shader, + vmw_res_dx_context, + vmw_res_cotable, + vmw_res_view, vmw_res_max }; @@ -137,7 +146,8 @@ enum vmw_res_type { * Resources that are managed using command streams. */ enum vmw_cmdbuf_res_type { - vmw_cmdbuf_res_compat_shader + vmw_cmdbuf_res_shader, + vmw_cmdbuf_res_view }; struct vmw_cmdbuf_res_manager; @@ -160,11 +170,13 @@ struct vmw_surface { struct drm_vmw_size *sizes; uint32_t num_sizes; bool scanout; + uint32_t array_size; /* TODO so far just a extra pointer */ struct vmw_cursor_snooper snooper; struct vmw_surface_offset *offsets; SVGA3dTextureFilter autogen_filter; uint32_t multisample_count; + struct list_head view_list; }; struct vmw_marker_queue { @@ -176,14 +188,15 @@ struct vmw_marker_queue { struct vmw_fifo_state { unsigned long reserved_size; - __le32 *dynamic_buffer; - __le32 *static_buffer; + u32 *dynamic_buffer; + u32 *static_buffer; unsigned long static_buffer_size; bool using_bounce_buffer; uint32_t capabilities; struct mutex fifo_mutex; struct rw_semaphore rwsem; struct vmw_marker_queue marker_queue; + bool dx; }; struct vmw_relocation { @@ -264,70 +277,15 @@ struct vmw_piter { }; /* - * enum vmw_ctx_binding_type - abstract resource to context binding types + * enum vmw_display_unit_type - Describes the display unit */ -enum vmw_ctx_binding_type { - vmw_ctx_binding_shader, - vmw_ctx_binding_rt, - vmw_ctx_binding_tex, - vmw_ctx_binding_max +enum vmw_display_unit_type { + vmw_du_invalid = 0, + vmw_du_legacy, + vmw_du_screen_object, + vmw_du_screen_target }; -/** - * struct vmw_ctx_bindinfo - structure representing a single context binding - * - * @ctx: Pointer to the context structure. NULL means the binding is not - * active. - * @res: Non ref-counted pointer to the bound resource. - * @bt: The binding type. 
- * @i1: Union of information needed to unbind. - */ -struct vmw_ctx_bindinfo { - struct vmw_resource *ctx; - struct vmw_resource *res; - enum vmw_ctx_binding_type bt; - bool scrubbed; - union { - SVGA3dShaderType shader_type; - SVGA3dRenderTargetType rt_type; - uint32 texture_stage; - } i1; -}; - -/** - * struct vmw_ctx_binding - structure representing a single context binding - * - suitable for tracking in a context - * - * @ctx_list: List head for context. - * @res_list: List head for bound resource. - * @bi: Binding info - */ -struct vmw_ctx_binding { - struct list_head ctx_list; - struct list_head res_list; - struct vmw_ctx_bindinfo bi; -}; - - -/** - * struct vmw_ctx_binding_state - context binding state - * - * @list: linked list of individual bindings. - * @render_targets: Render target bindings. - * @texture_units: Texture units/samplers bindings. - * @shaders: Shader bindings. - * - * Note that this structure also provides storage space for the individual - * struct vmw_ctx_binding objects, so that no dynamic allocation is needed - * for individual bindings. - * - */ -struct vmw_ctx_binding_state { - struct list_head list; - struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX]; - struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS]; - struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX]; -}; struct vmw_sw_context{ struct drm_open_hash res_ht; @@ -342,15 +300,21 @@ struct vmw_sw_context{ uint32_t *cmd_bounce; uint32_t cmd_bounce_size; struct list_head resource_list; - struct ttm_buffer_object *cur_query_bo; + struct list_head ctx_resource_list; /* For contexts and cotables */ + struct vmw_dma_buffer *cur_query_bo; struct list_head res_relocations; uint32_t *buf_start; struct vmw_res_cache_entry res_cache[vmw_res_max]; struct vmw_resource *last_query_ctx; bool needs_post_query_barrier; struct vmw_resource *error_resource; - struct vmw_ctx_binding_state staged_bindings; + struct vmw_ctx_binding_state *staged_bindings; + bool staged_bindings_inuse; struct list_head staged_cmd_res; + struct vmw_resource_val_node *dx_ctx_node; + struct vmw_dma_buffer *dx_query_mob; + struct vmw_resource *dx_query_ctx; + struct vmw_cmdbuf_res_manager *man; }; struct vmw_legacy_display; @@ -358,8 +322,6 @@ struct vmw_overlay; struct vmw_master { struct ttm_lock lock; - struct mutex fb_surf_mutex; - struct list_head fb_surf; }; struct vmw_vga_topology_state { @@ -370,6 +332,26 @@ struct vmw_vga_topology_state { uint32_t pos_y; }; + +/* + * struct vmw_otable - Guest Memory OBject table metadata + * + * @size: Size of the table (page-aligned). + * @page_table: Pointer to a struct vmw_mob holding the page table. + */ +struct vmw_otable { + unsigned long size; + struct vmw_mob *page_table; + bool enabled; +}; + +struct vmw_otable_batch { + unsigned num_otables; + struct vmw_otable *otables; + struct vmw_resource *context; + struct ttm_buffer_object *otable_bo; +}; + struct vmw_private { struct ttm_bo_device bdev; struct ttm_bo_global_ref bo_global_ref; @@ -387,9 +369,13 @@ struct vmw_private { uint32_t mmio_size; uint32_t fb_max_width; uint32_t fb_max_height; + uint32_t texture_max_width; + uint32_t texture_max_height; + uint32_t stdu_max_width; + uint32_t stdu_max_height; uint32_t initial_width; uint32_t initial_height; - __le32 __iomem *mmio_virt; + u32 __iomem *mmio_virt; int mmio_mtrr; uint32_t capabilities; uint32_t max_gmr_ids; @@ -401,6 +387,7 @@ struct vmw_private { bool has_mob; spinlock_t hw_lock; spinlock_t cap_lock; + bool has_dx; /* * VGA registers. 
@@ -420,6 +407,7 @@ struct vmw_private { */ void *fb_info; + enum vmw_display_unit_type active_display_unit; struct vmw_legacy_display *ldu_priv; struct vmw_screen_object_display *sou_priv; struct vmw_overlay *overlay_priv; @@ -453,6 +441,8 @@ struct vmw_private { spinlock_t waiter_lock; int fence_queue_waiters; /* Protected by waiter_lock */ int goal_queue_waiters; /* Protected by waiter_lock */ + int cmdbuf_waiters; /* Protected by irq_lock */ + int error_waiters; /* Protected by irq_lock */ atomic_t fifo_queue_waiters; uint32_t last_read_seqno; spinlock_t irq_lock; @@ -484,6 +474,7 @@ struct vmw_private { bool stealth; bool enable_fb; + spinlock_t svga_lock; /** * Master management. @@ -493,9 +484,10 @@ struct vmw_private { struct vmw_master fbdev_master; struct notifier_block pm_nb; bool suspended; + bool refuse_hibernation; struct mutex release_mutex; - uint32_t num_3d_resources; + atomic_t num_fifo_resources; /* * Replace this with an rwsem as soon as we have down_xx_interruptible() @@ -507,8 +499,8 @@ struct vmw_private { * are protected by the cmdbuf mutex. */ - struct ttm_buffer_object *dummy_query_bo; - struct ttm_buffer_object *pinned_bo; + struct vmw_dma_buffer *dummy_query_bo; + struct vmw_dma_buffer *pinned_bo; uint32_t query_cid; uint32_t query_cid_valid; bool dummy_query_bo_pinned; @@ -531,8 +523,9 @@ struct vmw_private { /* * Guest Backed stuff */ - struct ttm_buffer_object *otable_bo; - struct vmw_otable *otables; + struct vmw_otable_batch otable_batch; + + struct vmw_cmdbuf_man *cman; }; static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) @@ -587,8 +580,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv, return val; } -int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga); -void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga); +extern void vmw_svga_enable(struct vmw_private *dev_priv); +extern void vmw_svga_disable(struct vmw_private *dev_priv); + /** * GMR utilities - vmwgfx_gmr.c @@ -610,7 +604,8 @@ extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); extern struct vmw_resource * vmw_resource_reference_unless_doomed(struct vmw_resource *res); extern int vmw_resource_validate(struct vmw_resource *res); -extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup); +extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible, + bool no_backup); extern bool vmw_resource_needs_backup(const struct vmw_resource *res); extern int vmw_user_lookup_handle(struct vmw_private *dev_priv, struct ttm_object_file *tfile, @@ -660,10 +655,14 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv, uint32_t *inout_id, struct vmw_resource **out); extern void vmw_resource_unreserve(struct vmw_resource *res, + bool switch_backup, struct vmw_dma_buffer *new_backup, unsigned long new_backup_offset); extern void vmw_resource_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem); +extern void vmw_query_move_notify(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem); +extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob); extern void vmw_fence_single_bo(struct ttm_buffer_object *bo, struct vmw_fence_obj *fence); extern void vmw_resource_evict_all(struct vmw_private *dev_priv); @@ -671,25 +670,25 @@ extern void vmw_resource_evict_all(struct vmw_private *dev_priv); /** * DMA buffer helper routines - vmwgfx_dmabuf.c */ -extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv, - struct vmw_dma_buffer *bo, - struct 
ttm_placement *placement, - bool interruptible); -extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buf, - bool pin, bool interruptible); -extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buf, - bool pin, bool interruptible); -extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, +extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv, struct vmw_dma_buffer *bo, - bool pin, bool interruptible); + struct ttm_placement *placement, + bool interruptible); +extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv, + struct vmw_dma_buffer *buf, + bool interruptible); +extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv, + struct vmw_dma_buffer *buf, + bool interruptible); +extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv, + struct vmw_dma_buffer *bo, + bool interruptible); extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv, struct vmw_dma_buffer *bo, bool interruptible); extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf, SVGAGuestPtr *ptr); -extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin); +extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin); /** * Misc Ioctl functionality - vmwgfx_ioctl.c @@ -717,7 +716,10 @@ extern int vmw_fifo_init(struct vmw_private *dev_priv, extern void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo); extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes); +extern void * +vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id); extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes); +extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes); extern int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno); extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason); @@ -726,6 +728,8 @@ extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv); extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv); extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, uint32_t cid); +extern int vmw_fifo_flush(struct vmw_private *dev_priv, + bool interruptible); /** * TTM glue - vmwgfx_ttm_glue.c @@ -750,6 +754,7 @@ extern struct ttm_placement vmw_sys_ne_placement; extern struct ttm_placement vmw_evictable_placement; extern struct ttm_placement vmw_srf_placement; extern struct ttm_placement vmw_mob_placement; +extern struct ttm_placement vmw_mob_ne_placement; extern struct ttm_bo_driver vmw_bo_driver; extern int vmw_dma_quiescent(struct drm_device *dev); extern int vmw_bo_map_dma(struct ttm_buffer_object *bo); @@ -800,14 +805,15 @@ static inline struct page *vmw_piter_page(struct vmw_piter *viter) * Command submission - vmwgfx_execbuf.c */ -extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); +extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data, + struct drm_file *file_priv, size_t size); extern int vmw_execbuf_process(struct drm_file *file_priv, struct vmw_private *dev_priv, void __user *user_commands, void *kernel_commands, uint32_t command_size, uint64_t throttle_us, + uint32_t dx_context_handle, struct drm_vmw_fence_rep __user *user_fence_rep, struct vmw_fence_obj **out_fence); @@ -826,6 +832,11 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, *user_fence_rep, struct vmw_fence_obj *fence, uint32_t fence_handle); +extern int 
vmw_validate_single_buffer(struct vmw_private *dev_priv, + struct ttm_buffer_object *bo, + bool interruptible, + bool validate_as_mob); + /** * IRQs and wating - vmwgfx_irq.c @@ -833,8 +844,8 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, extern irqreturn_t vmw_irq_handler(int irq, void *arg); extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy, - uint32_t seqno, bool interruptible, - unsigned long timeout); + uint32_t seqno, bool interruptible, + unsigned long timeout); extern void vmw_irq_preinstall(struct drm_device *dev); extern int vmw_irq_postinstall(struct drm_device *dev); extern void vmw_irq_uninstall(struct drm_device *dev); @@ -852,6 +863,10 @@ extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv); extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv); extern void vmw_goal_waiter_add(struct vmw_private *dev_priv); extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv); +extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag, + int *waiter_count); +extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv, + u32 flag, int *waiter_count); /** * Rudimentary fence-like objects currently used only for throttling - @@ -861,9 +876,9 @@ extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv); extern void vmw_marker_queue_init(struct vmw_marker_queue *queue); extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue); extern int vmw_marker_push(struct vmw_marker_queue *queue, - uint32_t seqno); + uint32_t seqno); extern int vmw_marker_pull(struct vmw_marker_queue *queue, - uint32_t signaled_seqno); + uint32_t signaled_seqno); extern int vmw_wait_lag(struct vmw_private *dev_priv, struct vmw_marker_queue *queue, uint32_t us); @@ -908,12 +923,6 @@ int vmw_kms_present(struct vmw_private *dev_priv, uint32_t sid, int32_t destX, int32_t destY, struct drm_vmw_rect *clips, uint32_t num_clips); -int vmw_kms_readback(struct vmw_private *dev_priv, - struct drm_file *file_priv, - struct vmw_framebuffer *vfb, - struct drm_vmw_fence_rep __user *user_fence_rep, - struct drm_vmw_rect *clips, - uint32_t num_clips); int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -927,6 +936,10 @@ int vmw_dumb_map_offset(struct drm_file *file_priv, int vmw_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle); +extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible); +extern void vmw_resource_unpin(struct vmw_resource *res); +extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res); + /** * Overlay control - vmwgfx_overlay.c */ @@ -982,27 +995,33 @@ extern void vmw_otables_takedown(struct vmw_private *dev_priv); extern const struct vmw_user_resource_conv *user_context_converter; -extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); - extern int vmw_context_check(struct vmw_private *dev_priv, struct ttm_object_file *tfile, int id, struct vmw_resource **p_res); extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, - const struct vmw_ctx_bindinfo *ci); -extern void -vmw_context_binding_state_transfer(struct vmw_resource *res, - struct 
vmw_ctx_binding_state *cbs); -extern void vmw_context_binding_res_list_kill(struct list_head *head); -extern void vmw_context_binding_res_list_scrub(struct list_head *head); -extern int vmw_context_rebind_all(struct vmw_resource *ctx); extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx); extern struct vmw_cmdbuf_res_manager * vmw_context_res_man(struct vmw_resource *ctx); +extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx, + SVGACOTableType cotable_type); +extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx); +struct vmw_ctx_binding_state; +extern struct vmw_ctx_binding_state * +vmw_context_binding_state(struct vmw_resource *ctx); +extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx, + bool readback); +extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res, + struct vmw_dma_buffer *mob); +extern struct vmw_dma_buffer * +vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res); + + /* * Surface management - vmwgfx_surface.c */ @@ -1025,6 +1044,16 @@ extern int vmw_surface_check(struct vmw_private *dev_priv, uint32_t handle, int *id); extern int vmw_surface_validate(struct vmw_private *dev_priv, struct vmw_surface *srf); +int vmw_surface_gb_priv_define(struct drm_device *dev, + uint32_t user_accounting_size, + uint32_t svga3d_flags, + SVGA3dSurfaceFormat format, + bool for_scanout, + uint32_t num_mip_levels, + uint32_t multisample_count, + uint32_t array_size, + struct drm_vmw_size size, + struct vmw_surface **srf_out); /* * Shader management - vmwgfx_shader.c @@ -1042,12 +1071,21 @@ extern int vmw_compat_shader_add(struct vmw_private *dev_priv, SVGA3dShaderType shader_type, size_t size, struct list_head *list); -extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man, - u32 user_key, SVGA3dShaderType shader_type, - struct list_head *list); +extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man, + u32 user_key, SVGA3dShaderType shader_type, + struct list_head *list); +extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man, + struct vmw_resource *ctx, + u32 user_key, + SVGA3dShaderType shader_type, + struct list_head *list); +extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv, + struct list_head *list, + bool readback); + extern struct vmw_resource * -vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man, - u32 user_key, SVGA3dShaderType shader_type); +vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man, + u32 user_key, SVGA3dShaderType shader_type); /* * Command buffer managed resources - vmwgfx_cmdbuf_res.c @@ -1071,7 +1109,48 @@ extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man, extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, enum vmw_cmdbuf_res_type res_type, u32 user_key, - struct list_head *list); + struct list_head *list, + struct vmw_resource **res); + +/* + * COTable management - vmwgfx_cotable.c + */ +extern const SVGACOTableType vmw_cotable_scrub_order[]; +extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, + struct vmw_resource *ctx, + u32 type); +extern int vmw_cotable_notify(struct vmw_resource *res, int id); +extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback); +extern void vmw_cotable_add_resource(struct vmw_resource *ctx, + struct list_head *head); + +/* + * Command buffer managerment vmwgfx_cmdbuf.c + */ +struct vmw_cmdbuf_man; +struct vmw_cmdbuf_header; + +extern struct vmw_cmdbuf_man * +vmw_cmdbuf_man_create(struct vmw_private 
*dev_priv); +extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, + size_t size, size_t default_size); +extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man); +extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man); +extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible, + unsigned long timeout); +extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size, + int ctx_id, bool interruptible, + struct vmw_cmdbuf_header *header); +extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size, + struct vmw_cmdbuf_header *header, + bool flush); +extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man); +extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man, + size_t size, bool interruptible, + struct vmw_cmdbuf_header **p_header); +extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header); +extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man, + bool interruptible); /** @@ -1116,4 +1195,14 @@ static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv) { return (struct ttm_mem_global *) dev_priv->mem_global_ref.object; } + +static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv) +{ + atomic_inc(&dev_priv->num_fifo_resources); +} + +static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv) +{ + atomic_dec(&dev_priv->num_fifo_resources); +} #endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 654c8daeb5ab..b56565457c96 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -29,6 +29,8 @@ #include "vmwgfx_reg.h" #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_placement.h> +#include "vmwgfx_so.h" +#include "vmwgfx_binding.h" #define VMW_RES_HT_ORDER 12 @@ -59,8 +61,11 @@ struct vmw_resource_relocation { * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll. * @first_usage: Set to true the first time the resource is referenced in * the command stream. - * @no_buffer_needed: Resources do not need to allocate buffer backup on - * reservation. The command stream will provide one. + * @switching_backup: The command stream provides a new backup buffer for a + * resource. + * @no_buffer_needed: This means @switching_backup is true on first buffer + * reference. So resource reservation does not need to allocate a backup + * buffer for the resource. 
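The command buffer manager declared above implies a reserve/fill/commit flow. A hedged sketch of a caller; the use of SVGA3D_INVALID_ID for a context-less buffer, the NULL header for the manager-owned current buffer, and vmw_cmdbuf_reserve() returning an ERR_PTR on failure are assumptions read off the declarations, not confirmed by this hunk:

void *cmd;

cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID,
                         true /* interruptible */, NULL);
if (IS_ERR(cmd))
        return PTR_ERR(cmd);

memcpy(cmd, commands, size);              /* fill in SVGA commands */
vmw_cmdbuf_commit(man, size, NULL, false /* no forced flush */);

return vmw_cmdbuf_cur_flush(man, true);   /* kick the current batch */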
*/ struct vmw_resource_val_node { struct list_head head; @@ -69,8 +74,9 @@ struct vmw_resource_val_node { struct vmw_dma_buffer *new_backup; struct vmw_ctx_binding_state *staged_bindings; unsigned long new_backup_offset; - bool first_usage; - bool no_buffer_needed; + u32 first_usage : 1; + u32 switching_backup : 1; + u32 no_buffer_needed : 1; }; /** @@ -92,22 +98,40 @@ struct vmw_cmd_entry { [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\ (_gb_disable), (_gb_enable)} +static int vmw_resource_context_res_add(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + struct vmw_resource *ctx); +static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGAMobId *id, + struct vmw_dma_buffer **vmw_bo_p); +static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, + struct vmw_dma_buffer *vbo, + bool validate_as_mob, + uint32_t *p_val_node); + + /** - * vmw_resource_unreserve - unreserve resources previously reserved for + * vmw_resources_unreserve - unreserve resources previously reserved for * command submission. * - * @list_head: list of resources to unreserve. + * @sw_context: pointer to the software context * @backoff: Whether command submission failed. */ -static void vmw_resource_list_unreserve(struct list_head *list, - bool backoff) +static void vmw_resources_unreserve(struct vmw_sw_context *sw_context, + bool backoff) { struct vmw_resource_val_node *val; + struct list_head *list = &sw_context->resource_list; + + if (sw_context->dx_query_mob && !backoff) + vmw_context_bind_dx_query(sw_context->dx_query_ctx, + sw_context->dx_query_mob); list_for_each_entry(val, list, head) { struct vmw_resource *res = val->res; - struct vmw_dma_buffer *new_backup = - backoff ? NULL : val->new_backup; + bool switch_backup = + (backoff) ? false : val->switching_backup; /* * Transfer staged context bindings to the @@ -115,18 +139,71 @@ static void vmw_resource_list_unreserve(struct list_head *list, */ if (unlikely(val->staged_bindings)) { if (!backoff) { - vmw_context_binding_state_transfer - (val->res, val->staged_bindings); + vmw_binding_state_commit + (vmw_context_binding_state(val->res), + val->staged_bindings); } - kfree(val->staged_bindings); + + if (val->staged_bindings != sw_context->staged_bindings) + vmw_binding_state_free(val->staged_bindings); + else + sw_context->staged_bindings_inuse = false; val->staged_bindings = NULL; } - vmw_resource_unreserve(res, new_backup, - val->new_backup_offset); + vmw_resource_unreserve(res, switch_backup, val->new_backup, + val->new_backup_offset); vmw_dmabuf_unreference(&val->new_backup); } } +/** + * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is + * added to the validate list. + * + * @dev_priv: Pointer to the device private: + * @sw_context: The validation context: + * @node: The validation node holding this context. 
+ */ +static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + struct vmw_resource_val_node *node) +{ + int ret; + + ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res); + if (unlikely(ret != 0)) + goto out_err; + + if (!sw_context->staged_bindings) { + sw_context->staged_bindings = + vmw_binding_state_alloc(dev_priv); + if (IS_ERR(sw_context->staged_bindings)) { + DRM_ERROR("Failed to allocate context binding " + "information.\n"); + ret = PTR_ERR(sw_context->staged_bindings); + sw_context->staged_bindings = NULL; + goto out_err; + } + } + + if (sw_context->staged_bindings_inuse) { + node->staged_bindings = vmw_binding_state_alloc(dev_priv); + if (IS_ERR(node->staged_bindings)) { + DRM_ERROR("Failed to allocate context binding " + "information.\n"); + ret = PTR_ERR(node->staged_bindings); + node->staged_bindings = NULL; + goto out_err; + } + } else { + node->staged_bindings = sw_context->staged_bindings; + sw_context->staged_bindings_inuse = true; + } + + return 0; +out_err: + return ret; +} /** * vmw_resource_val_add - Add a resource to the software context's @@ -141,6 +218,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context, struct vmw_resource *res, struct vmw_resource_val_node **p_node) { + struct vmw_private *dev_priv = res->dev_priv; struct vmw_resource_val_node *node; struct drm_hash_item *hash; int ret; @@ -169,14 +247,90 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context, kfree(node); return ret; } - list_add_tail(&node->head, &sw_context->resource_list); node->res = vmw_resource_reference(res); node->first_usage = true; - if (unlikely(p_node != NULL)) *p_node = node; - return 0; + if (!dev_priv->has_mob) { + list_add_tail(&node->head, &sw_context->resource_list); + return 0; + } + + switch (vmw_res_type(res)) { + case vmw_res_context: + case vmw_res_dx_context: + list_add(&node->head, &sw_context->ctx_resource_list); + ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node); + break; + case vmw_res_cotable: + list_add_tail(&node->head, &sw_context->ctx_resource_list); + break; + default: + list_add_tail(&node->head, &sw_context->resource_list); + break; + } + + return ret; +} + +/** + * vmw_view_res_val_add - Add a view and the surface it's pointing to + * to the validation list + * + * @sw_context: The software context holding the validation list. + * @view: Pointer to the view resource. + * + * Returns 0 if success, negative error code otherwise. + */ +static int vmw_view_res_val_add(struct vmw_sw_context *sw_context, + struct vmw_resource *view) +{ + int ret; + + /* + * First add the resource the view is pointing to, otherwise + * it may be swapped out when the view is validated. + */ + ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL); + if (ret) + return ret; + + return vmw_resource_val_add(sw_context, view, NULL); +} + +/** + * vmw_view_id_val_add - Look up a view and add it and the surface it's + * pointing to to the validation list. + * + * @sw_context: The software context holding the validation list. + * @view_type: The view type to look up. + * @id: view id of the view. + * + * The view is represented by a view id and the DX context it's created on, + * or scheduled for creation on. If there is no DX context set, the function + * will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure. 
+ */ +static int vmw_view_id_val_add(struct vmw_sw_context *sw_context, + enum vmw_view_type view_type, u32 id) +{ + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; + struct vmw_resource *view; + int ret; + + if (!ctx_node) { + DRM_ERROR("DX Context not set.\n"); + return -EINVAL; + } + + view = vmw_view_lookup(sw_context->man, view_type, id); + if (IS_ERR(view)) + return PTR_ERR(view); + + ret = vmw_view_res_val_add(sw_context, view); + vmw_resource_unreference(&view); + + return ret; } /** @@ -195,24 +349,56 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, struct vmw_resource *ctx) { struct list_head *binding_list; - struct vmw_ctx_binding *entry; + struct vmw_ctx_bindinfo *entry; int ret = 0; struct vmw_resource *res; + u32 i; + + /* Add all cotables to the validation list. */ + if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) { + for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { + res = vmw_context_cotable(ctx, i); + if (IS_ERR(res)) + continue; + ret = vmw_resource_val_add(sw_context, res, NULL); + vmw_resource_unreference(&res); + if (unlikely(ret != 0)) + return ret; + } + } + + + /* Add all resources bound to the context to the validation list */ mutex_lock(&dev_priv->binding_mutex); binding_list = vmw_context_binding_list(ctx); list_for_each_entry(entry, binding_list, ctx_list) { - res = vmw_resource_reference_unless_doomed(entry->bi.res); + /* entry->res is not refcounted */ + res = vmw_resource_reference_unless_doomed(entry->res); if (unlikely(res == NULL)) continue; - ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL); + if (vmw_res_type(entry->res) == vmw_res_view) + ret = vmw_view_res_val_add(sw_context, entry->res); + else + ret = vmw_resource_val_add(sw_context, entry->res, + NULL); vmw_resource_unreference(&res); if (unlikely(ret != 0)) break; } + if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) { + struct vmw_dma_buffer *dx_query_mob; + + dx_query_mob = vmw_context_get_dx_query_mob(ctx); + if (dx_query_mob) + ret = vmw_bo_to_validate_list(sw_context, + dx_query_mob, + true, NULL); + } + mutex_unlock(&dev_priv->binding_mutex); return ret; } @@ -308,7 +494,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv, * submission is reached. 
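The binding-list walk above leans on a small idiom worth isolating: entry->res itself carries no reference, so a real one is taken only when the resource is not already on its way out. Reduced to its core (taken from the loop above):

res = vmw_resource_reference_unless_doomed(entry->res);
if (!res)
        continue;       /* Resource is being destroyed; skip it. */

ret = vmw_resource_val_add(sw_context, entry->res, NULL);
vmw_resource_unreference(&res);
if (ret)
        break;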
*/ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, - struct ttm_buffer_object *bo, + struct vmw_dma_buffer *vbo, bool validate_as_mob, uint32_t *p_val_node) { @@ -318,7 +504,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, struct drm_hash_item *hash; int ret; - if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo, + if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo, &hash) == 0)) { vval_buf = container_of(hash, struct vmw_validate_buffer, hash); @@ -336,7 +522,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, return -EINVAL; } vval_buf = &sw_context->val_bufs[val_node]; - vval_buf->hash.key = (unsigned long) bo; + vval_buf->hash.key = (unsigned long) vbo; ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash); if (unlikely(ret != 0)) { DRM_ERROR("Failed to initialize a buffer validation " @@ -345,7 +531,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, } ++sw_context->cur_val_buf; val_buf = &vval_buf->base; - val_buf->bo = ttm_bo_reference(bo); + val_buf->bo = ttm_bo_reference(&vbo->base); val_buf->shared = false; list_add_tail(&val_buf->head, &sw_context->validate_nodes); vval_buf->validate_as_mob = validate_as_mob; @@ -370,27 +556,39 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, static int vmw_resources_reserve(struct vmw_sw_context *sw_context) { struct vmw_resource_val_node *val; - int ret; + int ret = 0; list_for_each_entry(val, &sw_context->resource_list, head) { struct vmw_resource *res = val->res; - ret = vmw_resource_reserve(res, val->no_buffer_needed); + ret = vmw_resource_reserve(res, true, val->no_buffer_needed); if (unlikely(ret != 0)) return ret; if (res->backup) { - struct ttm_buffer_object *bo = &res->backup->base; + struct vmw_dma_buffer *vbo = res->backup; ret = vmw_bo_to_validate_list - (sw_context, bo, + (sw_context, vbo, vmw_resource_needs_backup(res), NULL); if (unlikely(ret != 0)) return ret; } } - return 0; + + if (sw_context->dx_query_mob) { + struct vmw_dma_buffer *expected_dx_query_mob; + + expected_dx_query_mob = + vmw_context_get_dx_query_mob(sw_context->dx_query_ctx); + if (expected_dx_query_mob && + expected_dx_query_mob != sw_context->dx_query_mob) { + ret = -EINVAL; + } + } + + return ret; } /** @@ -409,6 +607,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context) list_for_each_entry(val, &sw_context->resource_list, head) { struct vmw_resource *res = val->res; + struct vmw_dma_buffer *backup = res->backup; ret = vmw_resource_validate(res); if (unlikely(ret != 0)) { @@ -416,18 +615,29 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context) DRM_ERROR("Failed to validate resource.\n"); return ret; } + + /* Check if the resource switched backup buffer */ + if (backup && res->backup && (backup != res->backup)) { + struct vmw_dma_buffer *vbo = res->backup; + + ret = vmw_bo_to_validate_list + (sw_context, vbo, + vmw_resource_needs_backup(res), NULL); + if (ret) { + ttm_bo_unreserve(&vbo->base); + return ret; + } + } } return 0; } - /** * vmw_cmd_res_reloc_add - Add a resource to a software context's * relocation- and validation lists. * * @dev_priv: Pointer to a struct vmw_private identifying the device. * @sw_context: Pointer to the software context. - * @res_type: Resource type. * @id_loc: Pointer to where the id that needs translation is located. * @res: Valid pointer to a struct vmw_resource. 
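For reference, the reserve/unreserve pairing after this patch, sketched with the new arguments: vmw_resource_reserve() gains an interruptible flag, and vmw_resource_unreserve() an explicit switch_backup argument:

ret = vmw_resource_reserve(res, true /* interruptible */,
                           val->no_buffer_needed);
if (ret)
        return ret;

/* ... validate and process the command stream ... */

vmw_resource_unreserve(res, val->switching_backup,
                       val->new_backup, val->new_backup_offset);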
* @p_val: If non null, a pointer to the struct vmw_resource_validate_node @@ -435,7 +645,6 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context) */ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, - enum vmw_res_type res_type, uint32_t *id_loc, struct vmw_resource *res, struct vmw_resource_val_node **p_val) @@ -454,29 +663,6 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv, if (unlikely(ret != 0)) return ret; - if (res_type == vmw_res_context && dev_priv->has_mob && - node->first_usage) { - - /* - * Put contexts first on the list to be able to exit - * list traversal for contexts early. - */ - list_del(&node->head); - list_add(&node->head, &sw_context->resource_list); - - ret = vmw_resource_context_res_add(dev_priv, sw_context, res); - if (unlikely(ret != 0)) - return ret; - node->staged_bindings = - kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); - if (node->staged_bindings == NULL) { - DRM_ERROR("Failed to allocate context binding " - "information.\n"); - return -ENOMEM; - } - INIT_LIST_HEAD(&node->staged_bindings->list); - } - if (p_val) *p_val = node; @@ -554,7 +740,7 @@ vmw_cmd_res_check(struct vmw_private *dev_priv, rcache->res = res; rcache->handle = *id_loc; - ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc, + ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc, res, &node); if (unlikely(ret != 0)) goto out_no_reloc; @@ -573,6 +759,46 @@ out_no_reloc: } /** + * vmw_rebind_dx_query - Rebind DX query associated with the context + * + * @ctx_res: context the query belongs to + * + * This function assumes binding_mutex is held. + */ +static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res) +{ + struct vmw_private *dev_priv = ctx_res->dev_priv; + struct vmw_dma_buffer *dx_query_mob; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXBindAllQuery body; + } *cmd; + + + dx_query_mob = vmw_context_get_dx_query_mob(ctx_res); + + if (!dx_query_mob || dx_query_mob->dx_query_ctx) + return 0; + + cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id); + + if (cmd == NULL) { + DRM_ERROR("Failed to rebind queries.\n"); + return -ENOMEM; + } + + cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY; + cmd->header.size = sizeof(cmd->body); + cmd->body.cid = ctx_res->id; + cmd->body.mobid = dx_query_mob->base.mem.start; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + + vmw_context_bind_dx_query(ctx_res, dx_query_mob); + + return 0; +} + +/** * vmw_rebind_contexts - Rebind all resources previously bound to * referenced contexts. * @@ -589,12 +815,80 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context) if (unlikely(!val->staged_bindings)) break; - ret = vmw_context_rebind_all(val->res); + ret = vmw_binding_rebind_all + (vmw_context_binding_state(val->res)); if (unlikely(ret != 0)) { if (ret != -ERESTARTSYS) DRM_ERROR("Failed to rebind context.\n"); return ret; } + + ret = vmw_rebind_all_dx_query(val->res); + if (ret != 0) + return ret; + } + + return 0; +} + +/** + * vmw_view_bindings_add - Add an array of view bindings to a context + * binding state tracker. + * + * @sw_context: The execbuf state used for this command. + * @view_type: View type for the bindings. + * @binding_type: Binding type for the bindings. + * @shader_slot: The shader slot to user for the bindings. + * @view_ids: Array of view ids to be bound. + * @num_views: Number of view ids in @view_ids. + * @first_slot: The binding slot to be used for the first view id in @view_ids. 
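As used later in this patch by vmw_cmd_dx_set_shader_res(), a typical vmw_view_bindings_add() call decodes the trailing view id array from the command payload and binds it in one go:

return vmw_view_bindings_add(sw_context, vmw_view_sr,
                             vmw_ctx_binding_sr,
                             cmd->body.type - SVGA3D_SHADERTYPE_MIN,
                             (void *) &cmd[1], num_sr_view,
                             cmd->body.startView);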
+ */ +static int vmw_view_bindings_add(struct vmw_sw_context *sw_context, + enum vmw_view_type view_type, + enum vmw_ctx_binding_type binding_type, + uint32 shader_slot, + uint32 view_ids[], u32 num_views, + u32 first_slot) +{ + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; + struct vmw_cmdbuf_res_manager *man; + u32 i; + int ret; + + if (!ctx_node) { + DRM_ERROR("DX Context not set.\n"); + return -EINVAL; + } + + man = sw_context->man; + for (i = 0; i < num_views; ++i) { + struct vmw_ctx_bindinfo_view binding; + struct vmw_resource *view = NULL; + + if (view_ids[i] != SVGA3D_INVALID_ID) { + view = vmw_view_lookup(man, view_type, view_ids[i]); + if (IS_ERR(view)) { + DRM_ERROR("View not found.\n"); + return PTR_ERR(view); + } + + ret = vmw_view_res_val_add(sw_context, view); + if (ret) { + DRM_ERROR("Could not add view to " + "validation list.\n"); + vmw_resource_unreference(&view); + return ret; + } + } + binding.bi.ctx = ctx_node->res; + binding.bi.res = view; + binding.bi.bt = binding_type; + binding.shader_slot = shader_slot; + binding.slot = first_slot + i; + vmw_binding_add(ctx_node->staged_bindings, &binding.bi, + shader_slot, binding.slot); + if (view) + vmw_resource_unreference(&view); } return 0; @@ -638,6 +932,12 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, cmd = container_of(header, struct vmw_sid_cmd, header); + if (cmd->body.type >= SVGA3D_RT_MAX) { + DRM_ERROR("Illegal render target type %u.\n", + (unsigned) cmd->body.type); + return -EINVAL; + } + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, user_context_converter, &cmd->body.cid, &ctx_node); @@ -651,13 +951,14 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, return ret; if (dev_priv->has_mob) { - struct vmw_ctx_bindinfo bi; - - bi.ctx = ctx_node->res; - bi.res = res_node ? res_node->res : NULL; - bi.bt = vmw_ctx_binding_rt; - bi.i1.rt_type = cmd->body.type; - return vmw_context_binding_add(ctx_node->staged_bindings, &bi); + struct vmw_ctx_bindinfo_view binding; + + binding.bi.ctx = ctx_node->res; + binding.bi.res = res_node ? 
res_node->res : NULL; + binding.bi.bt = vmw_ctx_binding_rt; + binding.slot = cmd->body.type; + vmw_binding_add(ctx_node->staged_bindings, + &binding.bi, 0, binding.slot); } return 0; @@ -674,16 +975,62 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, int ret; cmd = container_of(header, struct vmw_sid_cmd, header); + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, user_surface_converter, &cmd->body.src.sid, NULL); - if (unlikely(ret != 0)) + if (ret) return ret; + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, user_surface_converter, &cmd->body.dest.sid, NULL); } +static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXBufferCopy body; + } *cmd; + int ret; + + cmd = container_of(header, typeof(*cmd), header); + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, + user_surface_converter, + &cmd->body.src, NULL); + if (ret != 0) + return ret; + + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, + user_surface_converter, + &cmd->body.dest, NULL); +} + +static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXPredCopyRegion body; + } *cmd; + int ret; + + cmd = container_of(header, typeof(*cmd), header); + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, + user_surface_converter, + &cmd->body.srcSid, NULL); + if (ret != 0) + return ret; + + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, + user_surface_converter, + &cmd->body.dstSid, NULL); +} + static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) @@ -752,7 +1099,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv, * command batch. */ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, - struct ttm_buffer_object *new_query_bo, + struct vmw_dma_buffer *new_query_bo, struct vmw_sw_context *sw_context) { struct vmw_res_cache_entry *ctx_entry = @@ -764,7 +1111,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, if (unlikely(new_query_bo != sw_context->cur_query_bo)) { - if (unlikely(new_query_bo->num_pages > 4)) { + if (unlikely(new_query_bo->base.num_pages > 4)) { DRM_ERROR("Query buffer too large.\n"); return -EINVAL; } @@ -833,12 +1180,12 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, if (dev_priv->pinned_bo != sw_context->cur_query_bo) { if (dev_priv->pinned_bo) { - vmw_bo_pin(dev_priv->pinned_bo, false); - ttm_bo_unref(&dev_priv->pinned_bo); + vmw_bo_pin_reserved(dev_priv->pinned_bo, false); + vmw_dmabuf_unreference(&dev_priv->pinned_bo); } if (!sw_context->needs_post_query_barrier) { - vmw_bo_pin(sw_context->cur_query_bo, true); + vmw_bo_pin_reserved(sw_context->cur_query_bo, true); /* * We pin also the dummy_query_bo buffer so that we @@ -846,14 +1193,17 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, * dummy queries in context destroy paths. 
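The _reserved suffix on vmw_bo_pin_reserved() signals that the caller must already hold the TTM reservation. A sketch of the expected calling convention; the ttm_bo_reserve() argument list is assumed from this kernel generation and is not visible in this hunk:

ret = ttm_bo_reserve(&buf->base, true /* interruptible */,
                     false /* no_wait */, false /* use_ticket */, NULL);
if (ret)
        return ret;

vmw_bo_pin_reserved(buf, true);         /* pin while reserved */
ttm_bo_unreserve(&buf->base);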
*/ - vmw_bo_pin(dev_priv->dummy_query_bo, true); - dev_priv->dummy_query_bo_pinned = true; + if (!dev_priv->dummy_query_bo_pinned) { + vmw_bo_pin_reserved(dev_priv->dummy_query_bo, + true); + dev_priv->dummy_query_bo_pinned = true; + } BUG_ON(sw_context->last_query_ctx == NULL); dev_priv->query_cid = sw_context->last_query_ctx->id; dev_priv->query_cid_valid = true; dev_priv->pinned_bo = - ttm_bo_reference(sw_context->cur_query_bo); + vmw_dmabuf_reference(sw_context->cur_query_bo); } } } @@ -882,7 +1232,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, struct vmw_dma_buffer **vmw_bo_p) { struct vmw_dma_buffer *vmw_bo = NULL; - struct ttm_buffer_object *bo; uint32_t handle = *id; struct vmw_relocation *reloc; int ret; @@ -893,7 +1242,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, ret = -EINVAL; goto out_no_reloc; } - bo = &vmw_bo->base; if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { DRM_ERROR("Max number relocations per submission" @@ -906,7 +1254,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, reloc->mob_loc = id; reloc->location = NULL; - ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index); + ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index); if (unlikely(ret != 0)) goto out_no_reloc; @@ -944,7 +1292,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, struct vmw_dma_buffer **vmw_bo_p) { struct vmw_dma_buffer *vmw_bo = NULL; - struct ttm_buffer_object *bo; uint32_t handle = ptr->gmrId; struct vmw_relocation *reloc; int ret; @@ -955,7 +1302,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, ret = -EINVAL; goto out_no_reloc; } - bo = &vmw_bo->base; if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { DRM_ERROR("Max number relocations per submission" @@ -967,7 +1313,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, reloc = &sw_context->relocs[sw_context->cur_reloc++]; reloc->location = ptr; - ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index); + ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index); if (unlikely(ret != 0)) goto out_no_reloc; @@ -980,6 +1326,98 @@ out_no_reloc: return ret; } + + +/** + * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command. + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context used for this command submission. + * @header: Pointer to the command header in the command stream. + * + * This function adds the new query into the query COTABLE + */ +static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_dx_define_query_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdDXDefineQuery q; + } *cmd; + + int ret; + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; + struct vmw_resource *cotable_res; + + + if (ctx_node == NULL) { + DRM_ERROR("DX Context not set for query.\n"); + return -EINVAL; + } + + cmd = container_of(header, struct vmw_dx_define_query_cmd, header); + + if (cmd->q.type < SVGA3D_QUERYTYPE_MIN || + cmd->q.type >= SVGA3D_QUERYTYPE_MAX) + return -EINVAL; + + cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY); + ret = vmw_cotable_notify(cotable_res, cmd->q.queryId); + vmw_resource_unreference(&cotable_res); + + return ret; +} + + + +/** + * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command. + * + * @dev_priv: Pointer to a device private struct. 
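vmw_translate_mob_ptr() is the standard way a validator resolves a user-space MOB id, as vmw_cmd_dx_bind_query() below does: look the buffer up, let it land on the relocation list so the kernel MOB id can be patched in at submission, and drop the local reference once recorded. In isolation:

struct vmw_dma_buffer *vmw_bo;

ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
                            &vmw_bo);
if (ret)
        return ret;

/* ... stash vmw_bo wherever the command needs it ... */
vmw_dmabuf_unreference(&vmw_bo);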
+ * @sw_context: The software context used for this command submission. + * @header: Pointer to the command header in the command stream. + * + * The query bind operation will eventually associate the query ID + * with its backing MOB. In this function, we take the user mode + * MOB ID and use vmw_translate_mob_ptr() to translate it to its + * kernel mode equivalent. + */ +static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_dx_bind_query_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdDXBindQuery q; + } *cmd; + + struct vmw_dma_buffer *vmw_bo; + int ret; + + + cmd = container_of(header, struct vmw_dx_bind_query_cmd, header); + + /* + * Look up the buffer pointed to by q.mobid, put it on the relocation + * list so its kernel mode MOB ID can be filled in later + */ + ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid, + &vmw_bo); + + if (ret != 0) + return ret; + + sw_context->dx_query_mob = vmw_bo; + sw_context->dx_query_ctx = sw_context->dx_ctx_node->res; + + vmw_dmabuf_unreference(&vmw_bo); + + return ret; +} + + + /** * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command. * @@ -1074,7 +1512,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, if (unlikely(ret != 0)) return ret; - ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); + ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context); vmw_dmabuf_unreference(&vmw_bo); return ret; @@ -1128,7 +1566,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv, if (unlikely(ret != 0)) return ret; - ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); + ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context); vmw_dmabuf_unreference(&vmw_bo); return ret; @@ -1363,6 +1801,12 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv, if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) continue; + if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) { + DRM_ERROR("Illegal texture/sampler unit %u.\n", + (unsigned) cur_state->stage); + return -EINVAL; + } + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, user_surface_converter, &cur_state->value, &res_node); @@ -1370,14 +1814,14 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv, return ret; if (dev_priv->has_mob) { - struct vmw_ctx_bindinfo bi; - - bi.ctx = ctx_node->res; - bi.res = res_node ? res_node->res : NULL; - bi.bt = vmw_ctx_binding_tex; - bi.i1.texture_stage = cur_state->stage; - vmw_context_binding_add(ctx_node->staged_bindings, - &bi); + struct vmw_ctx_bindinfo_tex binding; + + binding.bi.ctx = ctx_node->res; + binding.bi.res = res_node ? res_node->res : NULL; + binding.bi.bt = vmw_ctx_binding_tex; + binding.texture_stage = cur_state->stage; + vmw_binding_add(ctx_node->staged_bindings, &binding.bi, + 0, binding.texture_stage); } } @@ -1407,6 +1851,47 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, return ret; } + +/** + * vmw_cmd_res_switch_backup - Utility function to handle backup buffer + * switching + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @val_node: The validation node representing the resource. + * @buf_id: Pointer to the user-space backup buffer handle in the command + * stream. + * @backup_offset: Offset of backup into MOB. + * + * This function prepares for registering a switch of backup buffers + * in the resource metadata just prior to unreserving. 
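vmw_cmd_switch_backup() keeps the old interface for the bind-style commands. A caller in the shape of vmw_cmd_bind_gb_shader() further down; user_shader_converter is assumed from the existing driver and is not shown in this hunk:

return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
                             user_shader_converter, &cmd->body.shid,
                             &cmd->body.mobid,
                             cmd->body.offsetInBytes);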
It's basically a wrapper + * around vmw_cmd_res_switch_backup with a different interface. + */ +static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + struct vmw_resource_val_node *val_node, + uint32_t *buf_id, + unsigned long backup_offset) +{ + struct vmw_dma_buffer *dma_buf; + int ret; + + ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf); + if (ret) + return ret; + + val_node->switching_backup = true; + if (val_node->first_usage) + val_node->no_buffer_needed = true; + + vmw_dmabuf_unreference(&val_node->new_backup); + val_node->new_backup = dma_buf; + val_node->new_backup_offset = backup_offset; + + return 0; +} + + /** * vmw_cmd_switch_backup - Utility function to handle backup buffer switching * @@ -1420,7 +1905,8 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, * @backup_offset: Offset of backup into MOB. * * This function prepares for registering a switch of backup buffers - * in the resource metadata just prior to unreserving. + * in the resource metadata just prior to unreserving. It's basically a wrapper + * around vmw_cmd_res_switch_backup with a different interface. */ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, @@ -1431,27 +1917,16 @@ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, uint32_t *buf_id, unsigned long backup_offset) { - int ret; - struct vmw_dma_buffer *dma_buf; struct vmw_resource_val_node *val_node; + int ret; ret = vmw_cmd_res_check(dev_priv, sw_context, res_type, converter, res_id, &val_node); - if (unlikely(ret != 0)) - return ret; - - ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf); - if (unlikely(ret != 0)) + if (ret) return ret; - if (val_node->first_usage) - val_node->no_buffer_needed = true; - - vmw_dmabuf_unreference(&val_node->new_backup); - val_node->new_backup = dma_buf; - val_node->new_backup_offset = backup_offset; - - return 0; + return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node, + buf_id, backup_offset); } /** @@ -1703,10 +2178,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, if (unlikely(!dev_priv->has_mob)) return 0; - ret = vmw_compat_shader_remove(vmw_context_res_man(val->res), - cmd->body.shid, - cmd->body.type, - &sw_context->staged_cmd_res); + ret = vmw_shader_remove(vmw_context_res_man(val->res), + cmd->body.shid, + cmd->body.type, + &sw_context->staged_cmd_res); if (unlikely(ret != 0)) return ret; @@ -1734,13 +2209,19 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, SVGA3dCmdSetShader body; } *cmd; struct vmw_resource_val_node *ctx_node, *res_node = NULL; - struct vmw_ctx_bindinfo bi; + struct vmw_ctx_bindinfo_shader binding; struct vmw_resource *res = NULL; int ret; cmd = container_of(header, struct vmw_set_shader_cmd, header); + if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) { + DRM_ERROR("Illegal shader type %u.\n", + (unsigned) cmd->body.type); + return -EINVAL; + } + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, user_context_converter, &cmd->body.cid, &ctx_node); @@ -1751,14 +2232,12 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, return 0; if (cmd->body.shid != SVGA3D_INVALID_ID) { - res = vmw_compat_shader_lookup - (vmw_context_res_man(ctx_node->res), - cmd->body.shid, - cmd->body.type); + res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res), + cmd->body.shid, + cmd->body.type); if (!IS_ERR(res)) { ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, - vmw_res_shader, 
&cmd->body.shid, res, &res_node); vmw_resource_unreference(&res); @@ -1776,11 +2255,13 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, return ret; } - bi.ctx = ctx_node->res; - bi.res = res_node ? res_node->res : NULL; - bi.bt = vmw_ctx_binding_shader; - bi.i1.shader_type = cmd->body.type; - return vmw_context_binding_add(ctx_node->staged_bindings, &bi); + binding.bi.ctx = ctx_node->res; + binding.bi.res = res_node ? res_node->res : NULL; + binding.bi.bt = vmw_ctx_binding_shader; + binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; + vmw_binding_add(ctx_node->staged_bindings, &binding.bi, + binding.shader_slot, 0); + return 0; } /** @@ -1842,6 +2323,690 @@ static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv, cmd->body.offsetInBytes); } +/** + * vmw_cmd_dx_set_single_constant_buffer - Validate an + * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command. + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. + */ +static int +vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXSetSingleConstantBuffer body; + } *cmd; + struct vmw_resource_val_node *res_node = NULL; + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; + struct vmw_ctx_bindinfo_cb binding; + int ret; + + if (unlikely(ctx_node == NULL)) { + DRM_ERROR("DX Context not set.\n"); + return -EINVAL; + } + + cmd = container_of(header, typeof(*cmd), header); + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, + user_surface_converter, + &cmd->body.sid, &res_node); + if (unlikely(ret != 0)) + return ret; + + binding.bi.ctx = ctx_node->res; + binding.bi.res = res_node ? res_node->res : NULL; + binding.bi.bt = vmw_ctx_binding_cb; + binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; + binding.offset = cmd->body.offsetInBytes; + binding.size = cmd->body.sizeInBytes; + binding.slot = cmd->body.slot; + + if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 || + binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) { + DRM_ERROR("Illegal const buffer shader %u slot %u.\n", + (unsigned) cmd->body.type, + (unsigned) binding.slot); + return -EINVAL; + } + + vmw_binding_add(ctx_node->staged_bindings, &binding.bi, + binding.shader_slot, binding.slot); + + return 0; +} + +/** + * vmw_cmd_dx_set_shader_res - Validate an + * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. 
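The recurring cmd->body.type - SVGA3D_SHADERTYPE_MIN computation normalizes device shader-type values to the 0-based slots the binding tracker indexes by. Assuming the usual SVGA3D numbering, where the vertex shader is the first type:

/*
 * Device shader type              binding tracker slot
 * SVGA3D_SHADERTYPE_VS (== MIN)   0
 * SVGA3D_SHADERTYPE_PS            1
 * SVGA3D_SHADERTYPE_GS            2
 */
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;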
+ */ +static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXSetShaderResources body; + } *cmd = container_of(header, typeof(*cmd), header); + u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) / + sizeof(SVGA3dShaderResourceViewId); + + if ((u64) cmd->body.startView + (u64) num_sr_view > + (u64) SVGA3D_DX_MAX_SRVIEWS || + cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) { + DRM_ERROR("Invalid shader binding.\n"); + return -EINVAL; + } + + return vmw_view_bindings_add(sw_context, vmw_view_sr, + vmw_ctx_binding_sr, + cmd->body.type - SVGA3D_SHADERTYPE_MIN, + (void *) &cmd[1], num_sr_view, + cmd->body.startView); +} + +/** + * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER + * command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. + */ +static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXSetShader body; + } *cmd; + struct vmw_resource *res = NULL; + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; + struct vmw_ctx_bindinfo_shader binding; + int ret = 0; + + if (unlikely(ctx_node == NULL)) { + DRM_ERROR("DX Context not set.\n"); + return -EINVAL; + } + + cmd = container_of(header, typeof(*cmd), header); + + if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) { + DRM_ERROR("Illegal shader type %u.\n", + (unsigned) cmd->body.type); + return -EINVAL; + } + + if (cmd->body.shaderId != SVGA3D_INVALID_ID) { + res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0); + if (IS_ERR(res)) { + DRM_ERROR("Could not find shader for binding.\n"); + return PTR_ERR(res); + } + + ret = vmw_resource_val_add(sw_context, res, NULL); + if (ret) + goto out_unref; + } + + binding.bi.ctx = ctx_node->res; + binding.bi.res = res; + binding.bi.bt = vmw_ctx_binding_dx_shader; + binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; + + vmw_binding_add(ctx_node->staged_bindings, &binding.bi, + binding.shader_slot, 0); +out_unref: + if (res) + vmw_resource_unreference(&res); + + return ret; +} + +/** + * vmw_cmd_dx_set_vertex_buffers - Validates an + * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. 
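The 64-bit casts in the range check above are deliberate: they keep a hostile startView near UINT_MAX from wrapping the addition and slipping under the limit. The guard in isolation:

/*
 * With 32-bit arithmetic, start = 0xffffffff and count = 2 would wrap
 * to 1 and wrongly pass; promoting to u64 keeps the sum exact.
 */
if ((u64) start + (u64) count > (u64) limit)
        return -EINVAL;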
+ */ +static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; + struct vmw_ctx_bindinfo_vb binding; + struct vmw_resource_val_node *res_node; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXSetVertexBuffers body; + SVGA3dVertexBuffer buf[]; + } *cmd; + int i, ret, num; + + if (unlikely(ctx_node == NULL)) { + DRM_ERROR("DX Context not set.\n"); + return -EINVAL; + } + + cmd = container_of(header, typeof(*cmd), header); + num = (cmd->header.size - sizeof(cmd->body)) / + sizeof(SVGA3dVertexBuffer); + if ((u64)num + (u64)cmd->body.startBuffer > + (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) { + DRM_ERROR("Invalid number of vertex buffers.\n"); + return -EINVAL; + } + + for (i = 0; i < num; i++) { + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, + user_surface_converter, + &cmd->buf[i].sid, &res_node); + if (unlikely(ret != 0)) + return ret; + + binding.bi.ctx = ctx_node->res; + binding.bi.bt = vmw_ctx_binding_vb; + binding.bi.res = ((res_node) ? res_node->res : NULL); + binding.offset = cmd->buf[i].offset; + binding.stride = cmd->buf[i].stride; + binding.slot = i + cmd->body.startBuffer; + + vmw_binding_add(ctx_node->staged_bindings, &binding.bi, + 0, binding.slot); + } + + return 0; +} + +/** + * vmw_cmd_dx_ia_set_vertex_buffers - Validate an + * SVGA_3D_CMD_DX_IA_SET_VERTEX_BUFFERS command. + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. + */ +static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; + struct vmw_ctx_bindinfo_ib binding; + struct vmw_resource_val_node *res_node; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXSetIndexBuffer body; + } *cmd; + int ret; + + if (unlikely(ctx_node == NULL)) { + DRM_ERROR("DX Context not set.\n"); + return -EINVAL; + } + + cmd = container_of(header, typeof(*cmd), header); + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, + user_surface_converter, + &cmd->body.sid, &res_node); + if (unlikely(ret != 0)) + return ret; + + binding.bi.ctx = ctx_node->res; + binding.bi.res = ((res_node) ? res_node->res : NULL); + binding.bi.bt = vmw_ctx_binding_ib; + binding.offset = cmd->body.offset; + binding.format = cmd->body.format; + + vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0); + + return 0; +} + +/** + * vmw_cmd_dx_set_rendertarget - Validate an + * SVGA_3D_CMD_DX_SET_RENDERTARGETS command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. 
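Variable-length DX commands carry only a total size in their header, so validators recover the trailing element count by subtracting the fixed body, as vmw_cmd_dx_set_vertex_buffers() above does. The computation in isolation:

u32 num = (cmd->header.size - sizeof(cmd->body)) /
          sizeof(SVGA3dVertexBuffer);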
+ */ +static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXSetRenderTargets body; + } *cmd = container_of(header, typeof(*cmd), header); + int ret; + u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) / + sizeof(SVGA3dRenderTargetViewId); + + if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) { + DRM_ERROR("Invalid DX Rendertarget binding.\n"); + return -EINVAL; + } + + ret = vmw_view_bindings_add(sw_context, vmw_view_ds, + vmw_ctx_binding_ds, 0, + &cmd->body.depthStencilViewId, 1, 0); + if (ret) + return ret; + + return vmw_view_bindings_add(sw_context, vmw_view_rt, + vmw_ctx_binding_dx_rt, 0, + (void *)&cmd[1], num_rt_view, 0); +} + +/** + * vmw_cmd_dx_clear_rendertarget_view - Validate an + * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. + */ +static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXClearRenderTargetView body; + } *cmd = container_of(header, typeof(*cmd), header); + + return vmw_view_id_val_add(sw_context, vmw_view_rt, + cmd->body.renderTargetViewId); +} + +/** + * vmw_cmd_dx_clear_rendertarget_view - Validate an + * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. + */ +static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXClearDepthStencilView body; + } *cmd = container_of(header, typeof(*cmd), header); + + return vmw_view_id_val_add(sw_context, vmw_view_ds, + cmd->body.depthStencilViewId); +} + +static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; + struct vmw_resource_val_node *srf_node; + struct vmw_resource *res; + enum vmw_view_type view_type; + int ret; + /* + * This is based on the fact that all affected define commands have + * the same initial command body layout. + */ + struct { + SVGA3dCmdHeader header; + uint32 defined_id; + uint32 sid; + } *cmd; + + if (unlikely(ctx_node == NULL)) { + DRM_ERROR("DX Context not set.\n"); + return -EINVAL; + } + + view_type = vmw_view_cmd_to_type(header->id); + cmd = container_of(header, typeof(*cmd), header); + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, + user_surface_converter, + &cmd->sid, &srf_node); + if (unlikely(ret != 0)) + return ret; + + res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]); + ret = vmw_cotable_notify(res, cmd->defined_id); + vmw_resource_unreference(&res); + if (unlikely(ret != 0)) + return ret; + + return vmw_view_add(sw_context->man, + ctx_node->res, + srf_node->res, + view_type, + cmd->defined_id, + header, + header->size + sizeof(*header), + &sw_context->staged_cmd_res); +} + +/** + * vmw_cmd_dx_set_so_targets - Validate an + * SVGA_3D_CMD_DX_SET_SOTARGETS command. + * + * @dev_priv: Pointer to a device private struct. 
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_bindinfo_so binding;
+	struct vmw_resource_val_node *res_node;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetSOTargets body;
+		SVGA3dSoTarget targets[];
+	} *cmd;
+	int i, ret, num;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	cmd = container_of(header, typeof(*cmd), header);
+	num = (cmd->header.size - sizeof(cmd->body)) /
+		sizeof(SVGA3dSoTarget);
+
+	if (num > SVGA3D_DX_MAX_SOTARGETS) {
+		DRM_ERROR("Invalid DX SO binding.\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num; i++) {
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+					user_surface_converter,
+					&cmd->targets[i].sid, &res_node);
+		if (unlikely(ret != 0))
+			return ret;
+
+		binding.bi.ctx = ctx_node->res;
+		binding.bi.res = ((res_node) ? res_node->res : NULL);
+		binding.bi.bt = vmw_ctx_binding_so;
+		binding.offset = cmd->targets[i].offset;
+		binding.size = cmd->targets[i].sizeInBytes;
+		binding.slot = i;
+
+		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+				0, binding.slot);
+	}
+
+	return 0;
+}
+
+static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
+				struct vmw_sw_context *sw_context,
+				SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *res;
+	/*
+	 * This is based on the fact that all affected define commands have
+	 * the same initial command body layout.
+	 */
+	struct {
+		SVGA3dCmdHeader header;
+		uint32 defined_id;
+	} *cmd;
+	enum vmw_so_type so_type;
+	int ret;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	so_type = vmw_so_cmd_to_type(header->id);
+	res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cotable_notify(res, cmd->defined_id);
+	vmw_resource_unreference(&res);
+
+	return ret;
+}
+
+/**
+ * vmw_cmd_dx_check_subresource - Validate an
+ * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
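+ *
+ * All three subresource commands place the surface id at the same offset,
+ * which the function below asserts at compile time before aliasing the
+ * command bodies through a union, roughly:
+ *
+ *	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
+ *		     offsetof(typeof(*cmd), sid));
+ *
+ * so a single &cmd->sid can be handed to vmw_cmd_res_check() for all of
+ * the readback, invalidate and update variants.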
+ */ +static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct { + SVGA3dCmdHeader header; + union { + SVGA3dCmdDXReadbackSubResource r_body; + SVGA3dCmdDXInvalidateSubResource i_body; + SVGA3dCmdDXUpdateSubResource u_body; + SVGA3dSurfaceId sid; + }; + } *cmd; + + BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) != + offsetof(typeof(*cmd), sid)); + BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) != + offsetof(typeof(*cmd), sid)); + BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) != + offsetof(typeof(*cmd), sid)); + + cmd = container_of(header, typeof(*cmd), header); + + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, + user_surface_converter, + &cmd->sid, NULL); +} + +static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; + + if (unlikely(ctx_node == NULL)) { + DRM_ERROR("DX Context not set.\n"); + return -EINVAL; + } + + return 0; +} + +/** + * vmw_cmd_dx_view_remove - validate a view remove command and + * schedule the view resource for removal. + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. + * + * Check that the view exists, and if it was not created using this + * command batch, make sure it's validated (present in the device) so that + * the remove command will not confuse the device. + */ +static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; + struct { + SVGA3dCmdHeader header; + union vmw_view_destroy body; + } *cmd = container_of(header, typeof(*cmd), header); + enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id); + struct vmw_resource *view; + int ret; + + if (!ctx_node) { + DRM_ERROR("DX Context not set.\n"); + return -EINVAL; + } + + ret = vmw_view_remove(sw_context->man, + cmd->body.view_id, view_type, + &sw_context->staged_cmd_res, + &view); + if (ret || !view) + return ret; + + /* + * Add view to the validate list iff it was not created using this + * command batch. + */ + return vmw_view_res_val_add(sw_context, view); +} + +/** + * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER + * command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. 
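+ *
+ * As with the view and state define commands earlier in this file, the
+ * context's DX shader cotable is notified first so it can grow to cover
+ * the new id, and the shader is staged only if that succeeds; a sketch of
+ * the pattern used below:
+ *
+ *	res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
+ *	ret = vmw_cotable_notify(res, cmd->body.shaderId);
+ *	vmw_resource_unreference(&res);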
+ */ +static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; + struct vmw_resource *res; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXDefineShader body; + } *cmd = container_of(header, typeof(*cmd), header); + int ret; + + if (!ctx_node) { + DRM_ERROR("DX Context not set.\n"); + return -EINVAL; + } + + res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER); + ret = vmw_cotable_notify(res, cmd->body.shaderId); + vmw_resource_unreference(&res); + if (ret) + return ret; + + return vmw_dx_shader_add(sw_context->man, ctx_node->res, + cmd->body.shaderId, cmd->body.type, + &sw_context->staged_cmd_res); +} + +/** + * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER + * command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. + */ +static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXDestroyShader body; + } *cmd = container_of(header, typeof(*cmd), header); + int ret; + + if (!ctx_node) { + DRM_ERROR("DX Context not set.\n"); + return -EINVAL; + } + + ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0, + &sw_context->staged_cmd_res); + if (ret) + DRM_ERROR("Could not find shader to remove.\n"); + + return ret; +} + +/** + * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER + * command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. 
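+ *
+ * The command may name an explicit context in body.cid, or pass
+ * SVGA3D_INVALID_ID to mean the DX context of the current batch; the
+ * shader is then looked up in that context's resource manager and its
+ * backup buffer is switched to body.mobid at body.offsetInBytes.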
+ */ +static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_resource_val_node *ctx_node; + struct vmw_resource_val_node *res_node; + struct vmw_resource *res; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXBindShader body; + } *cmd = container_of(header, typeof(*cmd), header); + int ret; + + if (cmd->body.cid != SVGA3D_INVALID_ID) { + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, + user_context_converter, + &cmd->body.cid, &ctx_node); + if (ret) + return ret; + } else { + ctx_node = sw_context->dx_ctx_node; + if (!ctx_node) { + DRM_ERROR("DX Context not set.\n"); + return -EINVAL; + } + } + + res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res), + cmd->body.shid, 0); + if (IS_ERR(res)) { + DRM_ERROR("Could not find shader to bind.\n"); + return PTR_ERR(res); + } + + ret = vmw_resource_val_add(sw_context, res, &res_node); + if (ret) { + DRM_ERROR("Error creating resource validation node.\n"); + goto out_unref; + } + + + ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node, + &cmd->body.mobid, + cmd->body.offsetInBytes); +out_unref: + vmw_resource_unreference(&res); + + return ret; +} + static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, void *buf, uint32_t *size) @@ -1849,7 +3014,7 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, uint32_t size_remaining = *size; uint32_t cmd_id; - cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); + cmd_id = ((uint32_t *)buf)[0]; switch (cmd_id) { case SVGA_CMD_UPDATE: *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate); @@ -1980,7 +3145,7 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid, false, false, true), - VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid, + VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid, false, false, true), @@ -2051,7 +3216,147 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check, - true, false, true) + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid, + false, false, true), + + /* + * DX commands + */ + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER, + &vmw_cmd_dx_set_single_constant_buffer, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES, + &vmw_cmd_dx_set_shader_res, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, 
&vmw_cmd_dx_cid_check, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED, + &vmw_cmd_dx_cid_check, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS, + &vmw_cmd_dx_set_vertex_buffers, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER, + &vmw_cmd_dx_set_index_buffer, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS, + &vmw_cmd_dx_set_rendertargets, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE, + &vmw_cmd_dx_cid_check, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE, + &vmw_cmd_dx_cid_check, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET, + &vmw_cmd_ok, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW, + &vmw_cmd_dx_clear_rendertarget_view, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW, + &vmw_cmd_dx_clear_depthstencil_view, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE, + &vmw_cmd_dx_check_subresource, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE, + &vmw_cmd_dx_check_subresource, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE, + &vmw_cmd_dx_check_subresource, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW, + &vmw_cmd_dx_view_define, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW, + &vmw_cmd_dx_view_remove, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW, + &vmw_cmd_dx_view_define, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW, + &vmw_cmd_dx_view_remove, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW, + &vmw_cmd_dx_view_define, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW, + &vmw_cmd_dx_view_remove, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT, + &vmw_cmd_dx_so_define, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT, + &vmw_cmd_dx_cid_check, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE, + &vmw_cmd_dx_so_define, true, false, true), + 
VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE, + &vmw_cmd_dx_cid_check, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE, + &vmw_cmd_dx_so_define, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE, + &vmw_cmd_dx_cid_check, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE, + &vmw_cmd_dx_so_define, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE, + &vmw_cmd_dx_cid_check, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE, + &vmw_cmd_dx_so_define, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE, + &vmw_cmd_dx_cid_check, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER, + &vmw_cmd_dx_define_shader, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER, + &vmw_cmd_dx_destroy_shader, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER, + &vmw_cmd_dx_bind_shader, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT, + &vmw_cmd_dx_so_define, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT, + &vmw_cmd_dx_cid_check, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS, + &vmw_cmd_dx_set_so_targets, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT, + &vmw_cmd_dx_cid_check, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY, + &vmw_cmd_dx_cid_check, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY, + &vmw_cmd_buffer_copy_check, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION, + &vmw_cmd_pred_copy_check, true, false, true), }; static int vmw_cmd_check(struct vmw_private *dev_priv, @@ -2065,14 +3370,14 @@ static int vmw_cmd_check(struct vmw_private *dev_priv, const struct vmw_cmd_entry *entry; bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS; - cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); + cmd_id = ((uint32_t *)buf)[0]; /* Handle any none 3D commands */ if (unlikely(cmd_id < SVGA_CMD_MAX)) return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size); - cmd_id = le32_to_cpu(header->id); - *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader); + cmd_id = header->id; + *size = header->size + sizeof(SVGA3dCmdHeader); cmd_id -= SVGA_3D_CMD_BASE; if (unlikely(*size > size_remaining)) @@ -2184,7 +3489,8 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context) * * @list: The resource list. 
*/ -static void vmw_resource_list_unreference(struct list_head *list) +static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context, + struct list_head *list) { struct vmw_resource_val_node *val, *val_next; @@ -2195,8 +3501,15 @@ static void vmw_resource_list_unreference(struct list_head *list) list_for_each_entry_safe(val, val_next, list, head) { list_del_init(&val->head); vmw_resource_unreference(&val->res); - if (unlikely(val->staged_bindings)) - kfree(val->staged_bindings); + + if (val->staged_bindings) { + if (val->staged_bindings != sw_context->staged_bindings) + vmw_binding_state_free(val->staged_bindings); + else + sw_context->staged_bindings_inuse = false; + val->staged_bindings = NULL; + } + kfree(val); } } @@ -2222,24 +3535,21 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context) (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash); } -static int vmw_validate_single_buffer(struct vmw_private *dev_priv, - struct ttm_buffer_object *bo, - bool validate_as_mob) +int vmw_validate_single_buffer(struct vmw_private *dev_priv, + struct ttm_buffer_object *bo, + bool interruptible, + bool validate_as_mob) { + struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer, + base); int ret; - - /* - * Don't validate pinned buffers. - */ - - if (bo == dev_priv->pinned_bo || - (bo == dev_priv->dummy_query_bo && - dev_priv->dummy_query_bo_pinned)) + if (vbo->pin_count > 0) return 0; if (validate_as_mob) - return ttm_bo_validate(bo, &vmw_mob_placement, true, false); + return ttm_bo_validate(bo, &vmw_mob_placement, interruptible, + false); /** * Put BO in VRAM if there is space, otherwise as a GMR. @@ -2248,7 +3558,8 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, * used as a GMR, this will return -ENOMEM. */ - ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false); + ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible, + false); if (likely(ret == 0 || ret == -ERESTARTSYS)) return ret; @@ -2257,8 +3568,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, * previous contents. */ - DRM_INFO("Falling through to VRAM.\n"); - ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false); + ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false); return ret; } @@ -2270,6 +3580,7 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv, list_for_each_entry(entry, &sw_context->validate_nodes, base.head) { ret = vmw_validate_single_buffer(dev_priv, entry->base.bo, + true, entry->validate_as_mob); if (unlikely(ret != 0)) return ret; @@ -2417,7 +3728,164 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, } } +/** + * vmw_execbuf_submit_fifo - Patch a command batch and submit it using + * the fifo. + * + * @dev_priv: Pointer to a device private structure. + * @kernel_commands: Pointer to the unpatched command batch. + * @command_size: Size of the unpatched command batch. + * @sw_context: Structure holding the relocation lists. + * + * Side effects: If this function returns 0, then the command batch + * pointed to by @kernel_commands will have been modified. 
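+ *
+ * Buffer-object relocations are applied to @kernel_commands before the
+ * copy into fifo space, while resource relocations are applied to the
+ * fifo copy itself; the command-buffer variant below follows the same
+ * two-pass order.  vmw_execbuf_process() picks between the two paths
+ * based on whether a command buffer header was allocated, roughly:
+ *
+ *	if (!header)
+ *		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
+ *					      command_size, sw_context);
+ *	else
+ *		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header,
+ *						command_size, sw_context);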
+ */
+static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
+				   void *kernel_commands,
+				   u32 command_size,
+				   struct vmw_sw_context *sw_context)
+{
+	void *cmd;
+
+	if (sw_context->dx_ctx_node)
+		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
+					  sw_context->dx_ctx_node->res->id);
+	else
+		cmd = vmw_fifo_reserve(dev_priv, command_size);
+	if (!cmd) {
+		DRM_ERROR("Failed reserving fifo space for commands.\n");
+		return -ENOMEM;
+	}
+
+	vmw_apply_relocations(sw_context);
+	memcpy(cmd, kernel_commands, command_size);
+	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+	vmw_resource_relocations_free(&sw_context->res_relocations);
+	vmw_fifo_commit(dev_priv, command_size);
+
+	return 0;
+}
+
+/**
+ * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
+ * the command buffer manager.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @header: Opaque handle to the command buffer allocation.
+ * @command_size: Size of the unpatched command batch.
+ * @sw_context: Structure holding the relocation lists.
+ *
+ * Side effects: If this function returns 0, then the command buffer
+ * represented by @header will have been modified.
+ */
+static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
+				     struct vmw_cmdbuf_header *header,
+				     u32 command_size,
+				     struct vmw_sw_context *sw_context)
+{
+	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
+		  SVGA3D_INVALID_ID);
+	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
+				       id, false, header);
+
+	vmw_apply_relocations(sw_context);
+	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+	vmw_resource_relocations_free(&sw_context->res_relocations);
+	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
+
+	return 0;
+}
+
+/**
+ * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
+ * submission using a command buffer.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @user_commands: User-space pointer to the commands to be submitted.
+ * @kernel_commands: Pointer to the commands if already in kernel space,
+ * or NULL.
+ * @command_size: Size of the unpatched command batch.
+ * @header: Out parameter returning the opaque pointer to the command buffer.
+ *
+ * This function checks whether we can use the command buffer manager for
+ * submission and if so, creates a command buffer of suitable size and
+ * copies the user data into that buffer.
+ *
+ * On successful return, the function returns a pointer to the data in the
+ * command buffer and *@header is set to non-NULL.
+ * If command buffers could not be used, the function will return the value
+ * of @kernel_commands at the time of the call. That value may be NULL. In
+ * that case, the value of *@header will be set to NULL.
+ * If an error is encountered, the function will return a pointer error value.
+ * If the function is interrupted by a signal while sleeping, it will return
+ * -ERESTARTSYS cast to a pointer error value.
+ */
+static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
+				void __user *user_commands,
+				void *kernel_commands,
+				u32 command_size,
+				struct vmw_cmdbuf_header **header)
+{
+	size_t cmdbuf_size;
+	int ret;
+
+	*header = NULL;
+	if (!dev_priv->cman || kernel_commands)
+		return kernel_commands;
+
+	if (command_size > SVGA_CB_MAX_SIZE) {
+		DRM_ERROR("Command buffer is too large.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* If possible, add a little space for fencing.
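+	 * The 512 bytes of headroom presumably leave room in the same
+	 * buffer for commands the kernel appends on its own behalf (such
+	 * as fencing); the size is clamped again right below, so the
+	 * allocation can never exceed SVGA_CB_MAX_SIZE.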
+	 */
+	cmdbuf_size = command_size + 512;
+	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
+	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
+					   true, header);
+	if (IS_ERR(kernel_commands))
+		return kernel_commands;
+
+	ret = copy_from_user(kernel_commands, user_commands,
+			     command_size);
+	if (ret) {
+		DRM_ERROR("Failed copying commands.\n");
+		vmw_cmdbuf_header_free(*header);
+		*header = NULL;
+		return ERR_PTR(-EFAULT);
+	}
+
+	return kernel_commands;
+}
+
+static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   uint32_t handle)
+{
+	struct vmw_resource_val_node *ctx_node;
+	struct vmw_resource *res;
+	int ret;
+
+	if (handle == SVGA3D_INVALID_ID)
+		return 0;
+	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
+					      handle, user_context_converter,
+					      &res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
+			  (unsigned) handle);
+		return ret;
+	}
+
+	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	sw_context->dx_ctx_node = ctx_node;
+	sw_context->man = vmw_context_res_man(res);
+out_err:
+	vmw_resource_unreference(&res);
+	return ret;
+}
 
 int vmw_execbuf_process(struct drm_file *file_priv,
 			struct vmw_private *dev_priv,
@@ -2425,6 +3893,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 			void *kernel_commands,
 			uint32_t command_size,
 			uint64_t throttle_us,
+			uint32_t dx_context_handle,
 			struct drm_vmw_fence_rep __user *user_fence_rep,
 			struct vmw_fence_obj **out_fence)
 {
@@ -2432,18 +3901,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	struct vmw_fence_obj *fence = NULL;
 	struct vmw_resource *error_resource;
 	struct list_head resource_list;
+	struct vmw_cmdbuf_header *header;
 	struct ww_acquire_ctx ticket;
 	uint32_t handle;
-	void *cmd;
 	int ret;
 
+	if (throttle_us) {
+		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
+				   throttle_us);
+
+		if (ret)
+			return ret;
+	}
+
+	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
+					     kernel_commands, command_size,
+					     &header);
+	if (IS_ERR(kernel_commands))
+		return PTR_ERR(kernel_commands);
+
 	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
-	if (unlikely(ret != 0))
-		return -ERESTARTSYS;
+	if (ret) {
+		ret = -ERESTARTSYS;
+		goto out_free_header;
+	}
 
+	sw_context->kernel = false;
 	if (kernel_commands == NULL) {
-		sw_context->kernel = false;
-
 		ret = vmw_resize_cmd_bounce(sw_context, command_size);
 		if (unlikely(ret != 0))
 			goto out_unlock;
@@ -2458,19 +3942,26 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 			goto out_unlock;
 		}
 		kernel_commands = sw_context->cmd_bounce;
-	} else
+	} else if (!header)
 		sw_context->kernel = true;
 
 	sw_context->fp = vmw_fpriv(file_priv);
 	sw_context->cur_reloc = 0;
 	sw_context->cur_val_buf = 0;
 	INIT_LIST_HEAD(&sw_context->resource_list);
+	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
 	sw_context->cur_query_bo = dev_priv->pinned_bo;
 	sw_context->last_query_ctx = NULL;
 	sw_context->needs_post_query_barrier = false;
+	sw_context->dx_ctx_node = NULL;
+	sw_context->dx_query_mob = NULL;
+	sw_context->dx_query_ctx = NULL;
 	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
 	INIT_LIST_HEAD(&sw_context->validate_nodes);
 	INIT_LIST_HEAD(&sw_context->res_relocations);
+	if (sw_context->staged_bindings)
+		vmw_binding_state_reset(sw_context->staged_bindings);
+
 	if (!sw_context->res_ht_initialized) {
 		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
 		if (unlikely(ret != 0))
@@ -2478,10 +3969,24 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		sw_context->res_ht_initialized = true;
 	}
 	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
-	INIT_LIST_HEAD(&resource_list);
+	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
+	if (unlikely(ret != 0)) {
+		list_splice_init(&sw_context->ctx_resource_list,
+				 &sw_context->resource_list);
+		goto out_err_nores;
+	}
+
 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
 				command_size);
+	/*
+	 * Merge the resource lists before checking the return status
+	 * from vmw_cmd_check_all so that all the open hashtabs will
+	 * be handled properly even if vmw_cmd_check_all fails.
+	 */
+	list_splice_init(&sw_context->ctx_resource_list,
+			 &sw_context->resource_list);
+
 	if (unlikely(ret != 0))
 		goto out_err_nores;
@@ -2492,7 +3997,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
 				     true, NULL);
 	if (unlikely(ret != 0))
-		goto out_err;
+		goto out_err_nores;
 
 	ret = vmw_validate_buffers(dev_priv, sw_context);
 	if (unlikely(ret != 0))
 		goto out_err;
@@ -2502,14 +4007,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	if (unlikely(ret != 0))
 		goto out_err;
 
-	if (throttle_us) {
-		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
-				   throttle_us);
-
-		if (unlikely(ret != 0))
-			goto out_err;
-	}
-
 	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
 	if (unlikely(ret != 0)) {
 		ret = -ERESTARTSYS;
 		goto out_err;
 	}
@@ -2522,20 +4019,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		goto out_unlock_binding;
 	}
 
-	cmd = vmw_fifo_reserve(dev_priv, command_size);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving fifo space for commands.\n");
-		ret = -ENOMEM;
-		goto out_unlock_binding;
+	if (!header) {
+		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
+					      command_size, sw_context);
+	} else {
+		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
+						sw_context);
+		header = NULL;
 	}
-
-	vmw_apply_relocations(sw_context);
-	memcpy(cmd, kernel_commands, command_size);
-
-	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
-	vmw_resource_relocations_free(&sw_context->res_relocations);
-
-	vmw_fifo_commit(dev_priv, command_size);
+	mutex_unlock(&dev_priv->binding_mutex);
+	if (ret)
+		goto out_err;
 
 	vmw_query_bo_switch_commit(dev_priv, sw_context);
 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2550,8 +4044,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	if (ret != 0)
 		DRM_ERROR("Fence submission error. Syncing.\n");
 
-	vmw_resource_list_unreserve(&sw_context->resource_list, false);
-	mutex_unlock(&dev_priv->binding_mutex);
+	vmw_resources_unreserve(sw_context, false);
 
 	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
 				    (void *) fence);
@@ -2580,7 +4073,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	 * Unreference resources outside of the cmdbuf_mutex to
 	 * avoid deadlocks in resource destruction paths.
*/ - vmw_resource_list_unreference(&resource_list); + vmw_resource_list_unreference(sw_context, &resource_list); return 0; @@ -2589,7 +4082,7 @@ out_unlock_binding: out_err: ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); out_err_nores: - vmw_resource_list_unreserve(&sw_context->resource_list, true); + vmw_resources_unreserve(sw_context, true); vmw_resource_relocations_free(&sw_context->res_relocations); vmw_free_relocations(sw_context); vmw_clear_validations(sw_context); @@ -2607,9 +4100,12 @@ out_unlock: * Unreference resources outside of the cmdbuf_mutex to * avoid deadlocks in resource destruction paths. */ - vmw_resource_list_unreference(&resource_list); + vmw_resource_list_unreference(sw_context, &resource_list); if (unlikely(error_resource != NULL)) vmw_resource_unreference(&error_resource); +out_free_header: + if (header) + vmw_cmdbuf_header_free(header); return ret; } @@ -2628,9 +4124,11 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv) DRM_ERROR("Can't unpin query buffer. Trying to recover.\n"); (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ); - vmw_bo_pin(dev_priv->pinned_bo, false); - vmw_bo_pin(dev_priv->dummy_query_bo, false); - dev_priv->dummy_query_bo_pinned = false; + vmw_bo_pin_reserved(dev_priv->pinned_bo, false); + if (dev_priv->dummy_query_bo_pinned) { + vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false); + dev_priv->dummy_query_bo_pinned = false; + } } @@ -2672,11 +4170,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, INIT_LIST_HEAD(&validate_list); - pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo); + pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base); pinned_val.shared = false; list_add_tail(&pinned_val.head, &validate_list); - query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo); + query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base); query_val.shared = false; list_add_tail(&query_val.head, &validate_list); @@ -2697,10 +4195,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, dev_priv->query_cid_valid = false; } - vmw_bo_pin(dev_priv->pinned_bo, false); - vmw_bo_pin(dev_priv->dummy_query_bo, false); - dev_priv->dummy_query_bo_pinned = false; - + vmw_bo_pin_reserved(dev_priv->pinned_bo, false); + if (dev_priv->dummy_query_bo_pinned) { + vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false); + dev_priv->dummy_query_bo_pinned = false; + } if (fence == NULL) { (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence, NULL); @@ -2712,7 +4211,9 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, ttm_bo_unref(&query_val.bo); ttm_bo_unref(&pinned_val.bo); - ttm_bo_unref(&dev_priv->pinned_bo); + vmw_dmabuf_unreference(&dev_priv->pinned_bo); + DRM_INFO("Dummy query bo pin count: %d\n", + dev_priv->dummy_query_bo->pin_count); out_unlock: return; @@ -2722,7 +4223,7 @@ out_no_emit: out_no_reserve: ttm_bo_unref(&query_val.bo); ttm_bo_unref(&pinned_val.bo); - ttm_bo_unref(&dev_priv->pinned_bo); + vmw_dmabuf_unreference(&dev_priv->pinned_bo); } /** @@ -2751,36 +4252,68 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv) mutex_unlock(&dev_priv->cmdbuf_mutex); } - -int vmw_execbuf_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data, + struct drm_file *file_priv, size_t size) { struct vmw_private *dev_priv = vmw_priv(dev); - struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data; + struct drm_vmw_execbuf_arg arg; int ret; 
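+	/*
+	 * The ioctl argument is copied in two steps: everything up to
+	 * context_handle is common to all argument versions, and the rest
+	 * is copied only for version >= 2.  copy_offset[version - 1] gives
+	 * the number of bytes valid for that version, so the pattern used
+	 * below is, roughly:
+	 *
+	 *	copy_from_user(&arg, data, copy_offset[0]);
+	 *	if (arg.version > 1)
+	 *		copy_from_user(&arg.context_handle,
+	 *			       data + copy_offset[0],
+	 *			       copy_offset[arg.version - 1] -
+	 *			       copy_offset[0]);
+	 *
+	 * which lets the struct grow at the end without breaking old
+	 * user-space binaries.
+	 */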
+ static const size_t copy_offset[] = { + offsetof(struct drm_vmw_execbuf_arg, context_handle), + sizeof(struct drm_vmw_execbuf_arg)}; + + if (unlikely(size < copy_offset[0])) { + DRM_ERROR("Invalid command size, ioctl %d\n", + DRM_VMW_EXECBUF); + return -EINVAL; + } + + if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0) + return -EFAULT; /* - * This will allow us to extend the ioctl argument while + * Extend the ioctl argument while * maintaining backwards compatibility: * We take different code paths depending on the value of - * arg->version. + * arg.version. */ - if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) { + if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION || + arg.version == 0)) { DRM_ERROR("Incorrect execbuf version.\n"); - DRM_ERROR("You're running outdated experimental " - "vmwgfx user-space drivers."); return -EINVAL; } + if (arg.version > 1 && + copy_from_user(&arg.context_handle, + (void __user *) (data + copy_offset[0]), + copy_offset[arg.version - 1] - + copy_offset[0]) != 0) + return -EFAULT; + + switch (arg.version) { + case 1: + arg.context_handle = (uint32_t) -1; + break; + case 2: + if (arg.pad64 != 0) { + DRM_ERROR("Unused IOCTL data not set to zero.\n"); + return -EINVAL; + } + break; + default: + break; + } + ret = ttm_read_lock(&dev_priv->reservation_sem, true); if (unlikely(ret != 0)) return ret; ret = vmw_execbuf_process(file_priv, dev_priv, - (void __user *)(unsigned long)arg->commands, - NULL, arg->command_size, arg->throttle_us, - (void __user *)(unsigned long)arg->fence_rep, + (void __user *)(unsigned long)arg.commands, + NULL, arg.command_size, arg.throttle_us, + arg.context_handle, + (void __user *)(unsigned long)arg.fence_rep, NULL); ttm_read_unlock(&dev_priv->reservation_sem); if (unlikely(ret != 0)) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 0a474f391fad..042c5b4c706c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -1,7 +1,7 @@ /************************************************************************** * * Copyright © 2007 David Airlie - * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a @@ -30,6 +30,7 @@ #include <drm/drmP.h> #include "vmwgfx_drv.h" +#include "vmwgfx_kms.h" #include <drm/ttm/ttm_placement.h> @@ -40,21 +41,22 @@ struct vmw_fb_par { void *vmalloc; + struct mutex bo_mutex; struct vmw_dma_buffer *vmw_bo; struct ttm_bo_kmap_obj map; + void *bo_ptr; + unsigned bo_size; + struct drm_framebuffer *set_fb; + struct drm_display_mode *set_mode; + u32 fb_x; + u32 fb_y; + bool bo_iowrite; u32 pseudo_palette[17]; - unsigned depth; - unsigned bpp; - unsigned max_width; unsigned max_height; - void *bo_ptr; - unsigned bo_size; - bool bo_iowrite; - struct { spinlock_t lock; bool active; @@ -63,6 +65,11 @@ struct vmw_fb_par { unsigned x2; unsigned y2; } dirty; + + struct drm_crtc *crtc; + struct drm_connector *con; + + bool local_mode; }; static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green, @@ -77,7 +84,7 @@ static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green, return 1; } - switch (par->depth) { + switch (par->set_fb->depth) { case 24: case 32: pal[regno] = ((red & 0xff00) << 8) | @@ -85,7 +92,8 @@ static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green, ((blue & 0xff00) >> 8); break; default: - DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp); + DRM_ERROR("Bad depth %u, bpp %u.\n", par->set_fb->depth, + par->set_fb->bits_per_pixel); return 1; } @@ -134,12 +142,6 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var, return -EINVAL; } - if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && - (var->xoffset != 0 || var->yoffset != 0)) { - DRM_ERROR("Can not handle panning without display topology\n"); - return -EINVAL; - } - if ((var->xoffset + var->xres) > par->max_width || (var->yoffset + var->yres) > par->max_height) { DRM_ERROR("Requested geom can not fit in framebuffer\n"); @@ -156,46 +158,6 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var, return 0; } -static int vmw_fb_set_par(struct fb_info *info) -{ - struct vmw_fb_par *par = info->par; - struct vmw_private *vmw_priv = par->vmw_priv; - int ret; - - info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8; - - ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres, - info->fix.line_length, - par->bpp, par->depth); - if (ret) - return ret; - - if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) { - /* TODO check if pitch and offset changes */ - vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); - } - - /* This is really helpful since if this fails the user - * can probably not see anything on the screen. 
- */ - WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0); - - return 0; -} - -static int vmw_fb_pan_display(struct fb_var_screeninfo *var, - struct fb_info *info) -{ - return 0; -} - static int vmw_fb_blank(int blank, struct fb_info *info) { return 0; @@ -209,54 +171,77 @@ static void vmw_fb_dirty_flush(struct vmw_fb_par *par) { struct vmw_private *vmw_priv = par->vmw_priv; struct fb_info *info = vmw_priv->fb_info; - int stride = (info->fix.line_length / 4); - int *src = (int *)info->screen_base; - __le32 __iomem *vram_mem = par->bo_ptr; - unsigned long flags; - unsigned x, y, w, h; - int i, k; - struct { - uint32_t header; - SVGAFifoCmdUpdate body; - } *cmd; + unsigned long irq_flags; + s32 dst_x1, dst_x2, dst_y1, dst_y2, w, h; + u32 cpp, max_x, max_y; + struct drm_clip_rect clip; + struct drm_framebuffer *cur_fb; + u8 *src_ptr, *dst_ptr; if (vmw_priv->suspended) return; - spin_lock_irqsave(&par->dirty.lock, flags); - if (!par->dirty.active) { - spin_unlock_irqrestore(&par->dirty.lock, flags); - return; - } - x = par->dirty.x1; - y = par->dirty.y1; - w = min(par->dirty.x2, info->var.xres) - x; - h = min(par->dirty.y2, info->var.yres) - y; - par->dirty.x1 = par->dirty.x2 = 0; - par->dirty.y1 = par->dirty.y2 = 0; - spin_unlock_irqrestore(&par->dirty.lock, flags); + mutex_lock(&par->bo_mutex); + cur_fb = par->set_fb; + if (!cur_fb) + goto out_unlock; - for (i = y * stride; i < info->fix.smem_len / 4; i += stride) { - for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++) - iowrite32(src[k], vram_mem + k); + spin_lock_irqsave(&par->dirty.lock, irq_flags); + if (!par->dirty.active) { + spin_unlock_irqrestore(&par->dirty.lock, irq_flags); + goto out_unlock; } -#if 0 - DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h); -#endif + /* + * Handle panning when copying from vmalloc to framebuffer. + * Clip dirty area to framebuffer. 
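+ * The dirty rectangle arrives in virtual (panned) coordinates, so it is
+ * first translated by the pan offset and then clamped, roughly:
+ *
+ *	dst_x1 = max(dirty.x1 - fb_x, 0);
+ *	dst_x2 = min(dirty.x2 - fb_x, max_x);
+ *
+ * and analogously for y.  A resulting non-positive width or height means
+ * the dirty region lies entirely outside the current framebuffer, and the
+ * copy is skipped.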
+ */ + cpp = (cur_fb->bits_per_pixel + 7) / 8; + max_x = par->fb_x + cur_fb->width; + max_y = par->fb_y + cur_fb->height; + + dst_x1 = par->dirty.x1 - par->fb_x; + dst_y1 = par->dirty.y1 - par->fb_y; + dst_x1 = max_t(s32, dst_x1, 0); + dst_y1 = max_t(s32, dst_y1, 0); + + dst_x2 = par->dirty.x2 - par->fb_x; + dst_y2 = par->dirty.y2 - par->fb_y; + dst_x2 = min_t(s32, dst_x2, max_x); + dst_y2 = min_t(s32, dst_y2, max_y); + w = dst_x2 - dst_x1; + h = dst_y2 - dst_y1; + w = max_t(s32, 0, w); + h = max_t(s32, 0, h); - cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd)); - if (unlikely(cmd == NULL)) { - DRM_ERROR("Fifo reserve failed.\n"); - return; + par->dirty.x1 = par->dirty.x2 = 0; + par->dirty.y1 = par->dirty.y2 = 0; + spin_unlock_irqrestore(&par->dirty.lock, irq_flags); + + if (w && h) { + dst_ptr = (u8 *)par->bo_ptr + + (dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp); + src_ptr = (u8 *)par->vmalloc + + ((dst_y1 + par->fb_y) * info->fix.line_length + + (dst_x1 + par->fb_x) * cpp); + + while (h-- > 0) { + memcpy(dst_ptr, src_ptr, w*cpp); + dst_ptr += par->set_fb->pitches[0]; + src_ptr += info->fix.line_length; + } + + clip.x1 = dst_x1; + clip.x2 = dst_x2; + clip.y1 = dst_y1; + clip.y2 = dst_y2; + + WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0, + &clip, 1)); + vmw_fifo_flush(vmw_priv, false); } - - cmd->header = cpu_to_le32(SVGA_CMD_UPDATE); - cmd->body.x = cpu_to_le32(x); - cmd->body.y = cpu_to_le32(y); - cmd->body.width = cpu_to_le32(w); - cmd->body.height = cpu_to_le32(h); - vmw_fifo_commit(vmw_priv, sizeof(*cmd)); +out_unlock: + mutex_unlock(&par->bo_mutex); } static void vmw_fb_dirty_mark(struct vmw_fb_par *par, @@ -291,6 +276,28 @@ static void vmw_fb_dirty_mark(struct vmw_fb_par *par, spin_unlock_irqrestore(&par->dirty.lock, flags); } +static int vmw_fb_pan_display(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + struct vmw_fb_par *par = info->par; + + if ((var->xoffset + var->xres) > var->xres_virtual || + (var->yoffset + var->yres) > var->yres_virtual) { + DRM_ERROR("Requested panning can not fit in framebuffer\n"); + return -EINVAL; + } + + mutex_lock(&par->bo_mutex); + par->fb_x = var->xoffset; + par->fb_y = var->yoffset; + if (par->set_fb) + vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width, + par->set_fb->height); + mutex_unlock(&par->bo_mutex); + + return 0; +} + static void vmw_deferred_io(struct fb_info *info, struct list_head *pagelist) { @@ -324,7 +331,7 @@ static void vmw_deferred_io(struct fb_info *info, vmw_fb_dirty_flush(par); }; -struct fb_deferred_io vmw_defio = { +static struct fb_deferred_io vmw_defio = { .delay = VMW_DIRTY_DELAY, .deferred_io = vmw_deferred_io, }; @@ -358,33 +365,12 @@ static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image) * Bring up code */ -static struct fb_ops vmw_fb_ops = { - .owner = THIS_MODULE, - .fb_check_var = vmw_fb_check_var, - .fb_set_par = vmw_fb_set_par, - .fb_setcolreg = vmw_fb_setcolreg, - .fb_fillrect = vmw_fb_fillrect, - .fb_copyarea = vmw_fb_copyarea, - .fb_imageblit = vmw_fb_imageblit, - .fb_pan_display = vmw_fb_pan_display, - .fb_blank = vmw_fb_blank, -}; - static int vmw_fb_create_bo(struct vmw_private *vmw_priv, size_t size, struct vmw_dma_buffer **out) { struct vmw_dma_buffer *vmw_bo; - struct ttm_place ne_place = vmw_vram_ne_placement.placement[0]; - struct ttm_placement ne_placement; int ret; - ne_placement.num_placement = 1; - ne_placement.placement = &ne_place; - ne_placement.num_busy_placement = 1; - ne_placement.busy_placement = &ne_place; - - ne_place.lpfn = 
(size + PAGE_SIZE - 1) >> PAGE_SHIFT; - (void) ttm_write_lock(&vmw_priv->reservation_sem, false); vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL); @@ -394,31 +380,265 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv, } ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size, - &ne_placement, + &vmw_sys_placement, false, &vmw_dmabuf_bo_free); if (unlikely(ret != 0)) goto err_unlock; /* init frees the buffer on failure */ *out = vmw_bo; - - ttm_write_unlock(&vmw_priv->fbdev_master.lock); + ttm_write_unlock(&vmw_priv->reservation_sem); return 0; err_unlock: - ttm_write_unlock(&vmw_priv->fbdev_master.lock); + ttm_write_unlock(&vmw_priv->reservation_sem); + return ret; +} + +static int vmw_fb_compute_depth(struct fb_var_screeninfo *var, + int *depth) +{ + switch (var->bits_per_pixel) { + case 32: + *depth = (var->transp.length > 0) ? 32 : 24; + break; + default: + DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel); + return -EINVAL; + } + + return 0; +} + +static int vmw_fb_kms_detach(struct vmw_fb_par *par, + bool detach_bo, + bool unref_bo) +{ + struct drm_framebuffer *cur_fb = par->set_fb; + int ret; + + /* Detach the KMS framebuffer from crtcs */ + if (par->set_mode) { + struct drm_mode_set set; + + set.crtc = par->crtc; + set.x = 0; + set.y = 0; + set.mode = NULL; + set.fb = NULL; + set.num_connectors = 1; + set.connectors = &par->con; + ret = drm_mode_set_config_internal(&set); + if (ret) { + DRM_ERROR("Could not unset a mode.\n"); + return ret; + } + drm_mode_destroy(par->vmw_priv->dev, par->set_mode); + par->set_mode = NULL; + } + + if (cur_fb) { + drm_framebuffer_unreference(cur_fb); + par->set_fb = NULL; + } + + if (par->vmw_bo && detach_bo) { + if (par->bo_ptr) { + ttm_bo_kunmap(&par->map); + par->bo_ptr = NULL; + } + if (unref_bo) + vmw_dmabuf_unreference(&par->vmw_bo); + else + vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false); + } + + return 0; +} + +static int vmw_fb_kms_framebuffer(struct fb_info *info) +{ + struct drm_mode_fb_cmd mode_cmd; + struct vmw_fb_par *par = info->par; + struct fb_var_screeninfo *var = &info->var; + struct drm_framebuffer *cur_fb; + struct vmw_framebuffer *vfb; + int ret = 0; + size_t new_bo_size; + + ret = vmw_fb_compute_depth(var, &mode_cmd.depth); + if (ret) + return ret; + + mode_cmd.width = var->xres; + mode_cmd.height = var->yres; + mode_cmd.bpp = var->bits_per_pixel; + mode_cmd.pitch = ((mode_cmd.bpp + 7) / 8) * mode_cmd.width; + + cur_fb = par->set_fb; + if (cur_fb && cur_fb->width == mode_cmd.width && + cur_fb->height == mode_cmd.height && + cur_fb->bits_per_pixel == mode_cmd.bpp && + cur_fb->depth == mode_cmd.depth && + cur_fb->pitches[0] == mode_cmd.pitch) + return 0; + + /* Need new buffer object ? */ + new_bo_size = (size_t) mode_cmd.pitch * (size_t) mode_cmd.height; + ret = vmw_fb_kms_detach(par, + par->bo_size < new_bo_size || + par->bo_size > 2*new_bo_size, + true); + if (ret) + return ret; + + if (!par->vmw_bo) { + ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size, + &par->vmw_bo); + if (ret) { + DRM_ERROR("Failed creating a buffer object for " + "fbdev.\n"); + return ret; + } + par->bo_size = new_bo_size; + } + + vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL, + true, &mode_cmd); + if (IS_ERR(vfb)) + return PTR_ERR(vfb); + + par->set_fb = &vfb->base; + + if (!par->bo_ptr) { + /* + * Pin before mapping. Since we don't know in what placement + * to pin, call into KMS to do it for us. 
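+	 * Pinning before the ttm_bo_kmap() below also keeps the buffer
+	 * object from being moved while bo_ptr is in use by the dirty
+	 * flush copies, so the kernel mapping stays valid.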
+	 */
+	ret = vfb->pin(vfb);
+	if (ret) {
+		DRM_ERROR("Could not pin the fbdev framebuffer.\n");
+		return ret;
+	}
+
+	ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
+			  par->vmw_bo->base.num_pages, &par->map);
+	if (ret) {
+		vfb->unpin(vfb);
+		DRM_ERROR("Could not map the fbdev framebuffer.\n");
+		return ret;
+	}
+
+	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
+	}
+
+	return 0;
+}
+
+static int vmw_fb_set_par(struct fb_info *info)
+{
+	struct vmw_fb_par *par = info->par;
+	struct vmw_private *vmw_priv = par->vmw_priv;
+	struct drm_mode_set set;
+	struct fb_var_screeninfo *var = &info->var;
+	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
+		DRM_MODE_TYPE_DRIVER,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+	};
+	struct drm_display_mode *old_mode;
+	struct drm_display_mode *mode;
+	int ret;
+
+	old_mode = par->set_mode;
+	mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
+	if (!mode) {
+		DRM_ERROR("Could not create new fb mode.\n");
+		return -ENOMEM;
+	}
+
+	mode->hdisplay = var->xres;
+	mode->vdisplay = var->yres;
+	vmw_guess_mode_timing(mode);
+
+	if (old_mode && drm_mode_equal(old_mode, mode)) {
+		drm_mode_destroy(vmw_priv->dev, mode);
+		mode = old_mode;
+		old_mode = NULL;
+	} else if (!vmw_kms_validate_mode_vram(vmw_priv,
+					       mode->hdisplay *
+					       (var->bits_per_pixel + 7) / 8,
+					       mode->vdisplay)) {
+		drm_mode_destroy(vmw_priv->dev, mode);
+		return -EINVAL;
+	}
+
+	mutex_lock(&par->bo_mutex);
+	drm_modeset_lock_all(vmw_priv->dev);
+	ret = vmw_fb_kms_framebuffer(info);
+	if (ret)
+		goto out_unlock;
+
+	par->fb_x = var->xoffset;
+	par->fb_y = var->yoffset;
+
+	set.crtc = par->crtc;
+	set.x = 0;
+	set.y = 0;
+	set.mode = mode;
+	set.fb = par->set_fb;
+	set.num_connectors = 1;
+	set.connectors = &par->con;
+
+	ret = drm_mode_set_config_internal(&set);
+	if (ret)
+		goto out_unlock;
+
+	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
+			  par->set_fb->width, par->set_fb->height);
+
+	/* If there already was stuff dirty we won't
+	 * schedule new work, so let's do it now */
+
+#if (defined(VMWGFX_STANDALONE) && defined(VMWGFX_FB_DEFERRED))
+	schedule_delayed_work(&par->def_par.deferred_work, 0);
+#else
+	schedule_delayed_work(&info->deferred_work, 0);
+#endif
+
+out_unlock:
+	if (old_mode)
+		drm_mode_destroy(vmw_priv->dev, old_mode);
+	par->set_mode = mode;
+
+	drm_modeset_unlock_all(vmw_priv->dev);
+	mutex_unlock(&par->bo_mutex);
+	return ret;
+}
+
+static struct fb_ops vmw_fb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = vmw_fb_check_var,
+	.fb_set_par = vmw_fb_set_par,
+	.fb_setcolreg = vmw_fb_setcolreg,
+	.fb_fillrect = vmw_fb_fillrect,
+	.fb_copyarea = vmw_fb_copyarea,
+	.fb_imageblit = vmw_fb_imageblit,
+	.fb_pan_display = vmw_fb_pan_display,
+	.fb_blank = vmw_fb_blank,
+};
+
 int vmw_fb_init(struct vmw_private *vmw_priv)
 {
 	struct device *device = &vmw_priv->dev->pdev->dev;
 	struct vmw_fb_par *par;
 	struct fb_info *info;
-	unsigned initial_width, initial_height;
 	unsigned fb_width, fb_height;
 	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
+	struct drm_display_mode *init_mode;
 	int ret;
 
 	fb_bpp = 32;
@@ -428,9 +648,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
 	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
 
-	initial_width = min(vmw_priv->initial_width, fb_width);
-	initial_height = min(vmw_priv->initial_height, fb_height);
-
 	fb_pitch = fb_width * fb_bpp / 8;
 	fb_size = fb_pitch * fb_height;
 	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
@@ -444,35 +661,34 @@
int vmw_fb_init(struct vmw_private *vmw_priv) */ vmw_priv->fb_info = info; par = info->par; + memset(par, 0, sizeof(*par)); par->vmw_priv = vmw_priv; - par->depth = fb_depth; - par->bpp = fb_bpp; par->vmalloc = NULL; par->max_width = fb_width; par->max_height = fb_height; + drm_modeset_lock_all(vmw_priv->dev); + ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width, + par->max_height, &par->con, + &par->crtc, &init_mode); + if (ret) { + drm_modeset_unlock_all(vmw_priv->dev); + goto err_kms; + } + + info->var.xres = init_mode->hdisplay; + info->var.yres = init_mode->vdisplay; + drm_modeset_unlock_all(vmw_priv->dev); + /* * Create buffers and alloc memory */ - par->vmalloc = vmalloc(fb_size); + par->vmalloc = vzalloc(fb_size); if (unlikely(par->vmalloc == NULL)) { ret = -ENOMEM; goto err_free; } - ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo); - if (unlikely(ret != 0)) - goto err_free; - - ret = ttm_bo_kmap(&par->vmw_bo->base, - 0, - par->vmw_bo->base.num_pages, - &par->map); - if (unlikely(ret != 0)) - goto err_unref; - par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite); - par->bo_size = fb_size; - /* * Fixed and var */ @@ -490,7 +706,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv) info->fix.smem_len = fb_size; info->pseudo_palette = par->pseudo_palette; - info->screen_base = par->vmalloc; + info->screen_base = (char __iomem *)par->vmalloc; info->screen_size = fb_size; info->flags = FBINFO_DEFAULT; @@ -508,18 +724,14 @@ int vmw_fb_init(struct vmw_private *vmw_priv) info->var.xres_virtual = fb_width; info->var.yres_virtual = fb_height; - info->var.bits_per_pixel = par->bpp; + info->var.bits_per_pixel = fb_bpp; info->var.xoffset = 0; info->var.yoffset = 0; info->var.activate = FB_ACTIVATE_NOW; info->var.height = -1; info->var.width = -1; - info->var.xres = initial_width; - info->var.yres = initial_height; - /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ - info->apertures = alloc_apertures(1); if (!info->apertures) { ret = -ENOMEM; @@ -535,6 +747,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv) par->dirty.y1 = par->dirty.y2 = 0; par->dirty.active = true; spin_lock_init(&par->dirty.lock); + mutex_init(&par->bo_mutex); info->fbdefio = &vmw_defio; fb_deferred_io_init(info); @@ -542,16 +755,16 @@ int vmw_fb_init(struct vmw_private *vmw_priv) if (unlikely(ret != 0)) goto err_defio; + vmw_fb_set_par(info); + return 0; err_defio: fb_deferred_io_cleanup(info); err_aper: - ttm_bo_kunmap(&par->map); -err_unref: - ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo); err_free: vfree(par->vmalloc); +err_kms: framebuffer_release(info); vmw_priv->fb_info = NULL; @@ -562,22 +775,18 @@ int vmw_fb_close(struct vmw_private *vmw_priv) { struct fb_info *info; struct vmw_fb_par *par; - struct ttm_buffer_object *bo; if (!vmw_priv->fb_info) return 0; info = vmw_priv->fb_info; par = info->par; - bo = &par->vmw_bo->base; - par->vmw_bo = NULL; /* ??? 
order */ fb_deferred_io_cleanup(info); unregister_framebuffer(info); - ttm_bo_kunmap(&par->map); - ttm_bo_unref(&bo); + (void) vmw_fb_kms_detach(par, true, true); vfree(par->vmalloc); framebuffer_release(info); @@ -603,10 +812,9 @@ int vmw_fb_off(struct vmw_private *vmw_priv) flush_delayed_work(&info->deferred_work); - par->bo_ptr = NULL; - ttm_bo_kunmap(&par->map); - - vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false); + mutex_lock(&par->bo_mutex); + (void) vmw_fb_kms_detach(par, true, false); + mutex_unlock(&par->bo_mutex); return 0; } @@ -616,8 +824,6 @@ int vmw_fb_on(struct vmw_private *vmw_priv) struct fb_info *info; struct vmw_fb_par *par; unsigned long flags; - bool dummy; - int ret; if (!vmw_priv->fb_info) return -EINVAL; @@ -625,38 +831,10 @@ int vmw_fb_on(struct vmw_private *vmw_priv) info = vmw_priv->fb_info; par = info->par; - /* we are already active */ - if (par->bo_ptr != NULL) - return 0; - - /* Make sure that all overlays are stoped when we take over */ - vmw_overlay_stop_all(vmw_priv); - - ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false); - if (unlikely(ret != 0)) { - DRM_ERROR("could not move buffer to start of VRAM\n"); - goto err_no_buffer; - } - - ret = ttm_bo_kmap(&par->vmw_bo->base, - 0, - par->vmw_bo->base.num_pages, - &par->map); - BUG_ON(ret != 0); - par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy); - + vmw_fb_set_par(info); spin_lock_irqsave(&par->dirty.lock, flags); par->dirty.active = true; spin_unlock_irqrestore(&par->dirty.lock, flags); - -err_no_buffer: - vmw_fb_set_par(info); - - vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres); - - /* If there already was stuff dirty we wont - * schedule a new work, so lets do it now */ - schedule_delayed_work(&info->deferred_work, 0); - + return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 945f1e0dad92..567ddede51d1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a @@ -142,7 +142,7 @@ static bool vmw_fence_enable_signaling(struct fence *f) struct vmw_fence_manager *fman = fman_from_fence(fence); struct vmw_private *dev_priv = fman->dev_priv; - __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + u32 __iomem *fifo_mem = dev_priv->mmio_virt; u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); if (seqno - fence->base.seqno < VMW_FENCE_WRAP) return false; @@ -386,7 +386,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman, u32 passed_seqno) { u32 goal_seqno; - __le32 __iomem *fifo_mem; + u32 __iomem *fifo_mem; struct vmw_fence_obj *fence; if (likely(!fman->seqno_valid)) @@ -430,7 +430,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence) { struct vmw_fence_manager *fman = fman_from_fence(fence); u32 goal_seqno; - __le32 __iomem *fifo_mem; + u32 __iomem *fifo_mem; if (fence_is_signaled_locked(&fence->base)) return false; @@ -453,7 +453,7 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman) struct list_head action_list; bool needs_rerun; uint32_t seqno, new_seqno; - __le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt; + u32 __iomem *fifo_mem = fman->dev_priv->mmio_virt; seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); rerun: diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h index 26a4add39208..8be6c29f5eb5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2011-2012 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 39f2b03888e7..80c40c31d4f8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a @@ -29,9 +29,14 @@ #include <drm/drmP.h> #include <drm/ttm/ttm_placement.h> +struct vmw_temp_set_context { + SVGA3dCmdHeader header; + SVGA3dCmdDXTempSetContext body; +}; + bool vmw_fifo_have_3d(struct vmw_private *dev_priv) { - __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + u32 __iomem *fifo_mem = dev_priv->mmio_virt; uint32_t fifo_min, hwversion; const struct vmw_fifo_state *fifo = &dev_priv->fifo; @@ -71,8 +76,8 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) if (hwversion < SVGA3D_HWVERSION_WS8_B1) return false; - /* Non-Screen Object path does not support surfaces */ - if (!dev_priv->sou_priv) + /* Legacy Display Unit does not support surfaces */ + if (dev_priv->active_display_unit == vmw_du_legacy) return false; return true; @@ -80,7 +85,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv) { - __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + u32 __iomem *fifo_mem = dev_priv->mmio_virt; uint32_t caps; if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) @@ -95,11 +100,11 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv) int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) { - __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + u32 __iomem *fifo_mem = dev_priv->mmio_virt; uint32_t max; uint32_t min; - uint32_t dummy; + fifo->dx = false; fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE; fifo->static_buffer = vmalloc(fifo->static_buffer_size); if (unlikely(fifo->static_buffer == NULL)) @@ -112,10 +117,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) mutex_init(&fifo->fifo_mutex); init_rwsem(&fifo->rwsem); - /* - * Allow mapping the first page read-only to user-space. 
- */ - DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH)); DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); @@ -123,7 +124,10 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); - vmw_write(dev_priv, SVGA_REG_ENABLE, 1); + + vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE | + SVGA_REG_ENABLE_HIDE); + vmw_write(dev_priv, SVGA_REG_TRACES, 0); min = 4; if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO) @@ -155,12 +159,13 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno); iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE); vmw_marker_queue_init(&fifo->marker_queue); - return vmw_fifo_send_fence(dev_priv, &dummy); + + return 0; } void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) { - __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + u32 __iomem *fifo_mem = dev_priv->mmio_virt; static DEFINE_SPINLOCK(ping_lock); unsigned long irq_flags; @@ -178,7 +183,7 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) { - __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + u32 __iomem *fifo_mem = dev_priv->mmio_virt; vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) @@ -208,7 +213,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes) { - __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + u32 __iomem *fifo_mem = dev_priv->mmio_virt; uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX); uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN); @@ -312,10 +317,11 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, * Returns: * Pointer to the fifo, or null on error (possible hardware hang). 
*/ -void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) +static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv, + uint32_t bytes) { struct vmw_fifo_state *fifo_state = &dev_priv->fifo; - __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + u32 __iomem *fifo_mem = dev_priv->mmio_virt; uint32_t max; uint32_t min; uint32_t next_cmd; @@ -372,7 +378,8 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) if (reserveable) iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED); - return fifo_mem + (next_cmd >> 2); + return (void __force *) (fifo_mem + + (next_cmd >> 2)); } else { need_bounce = true; } @@ -391,11 +398,36 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) out_err: fifo_state->reserved_size = 0; mutex_unlock(&fifo_state->fifo_mutex); + return NULL; } +void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, + int ctx_id) +{ + void *ret; + + if (dev_priv->cman) + ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes, + ctx_id, false, NULL); + else if (ctx_id == SVGA3D_INVALID_ID) + ret = vmw_local_fifo_reserve(dev_priv, bytes); + else { + WARN_ON("Command buffer has not been allocated.\n"); + ret = NULL; + } + if (IS_ERR_OR_NULL(ret)) { + DRM_ERROR("Fifo reserve failure of %u bytes.\n", + (unsigned) bytes); + dump_stack(); + return NULL; + } + + return ret; +} + static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state, - __le32 __iomem *fifo_mem, + u32 __iomem *fifo_mem, uint32_t next_cmd, uint32_t max, uint32_t min, uint32_t bytes) { @@ -417,7 +449,7 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state, } static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state, - __le32 __iomem *fifo_mem, + u32 __iomem *fifo_mem, uint32_t next_cmd, uint32_t max, uint32_t min, uint32_t bytes) { @@ -436,15 +468,19 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state, } } -void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) +static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) { struct vmw_fifo_state *fifo_state = &dev_priv->fifo; - __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + u32 __iomem *fifo_mem = dev_priv->mmio_virt; uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX); uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN); bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; + if (fifo_state->dx) + bytes += sizeof(struct vmw_temp_set_context); + + fifo_state->dx = false; BUG_ON((bytes & 3) != 0); BUG_ON(bytes > fifo_state->reserved_size); @@ -482,13 +518,53 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) mutex_unlock(&fifo_state->fifo_mutex); } +void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) +{ + if (dev_priv->cman) + vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false); + else + vmw_local_fifo_commit(dev_priv, bytes); +} + + +/** + * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands. + * + * @dev_priv: Pointer to device private structure. + * @bytes: Number of bytes to commit. + */ +void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes) +{ + if (dev_priv->cman) + vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true); + else + vmw_local_fifo_commit(dev_priv, bytes); +} + +/** + * vmw_fifo_flush - Flush any buffered commands and make sure command processing + * starts. + * + * @dev_priv: Pointer to device private structure. 
+ * @interruptible: Whether to wait interruptible if function needs to sleep. + */ +int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible) +{ + might_sleep(); + + if (dev_priv->cman) + return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible); + else + return 0; +} + int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) { struct vmw_fifo_state *fifo_state = &dev_priv->fifo; struct svga_fifo_cmd_fence *cmd_fence; - void *fm; + u32 *fm; int ret = 0; - uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence); + uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence); fm = vmw_fifo_reserve(dev_priv, bytes); if (unlikely(fm == NULL)) { @@ -514,12 +590,10 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) return 0; } - *(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE); - cmd_fence = (struct svga_fifo_cmd_fence *) - ((unsigned long)fm + sizeof(__le32)); - - iowrite32(*seqno, &cmd_fence->fence); - vmw_fifo_commit(dev_priv, bytes); + *fm++ = SVGA_CMD_FENCE; + cmd_fence = (struct svga_fifo_cmd_fence *) fm; + cmd_fence->fence = *seqno; + vmw_fifo_commit_flush(dev_priv, bytes); (void) vmw_marker_push(&fifo_state->marker_queue, *seqno); vmw_update_seqno(dev_priv, fifo_state); @@ -545,7 +619,7 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv, * without writing to the query result structure. */ - struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; + struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base; struct { SVGA3dCmdHeader header; SVGA3dCmdWaitForQuery body; @@ -594,7 +668,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv, * without writing to the query result structure. */ - struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; + struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base; struct { SVGA3dCmdHeader header; SVGA3dCmdWaitForGBQuery body; @@ -647,3 +721,8 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid); } + +void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) +{ + return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c index 61d8d803199f..66ffa1d4759c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 69c8ce23123c..0a970afed93b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. 
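A note on the new FIFO API shape: vmw_fifo_reserve() is now a thin wrapper around vmw_fifo_reserve_dx(), and vmw_fifo_commit()/vmw_fifo_commit_flush() route through the command buffer manager when one exists, so callers keep the same reserve/fill/commit pattern either way. A minimal caller sketch, assuming the SVGAFifoCmdUpdate layout the legacy dirty path in this driver already uses (the helper name is illustrative, not from the patch):

static int example_emit_update(struct vmw_private *dev_priv, u32 width, u32 height)
{
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	/* Reserve space; on failure the reserve path has already logged. */
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header = SVGA_CMD_UPDATE;
	cmd->body.x = 0;
	cmd->body.y = 0;
	cmd->body.width = width;
	cmd->body.height = height;

	/* Commit exactly what was reserved; the cman path may buffer it. */
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	return 0;
}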
* * Permission is hereby granted, free of charge, to any person obtaining a @@ -28,6 +28,7 @@ #include "vmwgfx_drv.h" #include <drm/vmwgfx_drm.h> #include "vmwgfx_kms.h" +#include "device_include/svga3d_caps.h" struct svga_3d_compat_cap { SVGA3dCapsRecordHeader header; @@ -63,7 +64,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, break; case DRM_VMW_PARAM_FIFO_HW_VERSION: { - __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + u32 __iomem *fifo_mem = dev_priv->mmio_virt; const struct vmw_fifo_state *fifo = &dev_priv->fifo; if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) { @@ -105,6 +106,13 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, case DRM_VMW_PARAM_MAX_MOB_SIZE: param->value = dev_priv->max_mob_size; break; + case DRM_VMW_PARAM_SCREEN_TARGET: + param->value = + (dev_priv->active_display_unit == vmw_du_screen_target); + break; + case DRM_VMW_PARAM_DX: + param->value = dev_priv->has_dx; + break; default: DRM_ERROR("Illegal vmwgfx get param request: %d\n", param->param); @@ -154,7 +162,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, (struct drm_vmw_get_3d_cap_arg *) data; struct vmw_private *dev_priv = vmw_priv(dev); uint32_t size; - __le32 __iomem *fifo_mem; + u32 __iomem *fifo_mem; void __user *buffer = (void __user *)((unsigned long)(arg->buffer)); void *bounce; int ret; @@ -235,7 +243,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data, int ret; num_clips = arg->num_clips; - clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr; + clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr; if (unlikely(num_clips == 0)) return 0; @@ -318,7 +326,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data, int ret; num_clips = arg->num_clips; - clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr; + clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr; if (unlikely(num_clips == 0)) return 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index 9fe9827ee499..9498a5e33c12 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -56,6 +56,9 @@ irqreturn_t vmw_irq_handler(int irq, void *arg) if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS) wake_up_all(&dev_priv->fifo_queue); + if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER | + SVGA_IRQFLAG_ERROR)) + vmw_cmdbuf_tasklet_schedule(dev_priv->cman); return IRQ_HANDLED; } @@ -69,7 +72,7 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) void vmw_update_seqno(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo_state) { - __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + u32 __iomem *fifo_mem = dev_priv->mmio_virt; uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); if (dev_priv->last_read_seqno != seqno) { @@ -131,8 +134,16 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, * Block command submission while waiting for idle. 
*/ - if (fifo_idle) + if (fifo_idle) { down_read(&fifo_state->rwsem); + if (dev_priv->cman) { + ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible, + 10*HZ); + if (ret) + goto out_err; + } + } + signal_seq = atomic_read(&dev_priv->marker_seq); ret = 0; @@ -167,10 +178,11 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, } finish_wait(&dev_priv->fence_queue, &__wait); if (ret == 0 && fifo_idle) { - __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + u32 __iomem *fifo_mem = dev_priv->mmio_virt; iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE); } wake_up_all(&dev_priv->fence_queue); +out_err: if (fifo_idle) up_read(&fifo_state->rwsem); @@ -315,3 +327,30 @@ void vmw_irq_uninstall(struct drm_device *dev) status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); } + +void vmw_generic_waiter_add(struct vmw_private *dev_priv, + u32 flag, int *waiter_count) +{ + unsigned long irq_flags; + + spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); + if ((*waiter_count)++ == 0) { + outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); + dev_priv->irq_mask |= flag; + vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); + } + spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); +} + +void vmw_generic_waiter_remove(struct vmw_private *dev_priv, + u32 flag, int *waiter_count) +{ + unsigned long irq_flags; + + spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); + if (--(*waiter_count) == 0) { + dev_priv->irq_mask &= ~flag; + vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); + } + spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 2adc11bc0920..61fb7f3de311 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -31,45 +31,7 @@ /* Might need a hrtimer here? */ #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) - -struct vmw_clip_rect { - int x1, x2, y1, y2; -}; - -/** - * Clip @num_rects number of @rects against @clip storing the - * results in @out_rects and the number of passed rects in @out_num. 
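vmw_generic_waiter_add()/vmw_generic_waiter_remove() factor the unmask-the-IRQ-while-anyone-waits accounting out of the fence/fifo paths so other waiters, such as the command buffer code this series adds, can reuse it. A sketch of the intended bracketing; the error_waiters counter field and the done() predicate are assumptions for illustration only:

static int example_wait_on_error_irq(struct vmw_private *dev_priv, bool (*done)(struct vmw_private *))
{
	long ret;

	/* Unmask SVGA_IRQFLAG_ERROR for as long as someone is waiting. */
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
			       &dev_priv->error_waiters);
	ret = wait_event_timeout(dev_priv->fence_queue, done(dev_priv),
				 10 * HZ);
	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ERROR,
				  &dev_priv->error_waiters);

	/* wait_event_timeout() returns 0 on timeout. */
	return ret ? 0 : -EBUSY;
}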
- */ -static void vmw_clip_cliprects(struct drm_clip_rect *rects, - int num_rects, - struct vmw_clip_rect clip, - SVGASignedRect *out_rects, - int *out_num) -{ - int i, k; - - for (i = 0, k = 0; i < num_rects; i++) { - int x1 = max_t(int, clip.x1, rects[i].x1); - int y1 = max_t(int, clip.y1, rects[i].y1); - int x2 = min_t(int, clip.x2, rects[i].x2); - int y2 = min_t(int, clip.y2, rects[i].y2); - - if (x1 >= x2) - continue; - if (y1 >= y2) - continue; - - out_rects[k].left = x1; - out_rects[k].top = y1; - out_rects[k].right = x2; - out_rects[k].bottom = y2; - k++; - } - - *out_num = k; -} - -void vmw_display_unit_cleanup(struct vmw_display_unit *du) +void vmw_du_cleanup(struct vmw_display_unit *du) { if (du->cursor_surface) vmw_surface_unreference(&du->cursor_surface); @@ -109,12 +71,12 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv, memcpy(&cmd[1], image, image_size); - cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR); - cmd->cursor.id = cpu_to_le32(0); - cmd->cursor.width = cpu_to_le32(width); - cmd->cursor.height = cpu_to_le32(height); - cmd->cursor.hotspotX = cpu_to_le32(hotspotX); - cmd->cursor.hotspotY = cpu_to_le32(hotspotY); + cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR; + cmd->cursor.id = 0; + cmd->cursor.width = width; + cmd->cursor.height = height; + cmd->cursor.hotspotX = hotspotX; + cmd->cursor.hotspotY = hotspotY; vmw_fifo_commit(dev_priv, cmd_size); @@ -161,7 +123,7 @@ err_unreserve: void vmw_cursor_update_position(struct vmw_private *dev_priv, bool show, int x, int y) { - __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + u32 __iomem *fifo_mem = dev_priv->mmio_virt; uint32_t count; iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON); @@ -367,15 +329,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, srf->snooper.age++; - /* we can't call this function from this function since execbuf has - * reserved fifo space. 
- * - * if (srf->snooper.crtc) - * vmw_ldu_crtc_cursor_update_image(dev_priv, - * srf->snooper.image, 64, 64, - * du->hotspot_x, du->hotspot_y); - */ - ttm_bo_kunmap(&map); err_unreserve: ttm_bo_unreserve(bo); @@ -412,183 +365,19 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) * Surface framebuffer code */ -#define vmw_framebuffer_to_vfbs(x) \ - container_of(x, struct vmw_framebuffer_surface, base.base) - -struct vmw_framebuffer_surface { - struct vmw_framebuffer base; - struct vmw_surface *surface; - struct vmw_dma_buffer *buffer; - struct list_head head; - struct drm_master *master; -}; - static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) { struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(framebuffer); - struct vmw_master *vmaster = vmw_master(vfbs->master); - - mutex_lock(&vmaster->fb_surf_mutex); - list_del(&vfbs->head); - mutex_unlock(&vmaster->fb_surf_mutex); - - drm_master_put(&vfbs->master); drm_framebuffer_cleanup(framebuffer); vmw_surface_unreference(&vfbs->surface); - ttm_base_object_unref(&vfbs->base.user_obj); + if (vfbs->base.user_obj) + ttm_base_object_unref(&vfbs->base.user_obj); kfree(vfbs); } -static int do_surface_dirty_sou(struct vmw_private *dev_priv, - struct drm_file *file_priv, - struct vmw_framebuffer *framebuffer, - unsigned flags, unsigned color, - struct drm_clip_rect *clips, - unsigned num_clips, int inc, - struct vmw_fence_obj **out_fence) -{ - struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; - struct drm_clip_rect *clips_ptr; - struct drm_clip_rect *tmp; - struct drm_crtc *crtc; - size_t fifo_size; - int i, num_units; - int ret = 0; /* silence warning */ - int left, right, top, bottom; - - struct { - SVGA3dCmdHeader header; - SVGA3dCmdBlitSurfaceToScreen body; - } *cmd; - SVGASignedRect *blits; - - num_units = 0; - list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, - head) { - if (crtc->primary->fb != &framebuffer->base) - continue; - units[num_units++] = vmw_crtc_to_du(crtc); - } - - BUG_ON(!clips || !num_clips); - - tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL); - if (unlikely(tmp == NULL)) { - DRM_ERROR("Temporary cliprect memory alloc failed.\n"); - return -ENOMEM; - } - - fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips; - cmd = kzalloc(fifo_size, GFP_KERNEL); - if (unlikely(cmd == NULL)) { - DRM_ERROR("Temporary fifo memory alloc failed.\n"); - ret = -ENOMEM; - goto out_free_tmp; - } - - /* setup blits pointer */ - blits = (SVGASignedRect *)&cmd[1]; - - /* initial clip region */ - left = clips->x1; - right = clips->x2; - top = clips->y1; - bottom = clips->y2; - - /* skip the first clip rect */ - for (i = 1, clips_ptr = clips + inc; - i < num_clips; i++, clips_ptr += inc) { - left = min_t(int, left, (int)clips_ptr->x1); - right = max_t(int, right, (int)clips_ptr->x2); - top = min_t(int, top, (int)clips_ptr->y1); - bottom = max_t(int, bottom, (int)clips_ptr->y2); - } - - /* only need to do this once */ - cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN); - cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header)); - - cmd->body.srcRect.left = left; - cmd->body.srcRect.right = right; - cmd->body.srcRect.top = top; - cmd->body.srcRect.bottom = bottom; - - clips_ptr = clips; - for (i = 0; i < num_clips; i++, clips_ptr += inc) { - tmp[i].x1 = clips_ptr->x1 - left; - tmp[i].x2 = clips_ptr->x2 - left; - tmp[i].y1 = clips_ptr->y1 - top; - tmp[i].y2 = clips_ptr->y2 - top; - } - - /* do per unit writing, reuse fifo for each */ - for (i = 0; 
i < num_units; i++) { - struct vmw_display_unit *unit = units[i]; - struct vmw_clip_rect clip; - int num; - - clip.x1 = left - unit->crtc.x; - clip.y1 = top - unit->crtc.y; - clip.x2 = right - unit->crtc.x; - clip.y2 = bottom - unit->crtc.y; - - /* skip any crtcs that misses the clip region */ - if (clip.x1 >= unit->crtc.mode.hdisplay || - clip.y1 >= unit->crtc.mode.vdisplay || - clip.x2 <= 0 || clip.y2 <= 0) - continue; - - /* - * In order for the clip rects to be correctly scaled - * the src and dest rects needs to be the same size. - */ - cmd->body.destRect.left = clip.x1; - cmd->body.destRect.right = clip.x2; - cmd->body.destRect.top = clip.y1; - cmd->body.destRect.bottom = clip.y2; - - /* create a clip rect of the crtc in dest coords */ - clip.x2 = unit->crtc.mode.hdisplay - clip.x1; - clip.y2 = unit->crtc.mode.vdisplay - clip.y1; - clip.x1 = 0 - clip.x1; - clip.y1 = 0 - clip.y1; - - /* need to reset sid as it is changed by execbuf */ - cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle); - cmd->body.destScreenId = unit->unit; - - /* clip and write blits to cmd stream */ - vmw_clip_cliprects(tmp, num_clips, clip, blits, &num); - - /* if no cliprects hit skip this */ - if (num == 0) - continue; - - /* only return the last fence */ - if (out_fence && *out_fence) - vmw_fence_obj_unreference(out_fence); - - /* recalculate package length */ - fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num; - cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header)); - ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, - fifo_size, 0, NULL, out_fence); - - if (unlikely(ret != 0)) - break; - } - - - kfree(cmd); -out_free_tmp: - kfree(tmp); - - return ret; -} - static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, struct drm_file *file_priv, unsigned flags, unsigned color, @@ -601,11 +390,8 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, struct drm_clip_rect norect; int ret, inc = 1; - if (unlikely(vfbs->master != file_priv->master)) - return -EINVAL; - - /* Require ScreenObject support for 3D */ - if (!dev_priv->sou_priv) + /* Legacy Display Unit does not support 3D */ + if (dev_priv->active_display_unit == vmw_du_legacy) return -EINVAL; drm_modeset_lock_all(dev_priv->dev); @@ -627,10 +413,16 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, inc = 2; /* skip source rects */ } - ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base, - flags, color, - clips, num_clips, inc, NULL); + if (dev_priv->active_display_unit == vmw_du_screen_object) + ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base, + clips, NULL, NULL, 0, 0, + num_clips, inc, NULL); + else + ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base, + clips, NULL, NULL, 0, 0, + num_clips, inc, NULL); + vmw_fifo_flush(dev_priv, false); ttm_read_unlock(&dev_priv->reservation_sem); drm_modeset_unlock_all(dev_priv->dev); @@ -638,27 +430,66 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, return 0; } +/** + * vmw_kms_readback - Perform a readback from the screen system to + * a dma-buffer backed framebuffer. + * + * @dev_priv: Pointer to the device private structure. + * @file_priv: Pointer to a struct drm_file identifying the caller. + * Must be set to NULL if @user_fence_rep is NULL. + * @vfb: Pointer to the dma-buffer backed framebuffer. + * @user_fence_rep: User-space provided structure for fence information. + * Must be set to non-NULL if @file_priv is non-NULL. 
+ * @vclips: Array of clip rects. + * @num_clips: Number of clip rects in @vclips. + * + * Returns 0 on success, negative error code on failure. -ERESTARTSYS if + * interrupted. + */ +int vmw_kms_readback(struct vmw_private *dev_priv, + struct drm_file *file_priv, + struct vmw_framebuffer *vfb, + struct drm_vmw_fence_rep __user *user_fence_rep, + struct drm_vmw_rect *vclips, + uint32_t num_clips) +{ + switch (dev_priv->active_display_unit) { + case vmw_du_screen_object: + return vmw_kms_sou_readback(dev_priv, file_priv, vfb, + user_fence_rep, vclips, num_clips); + case vmw_du_screen_target: + return vmw_kms_stdu_dma(dev_priv, file_priv, vfb, + user_fence_rep, NULL, vclips, num_clips, + 1, false, true); + default: + WARN_ONCE(true, + "Readback called with invalid display system.\n"); + } + + return -ENOSYS; +} + + static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = { .destroy = vmw_framebuffer_surface_destroy, .dirty = vmw_framebuffer_surface_dirty, }; static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, - struct drm_file *file_priv, struct vmw_surface *surface, struct vmw_framebuffer **out, const struct drm_mode_fb_cmd - *mode_cmd) + *mode_cmd, + bool is_dmabuf_proxy) { struct drm_device *dev = dev_priv->dev; struct vmw_framebuffer_surface *vfbs; enum SVGA3dSurfaceFormat format; - struct vmw_master *vmaster = vmw_master(file_priv->master); int ret; - /* 3D is only supported on HWv8 hosts which supports screen objects */ - if (!dev_priv->sou_priv) + /* 3D is only supported on HWv8 and newer hosts */ + if (dev_priv->active_display_unit == vmw_du_legacy) return -ENOSYS; /* @@ -692,15 +523,16 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, case 15: format = SVGA3D_A1R5G5B5; break; - case 8: - format = SVGA3D_LUMINANCE8; - break; default: DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth); return -EINVAL; } - if (unlikely(format != surface->format)) { + /* + * For DX, surface format validation is done when surface->scanout + * is set.
+ */ + if (!dev_priv->has_dx && format != surface->format) { DRM_ERROR("Invalid surface format for requested mode.\n"); return -EINVAL; } @@ -711,38 +543,27 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, goto out_err1; } - if (!vmw_surface_reference(surface)) { - DRM_ERROR("failed to reference surface %p\n", surface); - ret = -EINVAL; - goto out_err2; - } - /* XXX get the first 3 from the surface info */ vfbs->base.base.bits_per_pixel = mode_cmd->bpp; vfbs->base.base.pitches[0] = mode_cmd->pitch; vfbs->base.base.depth = mode_cmd->depth; vfbs->base.base.width = mode_cmd->width; vfbs->base.base.height = mode_cmd->height; - vfbs->surface = surface; + vfbs->surface = vmw_surface_reference(surface); vfbs->base.user_handle = mode_cmd->handle; - vfbs->master = drm_master_get(file_priv->master); - - mutex_lock(&vmaster->fb_surf_mutex); - list_add_tail(&vfbs->head, &vmaster->fb_surf); - mutex_unlock(&vmaster->fb_surf_mutex); + vfbs->is_dmabuf_proxy = is_dmabuf_proxy; *out = &vfbs->base; ret = drm_framebuffer_init(dev, &vfbs->base.base, &vmw_framebuffer_surface_funcs); if (ret) - goto out_err3; + goto out_err2; return 0; -out_err3: - vmw_surface_unreference(&surface); out_err2: + vmw_surface_unreference(&surface); kfree(vfbs); out_err1: return ret; @@ -752,14 +573,6 @@ out_err1: * Dmabuf framebuffer code */ -#define vmw_framebuffer_to_vfbd(x) \ - container_of(x, struct vmw_framebuffer_dmabuf, base.base) - -struct vmw_framebuffer_dmabuf { - struct vmw_framebuffer base; - struct vmw_dma_buffer *buffer; -}; - static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) { struct vmw_framebuffer_dmabuf *vfbd = @@ -767,185 +580,12 @@ static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) drm_framebuffer_cleanup(framebuffer); vmw_dmabuf_unreference(&vfbd->buffer); - ttm_base_object_unref(&vfbd->base.user_obj); + if (vfbd->base.user_obj) + ttm_base_object_unref(&vfbd->base.user_obj); kfree(vfbd); } -static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv, - struct vmw_framebuffer *framebuffer, - unsigned flags, unsigned color, - struct drm_clip_rect *clips, - unsigned num_clips, int increment) -{ - size_t fifo_size; - int i; - - struct { - uint32_t header; - SVGAFifoCmdUpdate body; - } *cmd; - - fifo_size = sizeof(*cmd) * num_clips; - cmd = vmw_fifo_reserve(dev_priv, fifo_size); - if (unlikely(cmd == NULL)) { - DRM_ERROR("Fifo reserve failed.\n"); - return -ENOMEM; - } - - memset(cmd, 0, fifo_size); - for (i = 0; i < num_clips; i++, clips += increment) { - cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE); - cmd[i].body.x = cpu_to_le32(clips->x1); - cmd[i].body.y = cpu_to_le32(clips->y1); - cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1); - cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1); - } - - vmw_fifo_commit(dev_priv, fifo_size); - return 0; -} - -static int do_dmabuf_define_gmrfb(struct drm_file *file_priv, - struct vmw_private *dev_priv, - struct vmw_framebuffer *framebuffer) -{ - int depth = framebuffer->base.depth; - size_t fifo_size; - int ret; - - struct { - uint32_t header; - SVGAFifoCmdDefineGMRFB body; - } *cmd; - - /* Emulate RGBA support, contrary to svga_reg.h this is not - * supported by hosts. This is only a problem if we are reading - * this value later and expecting what we uploaded back. 
- */ - if (depth == 32) - depth = 24; - - fifo_size = sizeof(*cmd); - cmd = kmalloc(fifo_size, GFP_KERNEL); - if (unlikely(cmd == NULL)) { - DRM_ERROR("Failed to allocate temporary cmd buffer.\n"); - return -ENOMEM; - } - - memset(cmd, 0, fifo_size); - cmd->header = SVGA_CMD_DEFINE_GMRFB; - cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel; - cmd->body.format.colorDepth = depth; - cmd->body.format.reserved = 0; - cmd->body.bytesPerLine = framebuffer->base.pitches[0]; - cmd->body.ptr.gmrId = framebuffer->user_handle; - cmd->body.ptr.offset = 0; - - ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, - fifo_size, 0, NULL, NULL); - - kfree(cmd); - - return ret; -} - -static int do_dmabuf_dirty_sou(struct drm_file *file_priv, - struct vmw_private *dev_priv, - struct vmw_framebuffer *framebuffer, - unsigned flags, unsigned color, - struct drm_clip_rect *clips, - unsigned num_clips, int increment, - struct vmw_fence_obj **out_fence) -{ - struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; - struct drm_clip_rect *clips_ptr; - int i, k, num_units, ret; - struct drm_crtc *crtc; - size_t fifo_size; - - struct { - uint32_t header; - SVGAFifoCmdBlitGMRFBToScreen body; - } *blits; - - ret = do_dmabuf_define_gmrfb(file_priv, dev_priv, framebuffer); - if (unlikely(ret != 0)) - return ret; /* define_gmrfb prints warnings */ - - fifo_size = sizeof(*blits) * num_clips; - blits = kmalloc(fifo_size, GFP_KERNEL); - if (unlikely(blits == NULL)) { - DRM_ERROR("Failed to allocate temporary cmd buffer.\n"); - return -ENOMEM; - } - - num_units = 0; - list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) { - if (crtc->primary->fb != &framebuffer->base) - continue; - units[num_units++] = vmw_crtc_to_du(crtc); - } - - for (k = 0; k < num_units; k++) { - struct vmw_display_unit *unit = units[k]; - int hit_num = 0; - - clips_ptr = clips; - for (i = 0; i < num_clips; i++, clips_ptr += increment) { - int clip_x1 = clips_ptr->x1 - unit->crtc.x; - int clip_y1 = clips_ptr->y1 - unit->crtc.y; - int clip_x2 = clips_ptr->x2 - unit->crtc.x; - int clip_y2 = clips_ptr->y2 - unit->crtc.y; - int move_x, move_y; - - /* skip any crtcs that misses the clip region */ - if (clip_x1 >= unit->crtc.mode.hdisplay || - clip_y1 >= unit->crtc.mode.vdisplay || - clip_x2 <= 0 || clip_y2 <= 0) - continue; - - /* clip size to crtc size */ - clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay); - clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay); - - /* translate both src and dest to bring clip into screen */ - move_x = min_t(int, clip_x1, 0); - move_y = min_t(int, clip_y1, 0); - - /* actual translate done here */ - blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN; - blits[hit_num].body.destScreenId = unit->unit; - blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x; - blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y; - blits[hit_num].body.destRect.left = clip_x1 - move_x; - blits[hit_num].body.destRect.top = clip_y1 - move_y; - blits[hit_num].body.destRect.right = clip_x2; - blits[hit_num].body.destRect.bottom = clip_y2; - hit_num++; - } - - /* no clips hit the crtc */ - if (hit_num == 0) - continue; - - /* only return the last fence */ - if (out_fence && *out_fence) - vmw_fence_obj_unreference(out_fence); - - fifo_size = sizeof(*blits) * hit_num; - ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits, - fifo_size, 0, NULL, out_fence); - - if (unlikely(ret != 0)) - break; - } - - kfree(blits); - - return ret; -} - static int vmw_framebuffer_dmabuf_dirty(struct 
drm_framebuffer *framebuffer, struct drm_file *file_priv, unsigned flags, unsigned color, @@ -977,16 +617,29 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, increment = 2; } - if (dev_priv->ldu_priv) { - ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base, - flags, color, - clips, num_clips, increment); - } else { - ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base, - flags, color, - clips, num_clips, increment, NULL); + switch (dev_priv->active_display_unit) { + case vmw_du_screen_target: + ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL, + clips, NULL, num_clips, increment, + true, true); + break; + case vmw_du_screen_object: + ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base, + clips, num_clips, increment, + true, + NULL); + break; + case vmw_du_legacy: + ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0, + clips, num_clips, increment); + break; + default: + ret = -EINVAL; + WARN_ONCE(true, "Dirty called with invalid display system.\n"); + break; } + vmw_fifo_flush(dev_priv, false); ttm_read_unlock(&dev_priv->reservation_sem); drm_modeset_unlock_all(dev_priv->dev); @@ -1002,41 +655,133 @@ static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = { /** * Pin the dmabuffer to the start of vram. */ -static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) +static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb) { struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); - struct vmw_framebuffer_dmabuf *vfbd = - vmw_framebuffer_to_vfbd(&vfb->base); + struct vmw_dma_buffer *buf; int ret; - /* This code should not be used with screen objects */ - BUG_ON(dev_priv->sou_priv); - - vmw_overlay_pause_all(dev_priv); + buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer : + vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup; - ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false); - - vmw_overlay_resume_all(dev_priv); + if (!buf) + return 0; - WARN_ON(ret != 0); + switch (dev_priv->active_display_unit) { + case vmw_du_legacy: + vmw_overlay_pause_all(dev_priv); + ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false); + vmw_overlay_resume_all(dev_priv); + break; + case vmw_du_screen_object: + case vmw_du_screen_target: + if (vfb->dmabuf) + return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, + false); + + return vmw_dmabuf_pin_in_placement(dev_priv, buf, + &vmw_mob_placement, false); + default: + return -EINVAL; + } - return 0; + return ret; } -static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb) +static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb) { struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); - struct vmw_framebuffer_dmabuf *vfbd = - vmw_framebuffer_to_vfbd(&vfb->base); + struct vmw_dma_buffer *buf; - if (!vfbd->buffer) { - WARN_ON(!vfbd->buffer); + buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer : + vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup; + + if (WARN_ON(!buf)) return 0; + + return vmw_dmabuf_unpin(dev_priv, buf, false); +} + +/** + * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf + * + * @dev: DRM device + * @mode_cmd: parameters for the new surface + * @dmabuf_mob: MOB backing the DMA buf + * @srf_out: newly created surface + * + * When the content FB is a DMA buf, we create a surface as a proxy to the + * same buffer. This way we can do a surface copy rather than a surface DMA. 
+ * This is a more efficient approach. + * + * RETURNS: + * 0 on success, error code otherwise + */ +static int vmw_create_dmabuf_proxy(struct drm_device *dev, + const struct drm_mode_fb_cmd *mode_cmd, + struct vmw_dma_buffer *dmabuf_mob, + struct vmw_surface **srf_out) +{ + uint32_t format; + struct drm_vmw_size content_base_size; + struct vmw_resource *res; + int ret; + + switch (mode_cmd->depth) { + case 32: + case 24: + format = SVGA3D_X8R8G8B8; + break; + + case 16: + case 15: + format = SVGA3D_R5G6B5; + break; + + case 8: + format = SVGA3D_P8; + break; + + default: + DRM_ERROR("Invalid framebuffer format %d\n", mode_cmd->depth); + return -EINVAL; } - return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false); + content_base_size.width = mode_cmd->width; + content_base_size.height = mode_cmd->height; + content_base_size.depth = 1; + + ret = vmw_surface_gb_priv_define(dev, + 0, /* kernel visible only */ + 0, /* flags */ + format, + true, /* can be a scanout buffer */ + 1, /* num of mip levels */ + 0, + 0, + content_base_size, + srf_out); + if (ret) { + DRM_ERROR("Failed to allocate proxy content buffer\n"); + return ret; + } + + res = &(*srf_out)->res; + + /* Reserve and switch the backing mob. */ + mutex_lock(&res->dev_priv->cmdbuf_mutex); + (void) vmw_resource_reserve(res, false, true); + vmw_dmabuf_unreference(&res->backup); + res->backup = vmw_dmabuf_reference(dmabuf_mob); + res->backup_offset = 0; + vmw_resource_unreserve(res, false, NULL, 0); + mutex_unlock(&res->dev_priv->cmdbuf_mutex); + + return 0; } + + static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, struct vmw_dma_buffer *dmabuf, struct vmw_framebuffer **out, @@ -1057,7 +802,7 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, } /* Limited framebuffer color depth support for screen objects */ - if (dev_priv->sou_priv) { + if (dev_priv->active_display_unit == vmw_du_screen_object) { switch (mode_cmd->depth) { case 32: case 24: @@ -1089,41 +834,96 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, goto out_err1; } - if (!vmw_dmabuf_reference(dmabuf)) { - DRM_ERROR("failed to reference dmabuf %p\n", dmabuf); - ret = -EINVAL; - goto out_err2; - } - vfbd->base.base.bits_per_pixel = mode_cmd->bpp; vfbd->base.base.pitches[0] = mode_cmd->pitch; vfbd->base.base.depth = mode_cmd->depth; vfbd->base.base.width = mode_cmd->width; vfbd->base.base.height = mode_cmd->height; - if (!dev_priv->sou_priv) { - vfbd->base.pin = vmw_framebuffer_dmabuf_pin; - vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin; - } vfbd->base.dmabuf = true; - vfbd->buffer = dmabuf; + vfbd->buffer = vmw_dmabuf_reference(dmabuf); vfbd->base.user_handle = mode_cmd->handle; *out = &vfbd->base; ret = drm_framebuffer_init(dev, &vfbd->base.base, &vmw_framebuffer_dmabuf_funcs); if (ret) - goto out_err3; + goto out_err2; return 0; -out_err3: - vmw_dmabuf_unreference(&dmabuf); out_err2: + vmw_dmabuf_unreference(&dmabuf); kfree(vfbd); out_err1: return ret; } +/** + * vmw_kms_new_framebuffer - Create a new framebuffer. + * + * @dev_priv: Pointer to device private struct. + * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around. + * Either @dmabuf or @surface must be NULL. + * @surface: Pointer to a surface to wrap the kms framebuffer around. + * Either @dmabuf or @surface must be NULL. + * @only_2d: No presents will occur to this dma buffer based framebuffer. This + * helps the code to do some important optimizations. + * @mode_cmd: Frame-buffer metadata.
+ */ +struct vmw_framebuffer * +vmw_kms_new_framebuffer(struct vmw_private *dev_priv, + struct vmw_dma_buffer *dmabuf, + struct vmw_surface *surface, + bool only_2d, + const struct drm_mode_fb_cmd *mode_cmd) +{ + struct vmw_framebuffer *vfb = NULL; + bool is_dmabuf_proxy = false; + int ret; + + /* + * We cannot use the SurfaceDMA command in a non-accelerated VM, + * therefore, wrap the DMA buf in a surface so we can use the + * SurfaceCopy command. + */ + if (dmabuf && only_2d && + dev_priv->active_display_unit == vmw_du_screen_target) { + ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd, + dmabuf, &surface); + if (ret) + return ERR_PTR(ret); + + is_dmabuf_proxy = true; + } + + /* Create the new framebuffer depending on what we have */ + if (surface) { + ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb, + mode_cmd, + is_dmabuf_proxy); + + /* + * vmw_create_dmabuf_proxy() adds a reference that is no longer + * needed. + */ + if (is_dmabuf_proxy) + vmw_surface_unreference(&surface); + } else if (dmabuf) { + ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb, + mode_cmd); + } else { + BUG(); + } + + if (ret) + return ERR_PTR(ret); + + vfb->pin = vmw_framebuffer_pin; + vfb->unpin = vmw_framebuffer_unpin; + + return vfb; +} + /* * Generic Kernel modesetting functions */ @@ -1157,7 +957,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, if (!vmw_kms_validate_mode_vram(dev_priv, mode_cmd.pitch, mode_cmd.height)) { - DRM_ERROR("VRAM size is too small for requested mode.\n"); + DRM_ERROR("Requested mode exceeds bounding box limit.\n"); return ERR_PTR(-ENOMEM); } @@ -1187,15 +987,13 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, if (ret) goto err_out; - /* Create the new framebuffer depending one what we got back */ - if (bo) - ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb, - &mode_cmd); - else if (surface) - ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, - surface, &vfb, &mode_cmd); - else - BUG(); + vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface, + !(dev_priv->capabilities & SVGA_CAP_3D), + &mode_cmd); + if (IS_ERR(vfb)) { + ret = PTR_ERR(vfb); + goto err_out; + } err_out: /* vmw_user_lookup_handle takes one ref so does new_fb */ @@ -1218,6 +1016,21 @@ static const struct drm_mode_config_funcs vmw_kms_funcs = { .fb_create = vmw_kms_fb_create, }; +static int vmw_kms_generic_present(struct vmw_private *dev_priv, + struct drm_file *file_priv, + struct vmw_framebuffer *vfb, + struct vmw_surface *surface, + uint32_t sid, + int32_t destX, int32_t destY, + struct drm_vmw_rect *clips, + uint32_t num_clips) +{ + return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips, + &surface->res, destX, destY, + num_clips, 1, NULL); +} + + int vmw_kms_present(struct vmw_private *dev_priv, struct drm_file *file_priv, struct vmw_framebuffer *vfb, @@ -1227,238 +1040,31 @@ int vmw_kms_present(struct vmw_private *dev_priv, struct drm_vmw_rect *clips, uint32_t num_clips) { - struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; - struct drm_clip_rect *tmp; - struct drm_crtc *crtc; - size_t fifo_size; - int i, k, num_units; - int ret = 0; /* silence warning */ - int left, right, top, bottom; - - struct { - SVGA3dCmdHeader header; - SVGA3dCmdBlitSurfaceToScreen body; - } *cmd; - SVGASignedRect *blits; - - num_units = 0; - list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) { - if (crtc->primary->fb != &vfb->base) - continue; - units[num_units++] = vmw_crtc_to_du(crtc); - } - - 
BUG_ON(surface == NULL); - BUG_ON(!clips || !num_clips); - - tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL); - if (unlikely(tmp == NULL)) { - DRM_ERROR("Temporary cliprect memory alloc failed.\n"); - return -ENOMEM; - } - - fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips; - cmd = kmalloc(fifo_size, GFP_KERNEL); - if (unlikely(cmd == NULL)) { - DRM_ERROR("Failed to allocate temporary fifo memory.\n"); - ret = -ENOMEM; - goto out_free_tmp; - } - - left = clips->x; - right = clips->x + clips->w; - top = clips->y; - bottom = clips->y + clips->h; - - for (i = 1; i < num_clips; i++) { - left = min_t(int, left, (int)clips[i].x); - right = max_t(int, right, (int)clips[i].x + clips[i].w); - top = min_t(int, top, (int)clips[i].y); - bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h); - } - - /* only need to do this once */ - memset(cmd, 0, fifo_size); - cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN); - - blits = (SVGASignedRect *)&cmd[1]; - - cmd->body.srcRect.left = left; - cmd->body.srcRect.right = right; - cmd->body.srcRect.top = top; - cmd->body.srcRect.bottom = bottom; - - for (i = 0; i < num_clips; i++) { - tmp[i].x1 = clips[i].x - left; - tmp[i].x2 = clips[i].x + clips[i].w - left; - tmp[i].y1 = clips[i].y - top; - tmp[i].y2 = clips[i].y + clips[i].h - top; - } - - for (k = 0; k < num_units; k++) { - struct vmw_display_unit *unit = units[k]; - struct vmw_clip_rect clip; - int num; - - clip.x1 = left + destX - unit->crtc.x; - clip.y1 = top + destY - unit->crtc.y; - clip.x2 = right + destX - unit->crtc.x; - clip.y2 = bottom + destY - unit->crtc.y; - - /* skip any crtcs that misses the clip region */ - if (clip.x1 >= unit->crtc.mode.hdisplay || - clip.y1 >= unit->crtc.mode.vdisplay || - clip.x2 <= 0 || clip.y2 <= 0) - continue; - - /* - * In order for the clip rects to be correctly scaled - * the src and dest rects needs to be the same size. 
- */ - cmd->body.destRect.left = clip.x1; - cmd->body.destRect.right = clip.x2; - cmd->body.destRect.top = clip.y1; - cmd->body.destRect.bottom = clip.y2; - - /* create a clip rect of the crtc in dest coords */ - clip.x2 = unit->crtc.mode.hdisplay - clip.x1; - clip.y2 = unit->crtc.mode.vdisplay - clip.y1; - clip.x1 = 0 - clip.x1; - clip.y1 = 0 - clip.y1; - - /* need to reset sid as it is changed by execbuf */ - cmd->body.srcImage.sid = sid; - cmd->body.destScreenId = unit->unit; - - /* clip and write blits to cmd stream */ - vmw_clip_cliprects(tmp, num_clips, clip, blits, &num); - - /* if no cliprects hit skip this */ - if (num == 0) - continue; - - /* recalculate package length */ - fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num; - cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header)); - ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, - fifo_size, 0, NULL, NULL); - - if (unlikely(ret != 0)) - break; - } - - kfree(cmd); -out_free_tmp: - kfree(tmp); - - return ret; -} - -int vmw_kms_readback(struct vmw_private *dev_priv, - struct drm_file *file_priv, - struct vmw_framebuffer *vfb, - struct drm_vmw_fence_rep __user *user_fence_rep, - struct drm_vmw_rect *clips, - uint32_t num_clips) -{ - struct vmw_framebuffer_dmabuf *vfbd = - vmw_framebuffer_to_vfbd(&vfb->base); - struct vmw_dma_buffer *dmabuf = vfbd->buffer; - struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; - struct drm_crtc *crtc; - size_t fifo_size; - int i, k, ret, num_units, blits_pos; - - struct { - uint32_t header; - SVGAFifoCmdDefineGMRFB body; - } *cmd; - struct { - uint32_t header; - SVGAFifoCmdBlitScreenToGMRFB body; - } *blits; - - num_units = 0; - list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) { - if (crtc->primary->fb != &vfb->base) - continue; - units[num_units++] = vmw_crtc_to_du(crtc); - } - - BUG_ON(dmabuf == NULL); - BUG_ON(!clips || !num_clips); - - /* take a safe guess at fifo size */ - fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units; - cmd = kmalloc(fifo_size, GFP_KERNEL); - if (unlikely(cmd == NULL)) { - DRM_ERROR("Failed to allocate temporary fifo memory.\n"); - return -ENOMEM; - } - - memset(cmd, 0, fifo_size); - cmd->header = SVGA_CMD_DEFINE_GMRFB; - cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel; - cmd->body.format.colorDepth = vfb->base.depth; - cmd->body.format.reserved = 0; - cmd->body.bytesPerLine = vfb->base.pitches[0]; - cmd->body.ptr.gmrId = vfb->user_handle; - cmd->body.ptr.offset = 0; - - blits = (void *)&cmd[1]; - blits_pos = 0; - for (i = 0; i < num_units; i++) { - struct drm_vmw_rect *c = clips; - for (k = 0; k < num_clips; k++, c++) { - /* transform clip coords to crtc origin based coords */ - int clip_x1 = c->x - units[i]->crtc.x; - int clip_x2 = c->x - units[i]->crtc.x + c->w; - int clip_y1 = c->y - units[i]->crtc.y; - int clip_y2 = c->y - units[i]->crtc.y + c->h; - int dest_x = c->x; - int dest_y = c->y; - - /* compensate for clipping, we negate - * a negative number and add that. 
- */ - if (clip_x1 < 0) - dest_x += -clip_x1; - if (clip_y1 < 0) - dest_y += -clip_y1; - - /* clip */ - clip_x1 = max(clip_x1, 0); - clip_y1 = max(clip_y1, 0); - clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay); - clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay); - - /* and cull any rects that misses the crtc */ - if (clip_x1 >= units[i]->crtc.mode.hdisplay || - clip_y1 >= units[i]->crtc.mode.vdisplay || - clip_x2 <= 0 || clip_y2 <= 0) - continue; - - blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB; - blits[blits_pos].body.srcScreenId = units[i]->unit; - blits[blits_pos].body.destOrigin.x = dest_x; - blits[blits_pos].body.destOrigin.y = dest_y; + int ret; - blits[blits_pos].body.srcRect.left = clip_x1; - blits[blits_pos].body.srcRect.top = clip_y1; - blits[blits_pos].body.srcRect.right = clip_x2; - blits[blits_pos].body.srcRect.bottom = clip_y2; - blits_pos++; - } + switch (dev_priv->active_display_unit) { + case vmw_du_screen_target: + ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips, + &surface->res, destX, destY, + num_clips, 1, NULL); + break; + case vmw_du_screen_object: + ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface, + sid, destX, destY, clips, + num_clips); + break; + default: + WARN_ONCE(true, + "Present called with invalid display system.\n"); + ret = -ENOSYS; + break; } - /* reset size here and use calculated exact size from loops */ - fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos; - - ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size, - 0, user_fence_rep, NULL); + if (ret) + return ret; - kfree(cmd); + vmw_fifo_flush(dev_priv, false); - return ret; + return 0; } int vmw_kms_init(struct vmw_private *dev_priv) @@ -1470,30 +1076,37 @@ int vmw_kms_init(struct vmw_private *dev_priv) dev->mode_config.funcs = &vmw_kms_funcs; dev->mode_config.min_width = 1; dev->mode_config.min_height = 1; - /* assumed largest fb size */ - dev->mode_config.max_width = 8192; - dev->mode_config.max_height = 8192; + dev->mode_config.max_width = dev_priv->texture_max_width; + dev->mode_config.max_height = dev_priv->texture_max_height; - ret = vmw_kms_init_screen_object_display(dev_priv); - if (ret) /* Fallback */ - (void)vmw_kms_init_legacy_display_system(dev_priv); + ret = vmw_kms_stdu_init_display(dev_priv); + if (ret) { + ret = vmw_kms_sou_init_display(dev_priv); + if (ret) /* Fallback */ + ret = vmw_kms_ldu_init_display(dev_priv); + } - return 0; + return ret; } int vmw_kms_close(struct vmw_private *dev_priv) { + int ret; + /* * Docs says we should take the lock before calling this function * but since it destroys encoders and our destructor calls * drm_encoder_cleanup which takes the lock we deadlock. 
*/ drm_mode_config_cleanup(dev_priv->dev); - if (dev_priv->sou_priv) - vmw_kms_close_screen_object_display(dev_priv); + if (dev_priv->active_display_unit == vmw_du_screen_object) + ret = vmw_kms_sou_close_display(dev_priv); + else if (dev_priv->active_display_unit == vmw_du_screen_target) + ret = vmw_kms_stdu_close_display(dev_priv); else - vmw_kms_close_legacy_display_system(dev_priv); - return 0; + ret = vmw_kms_ldu_close_display(dev_priv); + + return ret; } int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, @@ -1569,7 +1182,7 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv) vmw_read(vmw_priv, SVGA_REG_PITCHLOCK); else if (vmw_fifo_have_pitchlock(vmw_priv)) vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt + - SVGA_FIFO_PITCHLOCK); + SVGA_FIFO_PITCHLOCK); if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) return 0; @@ -1641,7 +1254,9 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, uint32_t pitch, uint32_t height) { - return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem; + return ((u64) pitch * (u64) height) < (u64) + ((dev_priv->active_display_unit == vmw_du_screen_target) ? + dev_priv->prim_bb_mem : dev_priv->vram_size); } @@ -1715,75 +1330,6 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, return 0; } -int vmw_du_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags) -{ - struct vmw_private *dev_priv = vmw_priv(crtc->dev); - struct drm_framebuffer *old_fb = crtc->primary->fb; - struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb); - struct drm_file *file_priv ; - struct vmw_fence_obj *fence = NULL; - struct drm_clip_rect clips; - int ret; - - if (event == NULL) - return -EINVAL; - - /* require ScreenObject support for page flipping */ - if (!dev_priv->sou_priv) - return -ENOSYS; - - file_priv = event->base.file_priv; - if (!vmw_kms_screen_object_flippable(dev_priv, crtc)) - return -EINVAL; - - crtc->primary->fb = fb; - - /* do a full screen dirty update */ - clips.x1 = clips.y1 = 0; - clips.x2 = fb->width; - clips.y2 = fb->height; - - if (vfb->dmabuf) - ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb, - 0, 0, &clips, 1, 1, &fence); - else - ret = do_surface_dirty_sou(dev_priv, file_priv, vfb, - 0, 0, &clips, 1, 1, &fence); - - - if (ret != 0) - goto out_no_fence; - if (!fence) { - ret = -EINVAL; - goto out_no_fence; - } - - ret = vmw_event_fence_action_queue(file_priv, fence, - &event->base, - &event->event.tv_sec, - &event->event.tv_usec, - true); - - /* - * No need to hold on to this now. The only cleanup - * we need to do if we fail is unref the fence. - */ - vmw_fence_obj_unreference(&fence); - - if (vmw_crtc_to_du(crtc)->is_implicit) - vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc); - - return ret; - -out_no_fence: - crtc->primary->fb = old_fb; - return ret; -} - - void vmw_du_crtc_save(struct drm_crtc *crtc) { } @@ -1920,7 +1466,7 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = { * @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay * members filled in. 
*/ -static void vmw_guess_mode_timing(struct drm_display_mode *mode) +void vmw_guess_mode_timing(struct drm_display_mode *mode) { mode->hsync_start = mode->hdisplay + 50; mode->hsync_end = mode->hsync_start + 50; @@ -1955,36 +1501,39 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, * If using screen objects, then assume 32-bpp because that's what the * SVGA device is assuming */ - if (dev_priv->sou_priv) + if (dev_priv->active_display_unit == vmw_du_screen_object) assumed_bpp = 4; + if (dev_priv->active_display_unit == vmw_du_screen_target) { + max_width = min(max_width, dev_priv->stdu_max_width); + max_height = min(max_height, dev_priv->stdu_max_height); + } + /* Add preferred mode */ - { - mode = drm_mode_duplicate(dev, &prefmode); - if (!mode) - return 0; - mode->hdisplay = du->pref_width; - mode->vdisplay = du->pref_height; - vmw_guess_mode_timing(mode); - - if (vmw_kms_validate_mode_vram(dev_priv, - mode->hdisplay * assumed_bpp, - mode->vdisplay)) { - drm_mode_probed_add(connector, mode); - } else { - drm_mode_destroy(dev, mode); - mode = NULL; - } + mode = drm_mode_duplicate(dev, &prefmode); + if (!mode) + return 0; + mode->hdisplay = du->pref_width; + mode->vdisplay = du->pref_height; + vmw_guess_mode_timing(mode); - if (du->pref_mode) { - list_del_init(&du->pref_mode->head); - drm_mode_destroy(dev, du->pref_mode); - } + if (vmw_kms_validate_mode_vram(dev_priv, + mode->hdisplay * assumed_bpp, + mode->vdisplay)) { + drm_mode_probed_add(connector, mode); + } else { + drm_mode_destroy(dev, mode); + mode = NULL; + } - /* mode might be null here, this is intended */ - du->pref_mode = mode; + if (du->pref_mode) { + list_del_init(&du->pref_mode->head); + drm_mode_destroy(dev, du->pref_mode); } + /* mode might be null here, this is intended */ + du->pref_mode = mode; + for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) { bmode = &vmw_kms_connector_builtin[i]; if (bmode->hdisplay > max_width || @@ -2004,11 +1553,9 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, drm_mode_probed_add(connector, mode); } - /* Move the prefered mode first, help apps pick the right mode. */ - if (du->pref_mode) - list_move(&du->pref_mode->head, &connector->probed_modes); - drm_mode_connector_list_update(connector, true); + /* Move the prefered mode first, help apps pick the right mode. */ + drm_mode_sort(&connector->modes); return 1; } @@ -2032,7 +1579,9 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, unsigned rects_size; int ret; int i; + u64 total_pixels = 0; struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_vmw_rect bounding_box = {0}; if (!arg->num_outputs) { struct drm_vmw_rect def_rect = {0, 0, 800, 600}; @@ -2063,6 +1612,40 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, ret = -EINVAL; goto out_free; } + + /* + * bounding_box.w and bunding_box.h are used as + * lower-right coordinates + */ + if (rects[i].x + rects[i].w > bounding_box.w) + bounding_box.w = rects[i].x + rects[i].w; + + if (rects[i].y + rects[i].h > bounding_box.h) + bounding_box.h = rects[i].y + rects[i].h; + + total_pixels += (u64) rects[i].w * (u64) rects[i].h; + } + + if (dev_priv->active_display_unit == vmw_du_screen_target) { + /* + * For Screen Targets, the limits for a toplogy are: + * 1. Bounding box (assuming 32bpp) must be < prim_bb_mem + * 2. 
Total pixels (assuming 32bpp) must be < prim_bb_mem + */ + u64 bb_mem = bounding_box.w * bounding_box.h * 4; + u64 pixel_mem = total_pixels * 4; + + if (bb_mem > dev_priv->prim_bb_mem) { + DRM_ERROR("Topology is beyond supported limits.\n"); + ret = -EINVAL; + goto out_free; + } + + if (pixel_mem > dev_priv->prim_bb_mem) { + DRM_ERROR("Combined output size too large\n"); + ret = -EINVAL; + goto out_free; + } } vmw_du_update_layout(dev_priv, arg->num_outputs, rects); @@ -2071,3 +1654,419 @@ out_free: kfree(rects); return ret; } + +/** + * vmw_kms_helper_dirty - Helper to build commands and perform actions based + * on a set of cliprects and a set of display units. + * + * @dev_priv: Pointer to a device private structure. + * @framebuffer: Pointer to the framebuffer on which to perform the actions. + * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL. + * Cliprects are given in framebuffer coordinates. + * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must + * be NULL. Cliprects are given in source coordinates. + * @dest_x: X coordinate offset for the crtc / destination clip rects. + * @dest_y: Y coordinate offset for the crtc / destination clip rects. + * @num_clips: Number of cliprects in the @clips or @vclips array. + * @increment: Integer with which to increment the clip counter when looping. + * Used to skip a predetermined number of clip rects. + * @dirty: Closure structure. See the description of struct vmw_kms_dirty. + */ +int vmw_kms_helper_dirty(struct vmw_private *dev_priv, + struct vmw_framebuffer *framebuffer, + const struct drm_clip_rect *clips, + const struct drm_vmw_rect *vclips, + s32 dest_x, s32 dest_y, + int num_clips, + int increment, + struct vmw_kms_dirty *dirty) +{ + struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; + struct drm_crtc *crtc; + u32 num_units = 0; + u32 i, k; + + dirty->dev_priv = dev_priv; + + list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) { + if (crtc->primary->fb != &framebuffer->base) + continue; + units[num_units++] = vmw_crtc_to_du(crtc); + } + + for (k = 0; k < num_units; k++) { + struct vmw_display_unit *unit = units[k]; + s32 crtc_x = unit->crtc.x; + s32 crtc_y = unit->crtc.y; + s32 crtc_width = unit->crtc.mode.hdisplay; + s32 crtc_height = unit->crtc.mode.vdisplay; + const struct drm_clip_rect *clips_ptr = clips; + const struct drm_vmw_rect *vclips_ptr = vclips; + + dirty->unit = unit; + if (dirty->fifo_reserve_size > 0) { + dirty->cmd = vmw_fifo_reserve(dev_priv, + dirty->fifo_reserve_size); + if (!dirty->cmd) { + DRM_ERROR("Couldn't reserve fifo space " + "for dirty blits.\n"); + return -ENOMEM; + } + memset(dirty->cmd, 0, dirty->fifo_reserve_size); + } + dirty->num_hits = 0; + for (i = 0; i < num_clips; i++, clips_ptr += increment, + vclips_ptr += increment) { + s32 clip_left; + s32 clip_top; + + /* + * Select clip array type. Note that integer type + * in @clips is unsigned short, whereas in @vclips + * it's 32-bit. 
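The Screen Target topology checks just added compute two 32bpp byte counts, one from the bounding box of all rects and one from the summed rect areas, and reject the layout if either exceeds prim_bb_mem. A self-contained sketch of the same arithmetic, with illustrative rects and an assumed 8 MiB limit:

	#include <stdint.h>
	#include <stdio.h>

	struct sketch_rect { uint32_t x, y, w, h; };

	int main(void)
	{
		/* Two hypothetical 1024x768 outputs placed side by side. */
		struct sketch_rect rects[] = { {0, 0, 1024, 768}, {1024, 0, 1024, 768} };
		uint64_t prim_bb_mem = 8ULL << 20;	/* assumed 8 MiB limit */
		uint64_t bb_w = 0, bb_h = 0, total_pixels = 0;
		unsigned i;

		for (i = 0; i < sizeof(rects) / sizeof(rects[0]); i++) {
			if ((uint64_t)rects[i].x + rects[i].w > bb_w)
				bb_w = (uint64_t)rects[i].x + rects[i].w;
			if ((uint64_t)rects[i].y + rects[i].h > bb_h)
				bb_h = (uint64_t)rects[i].y + rects[i].h;
			total_pixels += (uint64_t)rects[i].w * rects[i].h;
		}

		/* Both limits are byte counts at 32bpp, i.e. 4 bytes per pixel. */
		printf("bounding box bytes: %llu, pixel bytes: %llu, fits: %s\n",
		       (unsigned long long)(bb_w * bb_h * 4),
		       (unsigned long long)(total_pixels * 4),
		       (bb_w * bb_h * 4 <= prim_bb_mem &&
			total_pixels * 4 <= prim_bb_mem) ? "yes" : "no");
		return 0;
	}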
+ */ + if (clips) { + dirty->fb_x = (s32) clips_ptr->x1; + dirty->fb_y = (s32) clips_ptr->y1; + dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x - + crtc_x; + dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y - + crtc_y; + } else { + dirty->fb_x = vclips_ptr->x; + dirty->fb_y = vclips_ptr->y; + dirty->unit_x2 = dirty->fb_x + vclips_ptr->w + + dest_x - crtc_x; + dirty->unit_y2 = dirty->fb_y + vclips_ptr->h + + dest_y - crtc_y; + } + + dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x; + dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y; + + /* Skip this clip if it's outside the crtc region */ + if (dirty->unit_x1 >= crtc_width || + dirty->unit_y1 >= crtc_height || + dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0) + continue; + + /* Clip right and bottom to crtc limits */ + dirty->unit_x2 = min_t(s32, dirty->unit_x2, + crtc_width); + dirty->unit_y2 = min_t(s32, dirty->unit_y2, + crtc_height); + + /* Clip left and top to crtc limits */ + clip_left = min_t(s32, dirty->unit_x1, 0); + clip_top = min_t(s32, dirty->unit_y1, 0); + dirty->unit_x1 -= clip_left; + dirty->unit_y1 -= clip_top; + dirty->fb_x -= clip_left; + dirty->fb_y -= clip_top; + + dirty->clip(dirty); + } + + dirty->fifo_commit(dirty); + } + + return 0; +} + +/** + * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before + * command submission. + * + * @dev_priv: Pointer to a device private structure. + * @buf: The buffer object. + * @interruptible: Whether to perform waits as interruptible. + * @validate_as_mob: Whether the buffer should be validated as a MOB. If false, + * the buffer will be validated as a GMR. Already pinned buffers will not be + * validated. + * + * Returns 0 on success, negative error code on failure, -ERESTARTSYS if + * interrupted by a signal. + */ +int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv, + struct vmw_dma_buffer *buf, + bool interruptible, + bool validate_as_mob) +{ + struct ttm_buffer_object *bo = &buf->base; + int ret; + + ttm_bo_reserve(bo, false, false, interruptible, NULL); + ret = vmw_validate_single_buffer(dev_priv, bo, interruptible, + validate_as_mob); + if (ret) + ttm_bo_unreserve(bo); + + return ret; +} + +/** + * vmw_kms_helper_buffer_revert - Undo the actions of + * vmw_kms_helper_buffer_prepare. + * + * @buf: Pointer to the buffer object. + * + * Helper to be used if an error forces the caller to undo the actions of + * vmw_kms_helper_buffer_prepare. + */ +void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf) +{ + if (buf) + ttm_bo_unreserve(&buf->base); +} + +/** + * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after + * kms command submission. + * + * @dev_priv: Pointer to a device private structure. + * @file_priv: Pointer to a struct drm_file representing the caller's + * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely + * if non-NULL, @user_fence_rep must be non-NULL. + * @buf: The buffer object. + * @out_fence: Optional pointer to a fence pointer. If non-NULL, a + * ref-counted fence pointer is returned here. + * @user_fence_rep: Optional pointer to a user-space provided struct + * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the + * function copies fence data to user-space in a fail-safe manner. 
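The per-clip translation in vmw_kms_helper_dirty() offsets each framebuffer rect by dest_x/dest_y and the unit's crtc origin, then clamps to the crtc and shifts any left/top clipping back into the framebuffer origin. The same steps on one rect, as a standalone sketch with invented coordinates:

	#include <stdint.h>
	#include <stdio.h>

	static int32_t min_s32(int32_t a, int32_t b) { return a < b ? a : b; }

	int main(void)
	{
		/* One 100x100 clip at (50, 50) in framebuffer space. */
		int32_t fb_x = 50, fb_y = 50, w = 100, h = 100;
		int32_t dest_x = 0, dest_y = 0;		/* destination offset */
		int32_t crtc_x = 100, crtc_y = 0;	/* unit's origin in the layout */
		int32_t crtc_w = 640, crtc_h = 480;

		int32_t unit_x1 = fb_x + dest_x - crtc_x;
		int32_t unit_y1 = fb_y + dest_y - crtc_y;
		int32_t unit_x2 = unit_x1 + w;
		int32_t unit_y2 = unit_y1 + h;

		/* Clamp right/bottom to the crtc, then fold left/top clipping
		 * back into the framebuffer origin, as the helper does. */
		unit_x2 = min_s32(unit_x2, crtc_w);
		unit_y2 = min_s32(unit_y2, crtc_h);
		int32_t clip_left = min_s32(unit_x1, 0);
		int32_t clip_top = min_s32(unit_y1, 0);
		unit_x1 -= clip_left; fb_x -= clip_left;
		unit_y1 -= clip_top;  fb_y -= clip_top;

		printf("unit rect (%d,%d)-(%d,%d), fb origin (%d,%d)\n",
		       unit_x1, unit_y1, unit_x2, unit_y2, fb_x, fb_y);
		return 0;
	}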
+ */ +void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, + struct drm_file *file_priv, + struct vmw_dma_buffer *buf, + struct vmw_fence_obj **out_fence, + struct drm_vmw_fence_rep __user * + user_fence_rep) +{ + struct vmw_fence_obj *fence; + uint32_t handle; + int ret; + + ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence, + file_priv ? &handle : NULL); + if (buf) + vmw_fence_single_bo(&buf->base, fence); + if (file_priv) + vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), + ret, user_fence_rep, fence, + handle); + if (out_fence) + *out_fence = fence; + else + vmw_fence_obj_unreference(&fence); + + vmw_kms_helper_buffer_revert(buf); +} + + +/** + * vmw_kms_helper_resource_revert - Undo the actions of + * vmw_kms_helper_resource_prepare. + * + * @res: Pointer to the resource. Typically a surface. + * + * Helper to be used if an error forces the caller to undo the actions of + * vmw_kms_helper_resource_prepare. + */ +void vmw_kms_helper_resource_revert(struct vmw_resource *res) +{ + vmw_kms_helper_buffer_revert(res->backup); + vmw_resource_unreserve(res, false, NULL, 0); + mutex_unlock(&res->dev_priv->cmdbuf_mutex); +} + +/** + * vmw_kms_helper_resource_prepare - Reserve and validate a resource before + * command submission. + * + * @res: Pointer to the resource. Typically a surface. + * @interruptible: Whether to perform waits as interruptible. + * + * Also reserves and validates the backup buffer if the resource is + * guest-backed. Returns 0 on success, negative error code on failure, + * -ERESTARTSYS if interrupted by a signal. + */ +int vmw_kms_helper_resource_prepare(struct vmw_resource *res, + bool interruptible) +{ + int ret = 0; + + if (interruptible) + ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex); + else + mutex_lock(&res->dev_priv->cmdbuf_mutex); + + if (unlikely(ret != 0)) + return -ERESTARTSYS; + + ret = vmw_resource_reserve(res, interruptible, false); + if (ret) + goto out_unlock; + + if (res->backup) { + ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup, + interruptible, + res->dev_priv->has_mob); + if (ret) + goto out_unreserve; + } + ret = vmw_resource_validate(res); + if (ret) + goto out_revert; + return 0; + +out_revert: + vmw_kms_helper_buffer_revert(res->backup); +out_unreserve: + vmw_resource_unreserve(res, false, NULL, 0); +out_unlock: + mutex_unlock(&res->dev_priv->cmdbuf_mutex); + return ret; +} + +/** + * vmw_kms_helper_resource_finish - Unreserve and fence a resource after + * kms command submission. + * + * @res: Pointer to the resource. Typically a surface. + * @out_fence: Optional pointer to a fence pointer. If non-NULL, a + * ref-counted fence pointer is returned here. + */ +void vmw_kms_helper_resource_finish(struct vmw_resource *res, + struct vmw_fence_obj **out_fence) +{ + if (res->backup || out_fence) + vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup, + out_fence, NULL); + + vmw_resource_unreserve(res, false, NULL, 0); + mutex_unlock(&res->dev_priv->cmdbuf_mutex); +} + +/** + * vmw_kms_update_proxy - Helper function to update a proxy surface from + * its backing MOB. + * + * @res: Pointer to the surface resource + * @clips: Clip rects in framebuffer (surface) space. + * @num_clips: Number of clips in @clips. + * @increment: Integer with which to increment the clip counter when looping. + * Used to skip a predetermined number of clip rects. + * + * This function makes sure the proxy surface is updated from its backing MOB + * using the region given by @clips. 
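vmw_kms_helper_resource_prepare() takes the cmdbuf mutex, reserves the resource, prepares the backup, and validates, unwinding in reverse on any failure. A toy model of that goto-unwind ordering, not driver code; the step names are only labels:

	#include <stdio.h>

	/* Each step either succeeds or the function unwinds everything taken
	 * so far in reverse order, mirroring the helper's goto chain above. */
	static int step(const char *what, int fail)
	{
		printf("%s: %s\n", what, fail ? "failed" : "ok");
		return fail ? -1 : 0;
	}

	static int sketch_prepare(int fail_at)
	{
		if (step("lock cmdbuf mutex", fail_at == 1))
			return -1;
		if (step("reserve resource", fail_at == 2))
			goto out_unlock;
		if (step("prepare backup buffer", fail_at == 3))
			goto out_unreserve;
		if (step("validate resource", fail_at == 4))
			goto out_revert;
		return 0;

	out_revert:
		printf("revert backup buffer\n");
	out_unreserve:
		printf("unreserve resource\n");
	out_unlock:
		printf("unlock cmdbuf mutex\n");
		return -1;
	}

	int main(void)
	{
		sketch_prepare(4);	/* fail at the last step: full unwind */
		return 0;
	}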
The surface resource @res and its backing + * MOB need to be reserved and validated on call. + */ +int vmw_kms_update_proxy(struct vmw_resource *res, + const struct drm_clip_rect *clips, + unsigned num_clips, + int increment) +{ + struct vmw_private *dev_priv = res->dev_priv; + struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdUpdateGBImage body; + } *cmd; + SVGA3dBox *box; + size_t copy_size = 0; + int i; + + if (!clips) + return 0; + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips); + if (!cmd) { + DRM_ERROR("Couldn't reserve fifo space for proxy surface " + "update.\n"); + return -ENOMEM; + } + + for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) { + box = &cmd->body.box; + + cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE; + cmd->header.size = sizeof(cmd->body); + cmd->body.image.sid = res->id; + cmd->body.image.face = 0; + cmd->body.image.mipmap = 0; + + if (clips->x1 > size->width || clips->x2 > size->width || + clips->y1 > size->height || clips->y2 > size->height) { + DRM_ERROR("Invalid clips outside of framebuffer.\n"); + return -EINVAL; + } + + box->x = clips->x1; + box->y = clips->y1; + box->z = 0; + box->w = clips->x2 - clips->x1; + box->h = clips->y2 - clips->y1; + box->d = 1; + + copy_size += sizeof(*cmd); + } + + vmw_fifo_commit(dev_priv, copy_size); + + return 0; +} + +int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, + unsigned unit, + u32 max_width, + u32 max_height, + struct drm_connector **p_con, + struct drm_crtc **p_crtc, + struct drm_display_mode **p_mode) +{ + struct drm_connector *con; + struct vmw_display_unit *du; + struct drm_display_mode *mode; + int i = 0; + + list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list, + head) { + if (i == unit) + break; + + ++i; + } + + if (i != unit) { + DRM_ERROR("Could not find initial display unit.\n"); + return -EINVAL; + } + + if (list_empty(&con->modes)) + (void) vmw_du_connector_fill_modes(con, max_width, max_height); + + if (list_empty(&con->modes)) { + DRM_ERROR("Could not find initial display mode.\n"); + return -EINVAL; + } + + du = vmw_connector_to_du(con); + *p_con = con; + *p_crtc = &du->crtc; + + list_for_each_entry(mode, &con->modes, head) { + if (mode->type & DRM_MODE_TYPE_PREFERRED) + break; + } + + if (mode->type & DRM_MODE_TYPE_PREFERRED) + *p_mode = mode; + else { + WARN_ONCE(true, "Could not find initial preferred mode.\n"); + *p_mode = list_first_entry(&con->modes, + struct drm_display_mode, + head); + } + + return 0; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index f1a324cfb4c3..782df7ca9794 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -32,11 +32,60 @@ #include <drm/drm_crtc_helper.h> #include "vmwgfx_drv.h" +/** + * struct vmw_kms_dirty - closure structure for the vmw_kms_helper_dirty + * function. + * + * @fifo_commit: Callback that is called once for each display unit after + * all clip rects. This function must commit the fifo space reserved by the + * helper. Set up by the caller. + * @clip: Callback that is called for each cliprect on each display unit. + * Set up by the caller. 
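vmw_kms_fbdev_init_data() above scans the connector's mode list for a DRM_MODE_TYPE_PREFERRED entry and falls back to the list head with a one-time warning. The selection logic reduced to a plain array, purely illustrative:

	#include <stdio.h>

	#define MODE_PREFERRED 0x1

	struct sketch_mode { int hdisplay, vdisplay; unsigned type; };

	int main(void)
	{
		struct sketch_mode modes[] = {
			{ 640, 480, 0 }, { 1024, 768, 0 }, { 800, 600, 0 },
		};
		unsigned n = sizeof(modes) / sizeof(modes[0]);
		struct sketch_mode *pick = NULL;
		unsigned i;

		for (i = 0; i < n; i++) {
			if (modes[i].type & MODE_PREFERRED) {
				pick = &modes[i];
				break;
			}
		}
		if (!pick)	/* no preferred mode: warn and take the list head */
			pick = &modes[0];

		printf("picked %dx%d\n", pick->hdisplay, pick->vdisplay);
		return 0;
	}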
+ * @fifo_reserve_size: Fifo size that the helper should try to allocate for + * each display unit. Set up by the caller. + * @dev_priv: Pointer to the device private. Set up by the helper. + * @unit: The current display unit. Set up by the helper before a call to @clip. + * @cmd: The allocated fifo space. Set up by the helper before the first @clip + * call. + * @num_hits: Number of clip rect commands for this display unit. + * Cleared by the helper before the first @clip call. Updated by the @clip + * callback. + * @fb_x: Clip rect left side in framebuffer coordinates. + * @fb_y: Clip rect top side in framebuffer coordinates. + * @unit_x1: Clip rect left side in crtc coordinates. + * @unit_y1: Clip rect top side in crtc coordinates. + * @unit_x2: Clip rect right side in crtc coordinates. + * @unit_y2: Clip rect bottom side in crtc coordinates. + * + * The clip rect coordinates are updated by the helper for each @clip call. + * Note that this structure may be derived from if more info needs to be + * passed between the helper caller and the helper callbacks. + */ +struct vmw_kms_dirty { + void (*fifo_commit)(struct vmw_kms_dirty *); + void (*clip)(struct vmw_kms_dirty *); + size_t fifo_reserve_size; + struct vmw_private *dev_priv; + struct vmw_display_unit *unit; + void *cmd; + u32 num_hits; + s32 fb_x; + s32 fb_y; + s32 unit_x1; + s32 unit_y1; + s32 unit_x2; + s32 unit_y2; +}; + #define VMWGFX_NUM_DISPLAY_UNITS 8 #define vmw_framebuffer_to_vfb(x) \ container_of(x, struct vmw_framebuffer, base) +#define vmw_framebuffer_to_vfbs(x) \ + container_of(x, struct vmw_framebuffer_surface, base.base) +#define vmw_framebuffer_to_vfbd(x) \ + container_of(x, struct vmw_framebuffer_dmabuf, base.base) /** * Base class for framebuffers @@ -53,9 +102,27 @@ struct vmw_framebuffer { uint32_t user_handle; }; +/* + * Clip rectangle + */ +struct vmw_clip_rect { + int x1, x2, y1, y2; +}; + +struct vmw_framebuffer_surface { + struct vmw_framebuffer base; + struct vmw_surface *surface; + struct vmw_dma_buffer *buffer; + struct list_head head; + bool is_dmabuf_proxy; /* true if this is proxy surface for DMA buf */ +}; + + +struct vmw_framebuffer_dmabuf { + struct vmw_framebuffer base; + struct vmw_dma_buffer *buffer; +}; -#define vmw_crtc_to_du(x) \ - container_of(x, struct vmw_display_unit, crtc) /* * Basic cursor manipulation @@ -120,11 +187,7 @@ struct vmw_display_unit { /* * Shared display unit functions - vmwgfx_kms.c */ -void vmw_display_unit_cleanup(struct vmw_display_unit *du); -int vmw_du_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags); +void vmw_du_cleanup(struct vmw_display_unit *du); void vmw_du_crtc_save(struct drm_crtc *crtc); void vmw_du_crtc_restore(struct drm_crtc *crtc); void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, @@ -143,25 +206,118 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, int vmw_du_connector_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val); +int vmw_kms_helper_dirty(struct vmw_private *dev_priv, + struct vmw_framebuffer *framebuffer, + const struct drm_clip_rect *clips, + const struct drm_vmw_rect *vclips, + s32 dest_x, s32 dest_y, + int num_clips, + int increment, + struct vmw_kms_dirty *dirty); +int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv, + struct vmw_dma_buffer *buf, + bool interruptible, + bool validate_as_mob); +void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf); +void vmw_kms_helper_buffer_finish(struct vmw_private 
*dev_priv, + struct drm_file *file_priv, + struct vmw_dma_buffer *buf, + struct vmw_fence_obj **out_fence, + struct drm_vmw_fence_rep __user * + user_fence_rep); +int vmw_kms_helper_resource_prepare(struct vmw_resource *res, + bool interruptible); +void vmw_kms_helper_resource_revert(struct vmw_resource *res); +void vmw_kms_helper_resource_finish(struct vmw_resource *res, + struct vmw_fence_obj **out_fence); +int vmw_kms_readback(struct vmw_private *dev_priv, + struct drm_file *file_priv, + struct vmw_framebuffer *vfb, + struct drm_vmw_fence_rep __user *user_fence_rep, + struct drm_vmw_rect *vclips, + uint32_t num_clips); +struct vmw_framebuffer * +vmw_kms_new_framebuffer(struct vmw_private *dev_priv, + struct vmw_dma_buffer *dmabuf, + struct vmw_surface *surface, + bool only_2d, + const struct drm_mode_fb_cmd *mode_cmd); +int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, + unsigned unit, + u32 max_width, + u32 max_height, + struct drm_connector **p_con, + struct drm_crtc **p_crtc, + struct drm_display_mode **p_mode); +void vmw_guess_mode_timing(struct drm_display_mode *mode); /* * Legacy display unit functions - vmwgfx_ldu.c */ -int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv); -int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv); +int vmw_kms_ldu_init_display(struct vmw_private *dev_priv); +int vmw_kms_ldu_close_display(struct vmw_private *dev_priv); +int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv, + struct vmw_framebuffer *framebuffer, + unsigned flags, unsigned color, + struct drm_clip_rect *clips, + unsigned num_clips, int increment); +int vmw_kms_update_proxy(struct vmw_resource *res, + const struct drm_clip_rect *clips, + unsigned num_clips, + int increment); /* * Screen Objects display functions - vmwgfx_scrn.c */ -int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv); -int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv); -int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num, - struct drm_vmw_rect *rects); -bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv, - struct drm_crtc *crtc); -void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv, - struct drm_crtc *crtc); +int vmw_kms_sou_init_display(struct vmw_private *dev_priv); +int vmw_kms_sou_close_display(struct vmw_private *dev_priv); +int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, + struct vmw_framebuffer *framebuffer, + struct drm_clip_rect *clips, + struct drm_vmw_rect *vclips, + struct vmw_resource *srf, + s32 dest_x, + s32 dest_y, + unsigned num_clips, int inc, + struct vmw_fence_obj **out_fence); +int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv, + struct vmw_framebuffer *framebuffer, + struct drm_clip_rect *clips, + unsigned num_clips, int increment, + bool interruptible, + struct vmw_fence_obj **out_fence); +int vmw_kms_sou_readback(struct vmw_private *dev_priv, + struct drm_file *file_priv, + struct vmw_framebuffer *vfb, + struct drm_vmw_fence_rep __user *user_fence_rep, + struct drm_vmw_rect *vclips, + uint32_t num_clips); + +/* + * Screen Target Display Unit functions - vmwgfx_stdu.c + */ +int vmw_kms_stdu_init_display(struct vmw_private *dev_priv); +int vmw_kms_stdu_close_display(struct vmw_private *dev_priv); +int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, + struct vmw_framebuffer *framebuffer, + struct drm_clip_rect *clips, + struct drm_vmw_rect *vclips, + struct vmw_resource *srf, + s32 dest_x, + s32 dest_y, + unsigned num_clips, 
int inc, + struct vmw_fence_obj **out_fence); +int vmw_kms_stdu_dma(struct vmw_private *dev_priv, + struct drm_file *file_priv, + struct vmw_framebuffer *vfb, + struct drm_vmw_fence_rep __user *user_fence_rep, + struct drm_clip_rect *clips, + struct drm_vmw_rect *vclips, + uint32_t num_clips, + int increment, + bool to_surface, + bool interruptible); #endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 5c289f748ab4..bb63e4d795fa 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -57,7 +57,7 @@ struct vmw_legacy_display_unit { static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu) { list_del_init(&ldu->active); - vmw_display_unit_cleanup(&ldu->base); + vmw_du_cleanup(&ldu->base); kfree(ldu); } @@ -279,7 +279,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set) return -EINVAL; } - vmw_fb_off(dev_priv); + vmw_svga_enable(dev_priv); crtc->primary->fb = fb; encoder->crtc = crtc; @@ -385,7 +385,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) return 0; } -int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) +int vmw_kms_ldu_init_display(struct vmw_private *dev_priv) { struct drm_device *dev = dev_priv->dev; int i, ret; @@ -422,6 +422,10 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) else vmw_ldu_init(dev_priv, 0); + dev_priv->active_display_unit = vmw_du_legacy; + + DRM_INFO("Legacy Display Unit initialized\n"); + return 0; err_vblank_cleanup: @@ -432,7 +436,7 @@ err_free: return ret; } -int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) +int vmw_kms_ldu_close_display(struct vmw_private *dev_priv) { struct drm_device *dev = dev_priv->dev; @@ -447,3 +451,38 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) return 0; } + + +int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv, + struct vmw_framebuffer *framebuffer, + unsigned flags, unsigned color, + struct drm_clip_rect *clips, + unsigned num_clips, int increment) +{ + size_t fifo_size; + int i; + + struct { + uint32_t header; + SVGAFifoCmdUpdate body; + } *cmd; + + fifo_size = sizeof(*cmd) * num_clips; + cmd = vmw_fifo_reserve(dev_priv, fifo_size); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Fifo reserve failed.\n"); + return -ENOMEM; + } + + memset(cmd, 0, fifo_size); + for (i = 0; i < num_clips; i++, clips += increment) { + cmd[i].header = SVGA_CMD_UPDATE; + cmd[i].body.x = clips->x1; + cmd[i].body.y = clips->y1; + cmd[i].body.width = clips->x2 - clips->x1; + cmd[i].body.height = clips->y2 - clips->y1; + } + + vmw_fifo_commit(dev_priv, fifo_size); + return 0; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index 04a64b8cd3cd..23db16008e39 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2012-2015 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a @@ -31,7 +31,7 @@ * If we set up the screen target otable, screen objects stop working. */ -#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1) +#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1)) #ifdef CONFIG_64BIT #define VMW_PPN_SIZE 8 @@ -67,9 +67,23 @@ struct vmw_mob { * @size: Size of the table (page-aligned). * @page_table: Pointer to a struct vmw_mob holding the page table. */ -struct vmw_otable { - unsigned long size; - struct vmw_mob *page_table; +static const struct vmw_otable pre_dx_tables[] = { + {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true}, + {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true}, + {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true}, + {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true}, + {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE, + NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE} +}; + +static const struct vmw_otable dx_tables[] = { + {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true}, + {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true}, + {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true}, + {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true}, + {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE, + NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}, + {VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true}, }; static int vmw_mob_pt_populate(struct vmw_private *dev_priv, @@ -92,6 +106,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob, */ static int vmw_setup_otable_base(struct vmw_private *dev_priv, SVGAOTableType type, + struct ttm_buffer_object *otable_bo, unsigned long offset, struct vmw_otable *otable) { @@ -106,7 +121,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv, BUG_ON(otable->page_table != NULL); - vsgt = vmw_bo_sg_table(dev_priv->otable_bo); + vsgt = vmw_bo_sg_table(otable_bo); vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT); WARN_ON(!vmw_piter_next(&iter)); @@ -142,7 +157,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv, cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64; cmd->header.size = sizeof(cmd->body); cmd->body.type = type; - cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT); + cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT; cmd->body.sizeInBytes = otable->size; cmd->body.validSizeInBytes = 0; cmd->body.ptDepth = mob->pt_level; @@ -191,18 +206,19 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv, if (unlikely(cmd == NULL)) { DRM_ERROR("Failed reserving FIFO space for OTable " "takedown.\n"); - } else { - memset(cmd, 0, sizeof(*cmd)); - cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; - cmd->header.size = sizeof(cmd->body); - cmd->body.type = type; - cmd->body.baseAddress = 0; - cmd->body.sizeInBytes = 0; - cmd->body.validSizeInBytes = 0; - cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + return; } + memset(cmd, 0, sizeof(*cmd)); + cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; + cmd->header.size = sizeof(cmd->body); + cmd->body.type = type; + cmd->body.baseAddress = 0; + cmd->body.sizeInBytes = 0; + cmd->body.validSizeInBytes = 0; + cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + if (bo) { int ret; @@ -217,47 +233,21 @@ static void vmw_takedown_otable_base(struct vmw_private 
*dev_priv, otable->page_table = NULL; } -/* - * vmw_otables_setup - Set up guest backed memory object tables - * - * @dev_priv: Pointer to a device private structure - * - * Takes care of the device guest backed surface - * initialization, by setting up the guest backed memory object tables. - * Returns 0 on success and various error codes on failure. A succesful return - * means the object tables can be taken down using the vmw_otables_takedown - * function. - */ -int vmw_otables_setup(struct vmw_private *dev_priv) + +static int vmw_otable_batch_setup(struct vmw_private *dev_priv, + struct vmw_otable_batch *batch) { unsigned long offset; unsigned long bo_size; - struct vmw_otable *otables; + struct vmw_otable *otables = batch->otables; SVGAOTableType i; int ret; - otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables), - GFP_KERNEL); - if (unlikely(otables == NULL)) { - DRM_ERROR("Failed to allocate space for otable " - "metadata.\n"); - return -ENOMEM; - } - - otables[SVGA_OTABLE_MOB].size = - VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE; - otables[SVGA_OTABLE_SURFACE].size = - VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE; - otables[SVGA_OTABLE_CONTEXT].size = - VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE; - otables[SVGA_OTABLE_SHADER].size = - VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE; - otables[SVGA_OTABLE_SCREEN_TARGET].size = - VMWGFX_NUM_GB_SCREEN_TARGET * - SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE; - bo_size = 0; - for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) { + for (i = 0; i < batch->num_otables; ++i) { + if (!otables[i].enabled) + continue; + otables[i].size = (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK; bo_size += otables[i].size; @@ -267,63 +257,105 @@ int vmw_otables_setup(struct vmw_private *dev_priv) ttm_bo_type_device, &vmw_sys_ne_placement, 0, false, NULL, - &dev_priv->otable_bo); + &batch->otable_bo); if (unlikely(ret != 0)) goto out_no_bo; - ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL); + ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL); BUG_ON(ret != 0); - ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm); + ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm); if (unlikely(ret != 0)) goto out_unreserve; - ret = vmw_bo_map_dma(dev_priv->otable_bo); + ret = vmw_bo_map_dma(batch->otable_bo); if (unlikely(ret != 0)) goto out_unreserve; - ttm_bo_unreserve(dev_priv->otable_bo); + ttm_bo_unreserve(batch->otable_bo); offset = 0; - for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) { - ret = vmw_setup_otable_base(dev_priv, i, offset, + for (i = 0; i < batch->num_otables; ++i) { + if (!batch->otables[i].enabled) + continue; + + ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo, + offset, &otables[i]); if (unlikely(ret != 0)) goto out_no_setup; offset += otables[i].size; } - dev_priv->otables = otables; return 0; out_unreserve: - ttm_bo_unreserve(dev_priv->otable_bo); + ttm_bo_unreserve(batch->otable_bo); out_no_setup: - for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) - vmw_takedown_otable_base(dev_priv, i, &otables[i]); + for (i = 0; i < batch->num_otables; ++i) { + if (batch->otables[i].enabled) + vmw_takedown_otable_base(dev_priv, i, + &batch->otables[i]); + } - ttm_bo_unref(&dev_priv->otable_bo); + ttm_bo_unref(&batch->otable_bo); out_no_bo: - kfree(otables); return ret; } - /* - * vmw_otables_takedown - Take down guest backed memory object tables + * vmw_otables_setup - Set up guest backed memory object tables * * @dev_priv: Pointer to a 
device private structure * - * Take down the Guest Memory Object tables. + * Takes care of the device guest backed surface + * initialization, by setting up the guest backed memory object tables. + * Returns 0 on success and various error codes on failure. A successful return + * means the object tables can be taken down using the vmw_otables_takedown + * function. */ -void vmw_otables_takedown(struct vmw_private *dev_priv) +int vmw_otables_setup(struct vmw_private *dev_priv) +{ + struct vmw_otable **otables = &dev_priv->otable_batch.otables; + int ret; + + if (dev_priv->has_dx) { + *otables = kmalloc(sizeof(dx_tables), GFP_KERNEL); + if (*otables == NULL) + return -ENOMEM; + + memcpy(*otables, dx_tables, sizeof(dx_tables)); + dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables); + } else { + *otables = kmalloc(sizeof(pre_dx_tables), GFP_KERNEL); + if (*otables == NULL) + return -ENOMEM; + + memcpy(*otables, pre_dx_tables, sizeof(pre_dx_tables)); + dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables); + } + + ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch); + if (unlikely(ret != 0)) + goto out_setup; + + return 0; + +out_setup: + kfree(*otables); + return ret; +} + +static void vmw_otable_batch_takedown(struct vmw_private *dev_priv, + struct vmw_otable_batch *batch) { SVGAOTableType i; - struct ttm_buffer_object *bo = dev_priv->otable_bo; + struct ttm_buffer_object *bo = batch->otable_bo; int ret; - for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) - vmw_takedown_otable_base(dev_priv, i, - &dev_priv->otables[i]); + for (i = 0; i < batch->num_otables; ++i) + if (batch->otables[i].enabled) + vmw_takedown_otable_base(dev_priv, i, + &batch->otables[i]); ret = ttm_bo_reserve(bo, false, true, false, NULL); BUG_ON(ret != 0); @@ -331,11 +363,21 @@ void vmw_otables_takedown(struct vmw_private *dev_priv) vmw_fence_single_bo(bo, NULL); ttm_bo_unreserve(bo); - ttm_bo_unref(&dev_priv->otable_bo); - kfree(dev_priv->otables); - dev_priv->otables = NULL; + ttm_bo_unref(&batch->otable_bo); } +/* + * vmw_otables_takedown - Take down guest backed memory object tables + * + * @dev_priv: Pointer to a device private structure + * + * Take down the Guest Memory Object tables. + */ +void vmw_otables_takedown(struct vmw_private *dev_priv) +{ + vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch); + kfree(dev_priv->otable_batch.otables); +} /* * vmw_mob_calculate_pt_pages - Calculate the number of page table pages @@ -409,7 +451,7 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv, goto out_unreserve; ttm_bo_unreserve(mob->pt_bo); - + return 0; out_unreserve: @@ -429,15 +471,15 @@ out_unreserve: * *@addr according to the page table entry size. 
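vmw_otables_setup() now only chooses between the two static templates, DX and pre-DX, and hands a mutable copy to vmw_otable_batch_setup(). The selection-and-copy shape, sketched standalone with placeholder table sizes:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct sketch_otable { unsigned long size; int enabled; };

	/* Placeholder templates; the real ones carry per-OTable entry sizes. */
	static const struct sketch_otable pre_dx[] = { {4096, 1}, {8192, 1} };
	static const struct sketch_otable dx[] = { {4096, 1}, {8192, 1}, {2048, 1} };

	int main(void)
	{
		int has_dx = 1;	/* pretend the device advertises DX support */
		const struct sketch_otable *tmpl = has_dx ? dx : pre_dx;
		size_t bytes = has_dx ? sizeof(dx) : sizeof(pre_dx);
		size_t count = bytes / sizeof(tmpl[0]);

		struct sketch_otable *tables = malloc(bytes);
		if (!tables)
			return 1;
		memcpy(tables, tmpl, bytes);	/* the batch owns a mutable copy */

		printf("%zu otables selected\n", count);
		free(tables);
		return 0;
	}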
*/ #if (VMW_PPN_SIZE == 8) -static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val) +static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val) { - *((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT); + *((u64 *) *addr) = val >> PAGE_SHIFT; *addr += 2; } #else -static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val) +static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val) { - *(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT); + *(*addr)++ = val >> PAGE_SHIFT; } #endif @@ -459,7 +501,7 @@ static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter, unsigned long pt_size = num_data_pages * VMW_PPN_SIZE; unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE); unsigned long pt_page; - __le32 *addr, *save_addr; + u32 *addr, *save_addr; unsigned long i; struct page *page; @@ -574,7 +616,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv, vmw_fence_single_bo(bo, NULL); ttm_bo_unreserve(bo); } - vmw_3d_resource_dec(dev_priv, false); + vmw_fifo_resource_dec(dev_priv); } /* @@ -627,7 +669,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv, mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1; } - (void) vmw_3d_resource_inc(dev_priv, false); + vmw_fifo_resource_inc(dev_priv); cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) { @@ -640,7 +682,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv, cmd->header.size = sizeof(cmd->body); cmd->body.mobid = mob_id; cmd->body.ptDepth = mob->pt_level; - cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT); + cmd->body.base = mob->pt_root_page >> PAGE_SHIFT; cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE; vmw_fifo_commit(dev_priv, sizeof(*cmd)); @@ -648,7 +690,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv, return 0; out_no_cmd_space: - vmw_3d_resource_dec(dev_priv, false); + vmw_fifo_resource_dec(dev_priv); if (pt_set_up) ttm_bo_unref(&mob->pt_bo); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index 87e39f68e9d0..76069f093ccf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -31,8 +31,8 @@ #include <drm/ttm/ttm_placement.h> -#include "svga_overlay.h" -#include "svga_escape.h" +#include "device_include/svga_overlay.h" +#include "device_include/svga_escape.h" #define VMW_MAX_NUM_STREAMS 1 #define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE) @@ -100,7 +100,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv, { struct vmw_escape_video_flush *flush; size_t fifo_size; - bool have_so = dev_priv->sou_priv ? 
true : false; + bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object); int i, num_items; SVGAGuestPtr ptr; @@ -231,10 +231,10 @@ static int vmw_overlay_move_buffer(struct vmw_private *dev_priv, if (!pin) return vmw_dmabuf_unpin(dev_priv, buf, inter); - if (!dev_priv->sou_priv) - return vmw_dmabuf_to_vram(dev_priv, buf, true, inter); + if (dev_priv->active_display_unit == vmw_du_legacy) + return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter); - return vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, inter); + return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter); } /** @@ -453,7 +453,7 @@ int vmw_overlay_pause_all(struct vmw_private *dev_priv) static bool vmw_overlay_available(const struct vmw_private *dev_priv) { - return (dev_priv->overlay_priv != NULL && + return (dev_priv->overlay_priv != NULL && ((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) == VMW_OVERLAY_CAP_MASK)); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h index 9d0dd3a342eb..dce798053a96 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -39,19 +39,17 @@ #define VMWGFX_IRQSTATUS_PORT 0x8 struct svga_guest_mem_descriptor { - __le32 ppn; - __le32 num_pages; + u32 ppn; + u32 num_pages; }; struct svga_fifo_cmd_fence { - __le32 fence; + u32 fence; }; #define SVGA_SYNC_GENERIC 1 #define SVGA_SYNC_FIFOFULL 2 -#include "svga_types.h" - -#include "svga3d_reg.h" +#include "device_include/svga3d_reg.h" #endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 210ef15b1d09..c1912f852b42 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a @@ -31,6 +31,7 @@ #include <drm/ttm/ttm_placement.h> #include <drm/drmP.h> #include "vmwgfx_resource_priv.h" +#include "vmwgfx_binding.h" #define VMW_RES_EVICT_ERR_COUNT 10 @@ -121,6 +122,7 @@ static void vmw_resource_release(struct kref *kref) int id; struct idr *idr = &dev_priv->res_idr[res->func->res_type]; + write_lock(&dev_priv->resource_lock); res->avail = false; list_del_init(&res->lru_head); write_unlock(&dev_priv->resource_lock); @@ -143,10 +145,10 @@ static void vmw_resource_release(struct kref *kref) } if (likely(res->hw_destroy != NULL)) { - res->hw_destroy(res); mutex_lock(&dev_priv->binding_mutex); - vmw_context_binding_res_list_kill(&res->binding_head); + vmw_binding_res_list_kill(&res->binding_head); mutex_unlock(&dev_priv->binding_mutex); + res->hw_destroy(res); } id = res->id; @@ -156,20 +158,17 @@ static void vmw_resource_release(struct kref *kref) kfree(res); write_lock(&dev_priv->resource_lock); - if (id != -1) idr_remove(idr, id); + write_unlock(&dev_priv->resource_lock); } void vmw_resource_unreference(struct vmw_resource **p_res) { struct vmw_resource *res = *p_res; - struct vmw_private *dev_priv = res->dev_priv; *p_res = NULL; - write_lock(&dev_priv->resource_lock); kref_put(&res->kref, vmw_resource_release); - write_unlock(&dev_priv->resource_lock); } @@ -260,17 +259,16 @@ void vmw_resource_activate(struct vmw_resource *res, write_unlock(&dev_priv->resource_lock); } -struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv, - struct idr *idr, int id) +static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv, + struct idr *idr, int id) { struct vmw_resource *res; read_lock(&dev_priv->resource_lock); res = idr_find(idr, id); - if (res && res->avail) - kref_get(&res->kref); - else + if (!res || !res->avail || !kref_get_unless_zero(&res->kref)) res = NULL; + read_unlock(&dev_priv->resource_lock); if (unlikely(res == NULL)) @@ -900,20 +898,21 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_user_stream_size, false, true); + ttm_read_unlock(&dev_priv->reservation_sem); if (unlikely(ret != 0)) { if (ret != -ERESTARTSYS) DRM_ERROR("Out of graphics memory for stream" " creation.\n"); - goto out_unlock; - } + goto out_ret; + } stream = kmalloc(sizeof(*stream), GFP_KERNEL); if (unlikely(stream == NULL)) { ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_user_stream_size); ret = -ENOMEM; - goto out_unlock; + goto out_ret; } res = &stream->stream.res; @@ -926,7 +925,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free); if (unlikely(ret != 0)) - goto out_unlock; + goto out_ret; tmp = vmw_resource_reference(res); ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM, @@ -940,8 +939,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, arg->stream_id = res->id; out_err: vmw_resource_unreference(&res); -out_unlock: - ttm_read_unlock(&dev_priv->reservation_sem); +out_ret: return ret; } @@ -1152,14 +1150,16 @@ out_bind_failed: * command submission. * * @res: Pointer to the struct vmw_resource to unreserve. + * @switch_backup: Backup buffer has been switched. * @new_backup: Pointer to new backup buffer if command submission - * switched. - * @new_backup_offset: New backup offset if @new_backup is !NULL. + * switched. May be NULL. 
+ * @new_backup_offset: New backup offset if @switch_backup is true. * * Currently unreserving a resource means putting it back on the device's * resource lru list, so that it can be evicted if necessary. */ void vmw_resource_unreserve(struct vmw_resource *res, + bool switch_backup, struct vmw_dma_buffer *new_backup, unsigned long new_backup_offset) { @@ -1168,22 +1168,25 @@ void vmw_resource_unreserve(struct vmw_resource *res, if (!list_empty(&res->lru_head)) return; - if (new_backup && new_backup != res->backup) { - + if (switch_backup && new_backup != res->backup) { if (res->backup) { lockdep_assert_held(&res->backup->base.resv->lock.base); list_del_init(&res->mob_head); vmw_dmabuf_unreference(&res->backup); } - res->backup = vmw_dmabuf_reference(new_backup); - lockdep_assert_held(&new_backup->base.resv->lock.base); - list_add_tail(&res->mob_head, &new_backup->res_list); + if (new_backup) { + res->backup = vmw_dmabuf_reference(new_backup); + lockdep_assert_held(&new_backup->base.resv->lock.base); + list_add_tail(&res->mob_head, &new_backup->res_list); + } else { + res->backup = NULL; + } } - if (new_backup) + if (switch_backup) res->backup_offset = new_backup_offset; - if (!res->func->may_evict || res->id == -1) + if (!res->func->may_evict || res->id == -1 || res->pin_count) return; write_lock(&dev_priv->resource_lock); @@ -1259,7 +1262,8 @@ out_no_reserve: * the buffer may not be bound to the resource at this point. * */ -int vmw_resource_reserve(struct vmw_resource *res, bool no_backup) +int vmw_resource_reserve(struct vmw_resource *res, bool interruptible, + bool no_backup) { struct vmw_private *dev_priv = res->dev_priv; int ret; @@ -1270,9 +1274,13 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup) if (res->func->needs_backup && res->backup == NULL && !no_backup) { - ret = vmw_resource_buf_alloc(res, true); - if (unlikely(ret != 0)) + ret = vmw_resource_buf_alloc(res, interruptible); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed to allocate a backup buffer " + "of size %lu. bytes\n", + (unsigned long) res->backup_size); return ret; + } } return 0; @@ -1305,7 +1313,7 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf) * @res: The resource to evict. * @interruptible: Whether to wait interruptible. */ -int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible) +static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible) { struct ttm_validate_buffer val_buf; const struct vmw_res_func *func = res->func; @@ -1356,7 +1364,7 @@ int vmw_resource_validate(struct vmw_resource *res) struct ttm_validate_buffer val_buf; unsigned err_count = 0; - if (likely(!res->func->may_evict)) + if (!res->func->create) return 0; val_buf.bo = NULL; @@ -1443,9 +1451,9 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo, /** * vmw_resource_move_notify - TTM move_notify_callback * - * @bo: The TTM buffer object about to move. - * @mem: The truct ttm_mem_reg indicating to what memory - * region the move is taking place. + * @bo: The TTM buffer object about to move. + * @mem: The struct ttm_mem_reg indicating to what memory + * region the move is taking place. * * Evicts the Guest Backed hardware resource if the backup * buffer is being moved out of MOB memory. @@ -1495,6 +1503,101 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo, } } + + +/** + * vmw_query_readback_all - Read back cached query states + * + * @dx_query_mob: Buffer containing the DX query MOB + * + * Read back cached states from the device if they exist. 
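The reworked vmw_resource_unreserve() separates "the backup changed" (@switch_backup) from "a new backup exists" (@new_backup non-NULL), which lets a resource drop its backup buffer entirely. A standalone model of just that decision, with string stand-ins for buffer objects:

	#include <stdbool.h>
	#include <stdio.h>

	struct sketch_res { const char *backup; unsigned long backup_offset; };

	static void sketch_unreserve(struct sketch_res *res, bool switch_backup,
				     const char *new_backup,
				     unsigned long new_offset)
	{
		/* Only a changed backup is swapped in; it may become NULL. */
		if (switch_backup && new_backup != res->backup)
			res->backup = new_backup;
		/* The offset follows the switch flag, not the new pointer. */
		if (switch_backup)
			res->backup_offset = new_offset;
	}

	int main(void)
	{
		struct sketch_res res = { "old-mob", 0 };

		sketch_unreserve(&res, true, NULL, 0);	/* drop the backup */
		printf("backup: %s\n", res.backup ? res.backup : "(none)");
		return 0;
	}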
This function + * assumes binding_mutex is held. + */ +int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob) +{ + struct vmw_resource *dx_query_ctx; + struct vmw_private *dev_priv; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXReadbackAllQuery body; + } *cmd; + + + /* No query bound, so do nothing */ + if (!dx_query_mob || !dx_query_mob->dx_query_ctx) + return 0; + + dx_query_ctx = dx_query_mob->dx_query_ctx; + dev_priv = dx_query_ctx->dev_priv; + + cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for " + "query MOB read back.\n"); + return -ENOMEM; + } + + cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY; + cmd->header.size = sizeof(cmd->body); + cmd->body.cid = dx_query_ctx->id; + + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + + /* Triggers a rebind the next time affected context is bound */ + dx_query_mob->dx_query_ctx = NULL; + + return 0; +} + + + +/** + * vmw_query_move_notify - Read back cached query states + * + * @bo: The TTM buffer object about to move. + * @mem: The memory region @bo is moving to. + * + * Called before the query MOB is swapped out to read back cached query + * states from the device. + */ +void vmw_query_move_notify(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem) +{ + struct vmw_dma_buffer *dx_query_mob; + struct ttm_bo_device *bdev = bo->bdev; + struct vmw_private *dev_priv; + + + dev_priv = container_of(bdev, struct vmw_private, bdev); + + mutex_lock(&dev_priv->binding_mutex); + + dx_query_mob = container_of(bo, struct vmw_dma_buffer, base); + if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) { + mutex_unlock(&dev_priv->binding_mutex); + return; + } + + /* If BO is being moved from MOB to system memory */ + if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) { + struct vmw_fence_obj *fence; + + (void) vmw_query_readback_all(dx_query_mob); + mutex_unlock(&dev_priv->binding_mutex); + + /* Create a fence and attach the BO to it */ + (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); + vmw_fence_single_bo(bo, fence); + + if (fence != NULL) + vmw_fence_obj_unreference(&fence); + + (void) ttm_bo_wait(bo, false, false, false); + } else + mutex_unlock(&dev_priv->binding_mutex); + +} + /** * vmw_resource_needs_backup - Return whether a resource needs a backup buffer. * @@ -1573,3 +1676,107 @@ void vmw_resource_evict_all(struct vmw_private *dev_priv) mutex_unlock(&dev_priv->cmdbuf_mutex); } + +/** + * vmw_resource_pin - Add a pin reference on a resource + * + * @res: The resource to add a pin reference on + * + * This function adds a pin reference, and if needed validates the resource. + * Having a pin reference means that the resource can never be evicted, and + * its id will never change as long as there is a pin reference. + * This function returns 0 on success and a negative error code on failure. 
+ */ +int vmw_resource_pin(struct vmw_resource *res, bool interruptible) +{ + struct vmw_private *dev_priv = res->dev_priv; + int ret; + + ttm_write_lock(&dev_priv->reservation_sem, interruptible); + mutex_lock(&dev_priv->cmdbuf_mutex); + ret = vmw_resource_reserve(res, interruptible, false); + if (ret) + goto out_no_reserve; + + if (res->pin_count == 0) { + struct vmw_dma_buffer *vbo = NULL; + + if (res->backup) { + vbo = res->backup; + + ttm_bo_reserve(&vbo->base, interruptible, false, false, + NULL); + if (!vbo->pin_count) { + ret = ttm_bo_validate + (&vbo->base, + res->func->backup_placement, + interruptible, false); + if (ret) { + ttm_bo_unreserve(&vbo->base); + goto out_no_validate; + } + } + + /* Do we really need to pin the MOB as well? */ + vmw_bo_pin_reserved(vbo, true); + } + ret = vmw_resource_validate(res); + if (vbo) + ttm_bo_unreserve(&vbo->base); + if (ret) + goto out_no_validate; + } + res->pin_count++; + +out_no_validate: + vmw_resource_unreserve(res, false, NULL, 0UL); +out_no_reserve: + mutex_unlock(&dev_priv->cmdbuf_mutex); + ttm_write_unlock(&dev_priv->reservation_sem); + + return ret; +} + +/** + * vmw_resource_unpin - Remove a pin reference from a resource + * + * @res: The resource to remove a pin reference from + * + * Having a pin reference means that the resource can never be evicted, and + * its id will never change as long as there is a pin reference. + */ +void vmw_resource_unpin(struct vmw_resource *res) +{ + struct vmw_private *dev_priv = res->dev_priv; + int ret; + + ttm_read_lock(&dev_priv->reservation_sem, false); + mutex_lock(&dev_priv->cmdbuf_mutex); + + ret = vmw_resource_reserve(res, false, true); + WARN_ON(ret); + + WARN_ON(res->pin_count == 0); + if (--res->pin_count == 0 && res->backup) { + struct vmw_dma_buffer *vbo = res->backup; + + ttm_bo_reserve(&vbo->base, false, false, false, NULL); + vmw_bo_pin_reserved(vbo, false); + ttm_bo_unreserve(&vbo->base); + } + + vmw_resource_unreserve(res, false, NULL, 0UL); + + mutex_unlock(&dev_priv->cmdbuf_mutex); + ttm_read_unlock(&dev_priv->reservation_sem); +} + +/** + * vmw_res_type - Return the resource type + * + * @res: Pointer to the resource + */ +enum vmw_res_type vmw_res_type(const struct vmw_resource *res) +{ + return res->func->res_type; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h index f3adeed2854c..5994ef6265e0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2012-2014 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -30,6 +30,12 @@ #include "vmwgfx_drv.h" +enum vmw_cmdbuf_res_state { + VMW_CMDBUF_RES_COMMITTED, + VMW_CMDBUF_RES_ADD, + VMW_CMDBUF_RES_DEL +}; + /** * struct vmw_user_resource_conv - Identify a derived user-exported resource * type and provide a function to convert its ttm_base_object pointer to @@ -55,8 +61,10 @@ struct vmw_user_resource_conv { * @bind: Bind a hardware resource to persistent buffer storage. * @unbind: Unbind a hardware resource from persistent * buffer storage. + * @commit_notify: If the resource is a command buffer managed resource, + * callback to notify that a define or remove command + * has been committed to the device. 
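vmw_resource_pin()/vmw_resource_unpin() above act as a validate-on-first-use counter: only the 0 to 1 transition validates and pins the backup, and only the 1 to 0 transition unpins it. A counter-only model without the locking, purely illustrative:

	#include <assert.h>
	#include <stdio.h>

	struct sketch_res { unsigned pin_count; };

	static void sketch_pin(struct sketch_res *res)
	{
		if (res->pin_count == 0)
			printf("validate resource + pin backup\n"); /* 0 -> 1 only */
		res->pin_count++;
	}

	static void sketch_unpin(struct sketch_res *res)
	{
		assert(res->pin_count > 0);
		if (--res->pin_count == 0)
			printf("unpin backup\n"); /* 1 -> 0 only */
	}

	int main(void)
	{
		struct sketch_res res = { 0 };

		/* Nested pins: only the outermost pair touches the backup. */
		sketch_pin(&res);
		sketch_pin(&res);
		sketch_unpin(&res);
		sketch_unpin(&res);
		return 0;
	}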
*/ - struct vmw_res_func { enum vmw_res_type res_type; bool needs_backup; @@ -71,6 +79,8 @@ struct vmw_res_func { int (*unbind) (struct vmw_resource *res, bool readback, struct ttm_validate_buffer *val_buf); + void (*commit_notify)(struct vmw_resource *res, + enum vmw_cmdbuf_res_state state); }; int vmw_resource_alloc_id(struct vmw_resource *res); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 7dc591d04d9a..b96d1ab610c5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -36,10 +36,55 @@ #define vmw_connector_to_sou(x) \ container_of(x, struct vmw_screen_object_unit, base.connector) +/** + * struct vmw_kms_sou_surface_dirty - Closure structure for + * blit surface to screen command. + * @base: The base type we derive from. Used by vmw_kms_helper_dirty(). + * @left: Left side of bounding box. + * @right: Right side of bounding box. + * @top: Top side of bounding box. + * @bottom: Bottom side of bounding box. + * @dst_x: Difference between source clip rects and framebuffer coordinates. + * @dst_y: Difference between source clip rects and framebuffer coordinates. + * @sid: Surface id of surface to copy from. + */ +struct vmw_kms_sou_surface_dirty { + struct vmw_kms_dirty base; + s32 left, right, top, bottom; + s32 dst_x, dst_y; + u32 sid; +}; + +/* + * SVGA commands that are used by this code. Please see the device headers + * for explanation. + */ +struct vmw_kms_sou_readback_blit { + uint32 header; + SVGAFifoCmdBlitScreenToGMRFB body; +}; + +struct vmw_kms_sou_dmabuf_blit { + uint32 header; + SVGAFifoCmdBlitGMRFBToScreen body; +}; + +struct vmw_kms_sou_dirty_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdBlitSurfaceToScreen body; +}; + + +/* + * Other structs. 
+ */ + struct vmw_screen_object_display { unsigned num_implicit; struct vmw_framebuffer *implicit_fb; + SVGAFifoCmdDefineGMRFB cur; + struct vmw_dma_buffer *pinned_gmrfb; }; /** @@ -57,7 +102,7 @@ struct vmw_screen_object_unit { static void vmw_sou_destroy(struct vmw_screen_object_unit *sou) { - vmw_display_unit_cleanup(&sou->base); + vmw_du_cleanup(&sou->base); kfree(sou); } @@ -72,7 +117,7 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc) } static void vmw_sou_del_active(struct vmw_private *vmw_priv, - struct vmw_screen_object_unit *sou) + struct vmw_screen_object_unit *sou) { struct vmw_screen_object_display *ld = vmw_priv->sou_priv; @@ -84,8 +129,8 @@ static void vmw_sou_del_active(struct vmw_private *vmw_priv, } static void vmw_sou_add_active(struct vmw_private *vmw_priv, - struct vmw_screen_object_unit *sou, - struct vmw_framebuffer *vfb) + struct vmw_screen_object_unit *sou, + struct vmw_framebuffer *vfb) { struct vmw_screen_object_display *ld = vmw_priv->sou_priv; @@ -202,14 +247,7 @@ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv, static void vmw_sou_backing_free(struct vmw_private *dev_priv, struct vmw_screen_object_unit *sou) { - struct ttm_buffer_object *bo; - - if (unlikely(sou->buffer == NULL)) - return; - - bo = &sou->buffer->base; - ttm_bo_unref(&bo); - sou->buffer = NULL; + vmw_dmabuf_unreference(&sou->buffer); sou->buffer_size = 0; } @@ -274,13 +312,13 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set) dev_priv = vmw_priv(crtc->dev); if (set->num_connectors > 1) { - DRM_ERROR("to many connectors\n"); + DRM_ERROR("Too many connectors\n"); return -EINVAL; } if (set->num_connectors == 1 && set->connectors[0] != &sou->base.connector) { - DRM_ERROR("connector doesn't match %p %p\n", + DRM_ERROR("Connector doesn't match %p %p\n", set->connectors[0], &sou->base.connector); return -EINVAL; } @@ -331,7 +369,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set) return -EINVAL; } - vmw_fb_off(dev_priv); + vmw_svga_enable(dev_priv); if (mode->hdisplay != crtc->mode.hdisplay || mode->vdisplay != crtc->mode.vdisplay) { @@ -390,6 +428,108 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set) return 0; } +/** + * Returns if this unit can be page flipped. + * Must be called with the mode_config mutex held. + */ +static bool vmw_sou_screen_object_flippable(struct vmw_private *dev_priv, + struct drm_crtc *crtc) +{ + struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc); + + if (!sou->base.is_implicit) + return true; + + if (dev_priv->sou_priv->num_implicit != 1) + return false; + + return true; +} + +/** + * Update the implicit fb to the current fb of this crtc. + * Must be called with the mode_config mutex held. 
+ */ +static void vmw_sou_update_implicit_fb(struct vmw_private *dev_priv, + struct drm_crtc *crtc) +{ + struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc); + + BUG_ON(!sou->base.is_implicit); + + dev_priv->sou_priv->implicit_fb = + vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb); +} + +static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags) +{ + struct vmw_private *dev_priv = vmw_priv(crtc->dev); + struct drm_framebuffer *old_fb = crtc->primary->fb; + struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb); + struct vmw_fence_obj *fence = NULL; + struct drm_clip_rect clips; + int ret; + + /* require ScreenObject support for page flipping */ + if (!dev_priv->sou_priv) + return -ENOSYS; + + if (!vmw_sou_screen_object_flippable(dev_priv, crtc)) + return -EINVAL; + + crtc->primary->fb = fb; + + /* do a full screen dirty update */ + clips.x1 = clips.y1 = 0; + clips.x2 = fb->width; + clips.y2 = fb->height; + + if (vfb->dmabuf) + ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb, + &clips, 1, 1, + true, &fence); + else + ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, + &clips, NULL, NULL, + 0, 0, 1, 1, &fence); + + + if (ret != 0) + goto out_no_fence; + if (!fence) { + ret = -EINVAL; + goto out_no_fence; + } + + if (event) { + struct drm_file *file_priv = event->base.file_priv; + + ret = vmw_event_fence_action_queue(file_priv, fence, + &event->base, + &event->event.tv_sec, + &event->event.tv_usec, + true); + } + + /* + * No need to hold on to this now. The only cleanup + * we need to do if we fail is unref the fence. + */ + vmw_fence_obj_unreference(&fence); + + if (vmw_crtc_to_du(crtc)->is_implicit) + vmw_sou_update_implicit_fb(dev_priv, crtc); + + return ret; + +out_no_fence: + crtc->primary->fb = old_fb; + return ret; +} + static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = { .save = vmw_du_crtc_save, .restore = vmw_du_crtc_restore, @@ -398,7 +538,7 @@ static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = { .gamma_set = vmw_du_crtc_gamma_set, .destroy = vmw_sou_crtc_destroy, .set_config = vmw_sou_crtc_set_config, - .page_flip = vmw_du_page_flip, + .page_flip = vmw_sou_crtc_page_flip, }; /* @@ -423,7 +563,7 @@ static void vmw_sou_connector_destroy(struct drm_connector *connector) vmw_sou_destroy(vmw_connector_to_sou(connector)); } -static struct drm_connector_funcs vmw_legacy_connector_funcs = { +static struct drm_connector_funcs vmw_sou_connector_funcs = { .dpms = vmw_du_connector_dpms, .save = vmw_du_connector_save, .restore = vmw_du_connector_restore, @@ -458,7 +598,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) sou->base.pref_mode = NULL; sou->base.is_implicit = true; - drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, + drm_connector_init(dev, connector, &vmw_sou_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL); connector->status = vmw_du_connector_detect(connector, true); @@ -481,7 +621,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) return 0; } -int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv) +int vmw_kms_sou_init_display(struct vmw_private *dev_priv) { struct drm_device *dev = dev_priv->dev; int i, ret; @@ -516,7 +656,9 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv) for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) vmw_sou_init(dev_priv, i); - DRM_INFO("Screen objects system initialized\n"); + dev_priv->active_display_unit = vmw_du_screen_object; + + 
DRM_INFO("Screen Objects Display Unit initialized\n"); return 0; @@ -529,7 +671,7 @@ err_no_mem: return ret; } -int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv) +int vmw_kms_sou_close_display(struct vmw_private *dev_priv) { struct drm_device *dev = dev_priv->dev; @@ -543,35 +685,369 @@ int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv) return 0; } +static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv, + struct vmw_framebuffer *framebuffer) +{ + struct vmw_dma_buffer *buf = + container_of(framebuffer, struct vmw_framebuffer_dmabuf, + base)->buffer; + int depth = framebuffer->base.depth; + struct { + uint32_t header; + SVGAFifoCmdDefineGMRFB body; + } *cmd; + + /* Emulate RGBA support, contrary to svga_reg.h this is not + * supported by hosts. This is only a problem if we are reading + * this value later and expecting what we uploaded back. + */ + if (depth == 32) + depth = 24; + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (!cmd) { + DRM_ERROR("Out of fifo space for dirty framebuffer command.\n"); + return -ENOMEM; + } + + cmd->header = SVGA_CMD_DEFINE_GMRFB; + cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel; + cmd->body.format.colorDepth = depth; + cmd->body.format.reserved = 0; + cmd->body.bytesPerLine = framebuffer->base.pitches[0]; + /* Buffer is reserved in vram or GMR */ + vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr); + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + + return 0; +} + /** - * Returns if this unit can be page flipped. - * Must be called with the mode_config mutex held. + * vmw_sou_surface_fifo_commit - Callback to fill in and submit a + * blit surface to screen command. + * + * @dirty: The closure structure. + * + * Fills in the missing fields in the command, and translates the cliprects + * to match the destination bounding box encoded. */ -bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv, - struct drm_crtc *crtc) +static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty) { - struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc); + struct vmw_kms_sou_surface_dirty *sdirty = + container_of(dirty, typeof(*sdirty), base); + struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd; + s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x; + s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y; + size_t region_size = dirty->num_hits * sizeof(SVGASignedRect); + SVGASignedRect *blit = (SVGASignedRect *) &cmd[1]; + int i; + + cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN; + cmd->header.size = sizeof(cmd->body) + region_size; + + /* + * Use the destination bounding box to specify destination - and + * source bounding regions. + */ + cmd->body.destRect.left = sdirty->left; + cmd->body.destRect.right = sdirty->right; + cmd->body.destRect.top = sdirty->top; + cmd->body.destRect.bottom = sdirty->bottom; + + cmd->body.srcRect.left = sdirty->left + trans_x; + cmd->body.srcRect.right = sdirty->right + trans_x; + cmd->body.srcRect.top = sdirty->top + trans_y; + cmd->body.srcRect.bottom = sdirty->bottom + trans_y; + + cmd->body.srcImage.sid = sdirty->sid; + cmd->body.destScreenId = dirty->unit->unit; + + /* Blits are relative to the destination rect. Translate. 
*/ + for (i = 0; i < dirty->num_hits; ++i, ++blit) { + blit->left -= sdirty->left; + blit->right -= sdirty->left; + blit->top -= sdirty->top; + blit->bottom -= sdirty->top; + } - if (!sou->base.is_implicit) - return true; + vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd)); - if (dev_priv->sou_priv->num_implicit != 1) - return false; + sdirty->left = sdirty->top = S32_MAX; + sdirty->right = sdirty->bottom = S32_MIN; +} - return true; +/** + * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect. + * + * @dirty: The closure structure + * + * Encodes a SVGASignedRect cliprect and updates the bounding box of the + * BLIT_SURFACE_TO_SCREEN command. + */ +static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty) +{ + struct vmw_kms_sou_surface_dirty *sdirty = + container_of(dirty, typeof(*sdirty), base); + struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd; + SVGASignedRect *blit = (SVGASignedRect *) &cmd[1]; + + /* Destination rect. */ + blit += dirty->num_hits; + blit->left = dirty->unit_x1; + blit->top = dirty->unit_y1; + blit->right = dirty->unit_x2; + blit->bottom = dirty->unit_y2; + + /* Destination bounding box */ + sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1); + sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1); + sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2); + sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2); + + dirty->num_hits++; } /** - * Update the implicit fb to the current fb of this crtc. - * Must be called with the mode_config mutex held. + * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer + * + * @dev_priv: Pointer to the device private structure. + * @framebuffer: Pointer to the surface-buffer backed framebuffer. + * @clips: Array of clip rects. Either @clips or @vclips must be NULL. + * @vclips: Alternate array of clip rects. Either @clips or @vclips must + * be NULL. + * @srf: Pointer to surface to blit from. If NULL, the surface attached + * to @framebuffer will be used. + * @dest_x: X coordinate offset to align @srf with framebuffer coordinates. + * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates. + * @num_clips: Number of clip rects in @clips. + * @inc: Increment to use when looping over @clips. + * @out_fence: If non-NULL, will return a ref-counted pointer to a + * struct vmw_fence_obj. The returned fence pointer may be NULL in which + * case the device has already synchronized. + * + * Returns 0 on success, negative error code on failure. -ERESTARTSYS if + * interrupted. 
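+ *
+ * As a usage illustration, the page-flip path above dirties the whole
+ * framebuffer with a single clip rect and no explicit surface:
+ *
+ *	ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, &clips, NULL,
+ *					   NULL, 0, 0, 1, 1, &fence);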
*/ -void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv, - struct drm_crtc *crtc) +int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, + struct vmw_framebuffer *framebuffer, + struct drm_clip_rect *clips, + struct drm_vmw_rect *vclips, + struct vmw_resource *srf, + s32 dest_x, + s32 dest_y, + unsigned num_clips, int inc, + struct vmw_fence_obj **out_fence) { - struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc); + struct vmw_framebuffer_surface *vfbs = + container_of(framebuffer, typeof(*vfbs), base); + struct vmw_kms_sou_surface_dirty sdirty; + int ret; - BUG_ON(!sou->base.is_implicit); + if (!srf) + srf = &vfbs->surface->res; - dev_priv->sou_priv->implicit_fb = - vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb); + ret = vmw_kms_helper_resource_prepare(srf, true); + if (ret) + return ret; + + sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit; + sdirty.base.clip = vmw_sou_surface_clip; + sdirty.base.dev_priv = dev_priv; + sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) + + sizeof(SVGASignedRect) * num_clips; + + sdirty.sid = srf->id; + sdirty.left = sdirty.top = S32_MAX; + sdirty.right = sdirty.bottom = S32_MIN; + sdirty.dst_x = dest_x; + sdirty.dst_y = dest_y; + + ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, + dest_x, dest_y, num_clips, inc, + &sdirty.base); + vmw_kms_helper_resource_finish(srf, out_fence); + + return ret; +} + +/** + * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of readback clips. + * + * @dirty: The closure structure. + * + * Commits a previously built command buffer of readback clips. + */ +static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty) +{ + vmw_fifo_commit(dirty->dev_priv, + sizeof(struct vmw_kms_sou_dmabuf_blit) * + dirty->num_hits); +} + +/** + * vmw_sou_dmabuf_clip - Callback to encode a readback cliprect. + * + * @dirty: The closure structure + * + * Encodes a BLIT_GMRFB_TO_SCREEN cliprect. + */ +static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty) +{ + struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd; + + blit += dirty->num_hits; + blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN; + blit->body.destScreenId = dirty->unit->unit; + blit->body.srcOrigin.x = dirty->fb_x; + blit->body.srcOrigin.y = dirty->fb_y; + blit->body.destRect.left = dirty->unit_x1; + blit->body.destRect.top = dirty->unit_y1; + blit->body.destRect.right = dirty->unit_x2; + blit->body.destRect.bottom = dirty->unit_y2; + dirty->num_hits++; +} + +/** + * vmw_kms_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer + * + * @dev_priv: Pointer to the device private structure. + * @framebuffer: Pointer to the dma-buffer backed framebuffer. + * @clips: Array of clip rects. + * @num_clips: Number of clip rects in @clips. + * @increment: Increment to use when looping over @clips. + * @interruptible: Whether to perform waits interruptible if possible. + * @out_fence: If non-NULL, will return a ref-counted pointer to a + * struct vmw_fence_obj. The returned fence pointer may be NULL in which + * case the device has already synchronized. + * + * Returns 0 on success, negative error code on failure. -ERESTARTSYS if + * interrupted. 
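+ *
+ * For example, the page-flip path above dirties a dma-buffer backed
+ * framebuffer with one full-screen clip rect, waiting interruptibly:
+ *
+ *	ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb, &clips, 1, 1,
+ *					  true, &fence);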
+ */ +int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv, + struct vmw_framebuffer *framebuffer, + struct drm_clip_rect *clips, + unsigned num_clips, int increment, + bool interruptible, + struct vmw_fence_obj **out_fence) +{ + struct vmw_dma_buffer *buf = + container_of(framebuffer, struct vmw_framebuffer_dmabuf, + base)->buffer; + struct vmw_kms_dirty dirty; + int ret; + + ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible, + false); + if (ret) + return ret; + + ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer); + if (unlikely(ret != 0)) + goto out_revert; + + dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit; + dirty.clip = vmw_sou_dmabuf_clip; + dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) * + num_clips; + ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL, + 0, 0, num_clips, increment, &dirty); + vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL); + + return ret; + +out_revert: + vmw_kms_helper_buffer_revert(buf); + + return ret; +} + + +/** + * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips. + * + * @dirty: The closure structure. + * + * Commits a previously built command buffer of readback clips. + */ +static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty) +{ + vmw_fifo_commit(dirty->dev_priv, + sizeof(struct vmw_kms_sou_readback_blit) * + dirty->num_hits); +} + +/** + * vmw_sou_readback_clip - Callback to encode a readback cliprect. + * + * @dirty: The closure structure + * + * Encodes a BLIT_SCREEN_TO_GMRFB cliprect. + */ +static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty) +{ + struct vmw_kms_sou_readback_blit *blit = dirty->cmd; + + blit += dirty->num_hits; + blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB; + blit->body.srcScreenId = dirty->unit->unit; + blit->body.destOrigin.x = dirty->fb_x; + blit->body.destOrigin.y = dirty->fb_y; + blit->body.srcRect.left = dirty->unit_x1; + blit->body.srcRect.top = dirty->unit_y1; + blit->body.srcRect.right = dirty->unit_x2; + blit->body.srcRect.bottom = dirty->unit_y2; + dirty->num_hits++; +} + +/** + * vmw_kms_sou_readback - Perform a readback from the screen object system to + * a dma-buffer backed framebuffer. + * + * @dev_priv: Pointer to the device private structure. + * @file_priv: Pointer to a struct drm_file identifying the caller. + * Must be set to NULL if @user_fence_rep is NULL. + * @vfb: Pointer to the dma-buffer backed framebuffer. + * @user_fence_rep: User-space provided structure for fence information. + * Must be set to non-NULL if @file_priv is non-NULL. + * @vclips: Array of clip rects. + * @num_clips: Number of clip rects in @vclips. + * + * Returns 0 on success, negative error code on failure. -ERESTARTSYS if + * interrupted. 
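+ *
+ * Note that this shares the GMRFB setup with the dirty path (see
+ * do_dmabuf_define_gmrfb()); only the blit direction differs, encoding
+ * SVGA_CMD_BLIT_SCREEN_TO_GMRFB instead of SVGA_CMD_BLIT_GMRFB_TO_SCREEN.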
+ */ +int vmw_kms_sou_readback(struct vmw_private *dev_priv, + struct drm_file *file_priv, + struct vmw_framebuffer *vfb, + struct drm_vmw_fence_rep __user *user_fence_rep, + struct drm_vmw_rect *vclips, + uint32_t num_clips) +{ + struct vmw_dma_buffer *buf = + container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer; + struct vmw_kms_dirty dirty; + int ret; + + ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false); + if (ret) + return ret; + + ret = do_dmabuf_define_gmrfb(dev_priv, vfb); + if (unlikely(ret != 0)) + goto out_revert; + + dirty.fifo_commit = vmw_sou_readback_fifo_commit; + dirty.clip = vmw_sou_readback_clip; + dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) * + num_clips; + ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips, + 0, 0, num_clips, 1, &dirty); + vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL, + user_fence_rep); + + return ret; + +out_revert: + vmw_kms_helper_buffer_revert(buf); + + return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index 6a4584a43aa6..bba1ee395478 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -27,12 +27,15 @@ #include "vmwgfx_drv.h" #include "vmwgfx_resource_priv.h" +#include "vmwgfx_binding.h" #include "ttm/ttm_placement.h" struct vmw_shader { struct vmw_resource res; SVGA3dShaderType type; uint32_t size; + uint8_t num_input_sig; + uint8_t num_output_sig; }; struct vmw_user_shader { @@ -40,8 +43,18 @@ struct vmw_user_shader { struct vmw_shader shader; }; +struct vmw_dx_shader { + struct vmw_resource res; + struct vmw_resource *ctx; + struct vmw_resource *cotable; + u32 id; + bool committed; + struct list_head cotable_head; +}; + static uint64_t vmw_user_shader_size; static uint64_t vmw_shader_size; +static size_t vmw_shader_dx_size; static void vmw_user_shader_free(struct vmw_resource *res); static struct vmw_resource * @@ -55,6 +68,18 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res, struct ttm_validate_buffer *val_buf); static int vmw_gb_shader_destroy(struct vmw_resource *res); +static int vmw_dx_shader_create(struct vmw_resource *res); +static int vmw_dx_shader_bind(struct vmw_resource *res, + struct ttm_validate_buffer *val_buf); +static int vmw_dx_shader_unbind(struct vmw_resource *res, + bool readback, + struct ttm_validate_buffer *val_buf); +static void vmw_dx_shader_commit_notify(struct vmw_resource *res, + enum vmw_cmdbuf_res_state state); +static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type); +static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type); +static uint64_t vmw_user_shader_size; + static const struct vmw_user_resource_conv user_shader_conv = { .object_type = VMW_RES_SHADER, .base_obj_to_res = vmw_user_shader_base_to_res, @@ -77,6 +102,24 @@ static const struct vmw_res_func vmw_gb_shader_func = { .unbind = vmw_gb_shader_unbind }; +static const struct vmw_res_func vmw_dx_shader_func = { + .res_type = vmw_res_shader, + .needs_backup = true, + .may_evict = false, + .type_name = "dx shaders", + .backup_placement = &vmw_mob_placement, + .create = vmw_dx_shader_create, + /* + * The destroy callback is only called with a 
committed resource on + * context destroy, in which case we destroy the cotable anyway, + * so there's no need to destroy DX shaders separately. + */ + .destroy = NULL, + .bind = vmw_dx_shader_bind, + .unbind = vmw_dx_shader_unbind, + .commit_notify = vmw_dx_shader_commit_notify, +}; + /** * Shader management: */ @@ -87,25 +130,42 @@ vmw_res_to_shader(struct vmw_resource *res) return container_of(res, struct vmw_shader, res); } +/** + * vmw_res_to_dx_shader - typecast a struct vmw_resource to a + * struct vmw_dx_shader + * + * @res: Pointer to the struct vmw_resource. + */ +static inline struct vmw_dx_shader * +vmw_res_to_dx_shader(struct vmw_resource *res) +{ + return container_of(res, struct vmw_dx_shader, res); +} + static void vmw_hw_shader_destroy(struct vmw_resource *res) { - (void) vmw_gb_shader_destroy(res); + if (likely(res->func->destroy)) + (void) res->func->destroy(res); + else + res->id = -1; } + static int vmw_gb_shader_init(struct vmw_private *dev_priv, struct vmw_resource *res, uint32_t size, uint64_t offset, SVGA3dShaderType type, + uint8_t num_input_sig, + uint8_t num_output_sig, struct vmw_dma_buffer *byte_code, void (*res_free) (struct vmw_resource *res)) { struct vmw_shader *shader = vmw_res_to_shader(res); int ret; - ret = vmw_resource_init(dev_priv, res, true, - res_free, &vmw_gb_shader_func); - + ret = vmw_resource_init(dev_priv, res, true, res_free, + &vmw_gb_shader_func); if (unlikely(ret != 0)) { if (res_free) @@ -122,11 +182,17 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv, } shader->size = size; shader->type = type; + shader->num_input_sig = num_input_sig; + shader->num_output_sig = num_output_sig; vmw_resource_activate(res, vmw_hw_shader_destroy); return 0; } +/* + * GB shader code: + */ + static int vmw_gb_shader_create(struct vmw_resource *res) { struct vmw_private *dev_priv = res->dev_priv; @@ -165,7 +231,7 @@ static int vmw_gb_shader_create(struct vmw_resource *res) cmd->body.type = shader->type; cmd->body.sizeInBytes = shader->size; vmw_fifo_commit(dev_priv, sizeof(*cmd)); - (void) vmw_3d_resource_inc(dev_priv, false); + vmw_fifo_resource_inc(dev_priv); return 0; @@ -259,7 +325,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res) return 0; mutex_lock(&dev_priv->binding_mutex); - vmw_context_binding_res_list_scrub(&res->binding_head); + vmw_binding_res_list_scrub(&res->binding_head); cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) { @@ -275,12 +341,327 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res) vmw_fifo_commit(dev_priv, sizeof(*cmd)); mutex_unlock(&dev_priv->binding_mutex); vmw_resource_release_id(res); - vmw_3d_resource_dec(dev_priv, false); + vmw_fifo_resource_dec(dev_priv); + + return 0; +} + +/* + * DX shader code: + */ + +/** + * vmw_dx_shader_commit_notify - Notify that a shader operation has been + * committed to hardware from a user-supplied command stream. + * + * @res: Pointer to the shader resource. + * @state: Indicating whether a creation or removal has been committed. 
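+ *
+ * On VMW_CMDBUF_RES_ADD the shader is added to its cotable and exposes its
+ * device id through @res; on removal the cotable entry and the device id
+ * are dropped again.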
+ * + */ +static void vmw_dx_shader_commit_notify(struct vmw_resource *res, + enum vmw_cmdbuf_res_state state) +{ + struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); + struct vmw_private *dev_priv = res->dev_priv; + + if (state == VMW_CMDBUF_RES_ADD) { + mutex_lock(&dev_priv->binding_mutex); + vmw_cotable_add_resource(shader->cotable, + &shader->cotable_head); + shader->committed = true; + res->id = shader->id; + mutex_unlock(&dev_priv->binding_mutex); + } else { + mutex_lock(&dev_priv->binding_mutex); + list_del_init(&shader->cotable_head); + shader->committed = false; + res->id = -1; + mutex_unlock(&dev_priv->binding_mutex); + } +} + +/** + * vmw_dx_shader_unscrub - Have the device reattach a MOB to a DX shader. + * + * @res: The shader resource + * + * This function reverts a scrub operation. + */ +static int vmw_dx_shader_unscrub(struct vmw_resource *res) +{ + struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); + struct vmw_private *dev_priv = res->dev_priv; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXBindShader body; + } *cmd; + + if (!list_empty(&shader->cotable_head) || !shader->committed) + return 0; + + cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), + shader->ctx->id); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for shader " + "scrubbing.\n"); + return -ENOMEM; + } + + cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER; + cmd->header.size = sizeof(cmd->body); + cmd->body.cid = shader->ctx->id; + cmd->body.shid = shader->id; + cmd->body.mobid = res->backup->base.mem.start; + cmd->body.offsetInBytes = res->backup_offset; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + + vmw_cotable_add_resource(shader->cotable, &shader->cotable_head); + + return 0; +} + +/** + * vmw_dx_shader_create - The DX shader create callback + * + * @res: The DX shader resource + * + * The create callback is called as part of resource validation and + * makes sure that we unscrub the shader if it's previously been scrubbed. + */ +static int vmw_dx_shader_create(struct vmw_resource *res) +{ + struct vmw_private *dev_priv = res->dev_priv; + struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); + int ret = 0; + + WARN_ON_ONCE(!shader->committed); + + if (!list_empty(&res->mob_head)) { + mutex_lock(&dev_priv->binding_mutex); + ret = vmw_dx_shader_unscrub(res); + mutex_unlock(&dev_priv->binding_mutex); + } + + res->id = shader->id; + return ret; +} + +/** + * vmw_dx_shader_bind - The DX shader bind callback + * + * @res: The DX shader resource + * @val_buf: Pointer to the validate buffer. + * + */ +static int vmw_dx_shader_bind(struct vmw_resource *res, + struct ttm_validate_buffer *val_buf) +{ + struct vmw_private *dev_priv = res->dev_priv; + struct ttm_buffer_object *bo = val_buf->bo; + + BUG_ON(bo->mem.mem_type != VMW_PL_MOB); + mutex_lock(&dev_priv->binding_mutex); + vmw_dx_shader_unscrub(res); + mutex_unlock(&dev_priv->binding_mutex); + + return 0; +} + +/** + * vmw_dx_shader_scrub - Have the device unbind a MOB from a DX shader. + * + * @res: The shader resource + * + * This function unbinds a MOB from the DX shader without requiring the + * MOB dma_buffer to be reserved. The driver still considers the MOB bound. + * However, once the driver eventually decides to unbind the MOB, it doesn't + * need to access the context. 
+ */ +static int vmw_dx_shader_scrub(struct vmw_resource *res) +{ + struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); + struct vmw_private *dev_priv = res->dev_priv; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXBindShader body; + } *cmd; + + if (list_empty(&shader->cotable_head)) + return 0; + + WARN_ON_ONCE(!shader->committed); + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for shader " + "scrubbing.\n"); + return -ENOMEM; + } + + cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER; + cmd->header.size = sizeof(cmd->body); + cmd->body.cid = shader->ctx->id; + cmd->body.shid = res->id; + cmd->body.mobid = SVGA3D_INVALID_ID; + cmd->body.offsetInBytes = 0; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + res->id = -1; + list_del_init(&shader->cotable_head); return 0; } /** + * vmw_dx_shader_unbind - The dx shader unbind callback. + * + * @res: The shader resource + * @readback: Whether this is a readback unbind. Currently unused. + * @val_buf: MOB buffer information. + */ +static int vmw_dx_shader_unbind(struct vmw_resource *res, + bool readback, + struct ttm_validate_buffer *val_buf) +{ + struct vmw_private *dev_priv = res->dev_priv; + struct vmw_fence_obj *fence; + int ret; + + BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB); + + mutex_lock(&dev_priv->binding_mutex); + ret = vmw_dx_shader_scrub(res); + mutex_unlock(&dev_priv->binding_mutex); + + if (ret) + return ret; + + (void) vmw_execbuf_fence_commands(NULL, dev_priv, + &fence, NULL); + vmw_fence_single_bo(val_buf->bo, fence); + + if (likely(fence != NULL)) + vmw_fence_obj_unreference(&fence); + + return 0; +} + +/** + * vmw_dx_shader_cotable_list_scrub - The cotable unbind_func callback for + * DX shaders. + * + * @dev_priv: Pointer to device private structure. + * @list: The list of cotable resources. + * @readback: Whether the call was part of a readback unbind. + * + * Scrubs all shader MOBs so that any subsequent shader unbind or shader + * destroy operation won't need to swap in the context. + */ +void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv, + struct list_head *list, + bool readback) +{ + struct vmw_dx_shader *entry, *next; + + WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); + + list_for_each_entry_safe(entry, next, list, cotable_head) { + WARN_ON(vmw_dx_shader_scrub(&entry->res)); + if (!readback) + entry->committed = false; + } +} + +/** + * vmw_dx_shader_res_free - The DX shader free callback + * + * @res: The shader resource + * + * Frees the DX shader resource and updates memory accounting. + */ +static void vmw_dx_shader_res_free(struct vmw_resource *res) +{ + struct vmw_private *dev_priv = res->dev_priv; + struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); + + vmw_resource_unreference(&shader->cotable); + kfree(shader); + ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size); +} + +/** + * vmw_dx_shader_add - Add a shader resource as a command buffer managed + * resource. + * + * @man: The command buffer resource manager. + * @ctx: Pointer to the context resource. + * @user_key: The id used for this shader. + * @shader_type: The shader type. + * @list: The list of staged command buffer managed resources. 
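+ *
+ * Note that the @user_key name-space is shared across DX shader types, so
+ * the resource is hashed with vmw_shader_key(@user_key, 0) rather than
+ * with the real shader type (see the comment in the function body).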
+ */ +int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man, + struct vmw_resource *ctx, + u32 user_key, + SVGA3dShaderType shader_type, + struct list_head *list) +{ + struct vmw_dx_shader *shader; + struct vmw_resource *res; + struct vmw_private *dev_priv = ctx->dev_priv; + int ret; + + if (!vmw_shader_dx_size) + vmw_shader_dx_size = ttm_round_pot(sizeof(*shader)); + + if (!vmw_shader_id_ok(user_key, shader_type)) + return -EINVAL; + + ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size, + false, true); + if (ret) { + if (ret != -ERESTARTSYS) + DRM_ERROR("Out of graphics memory for shader " + "creation.\n"); + return ret; + } + + shader = kmalloc(sizeof(*shader), GFP_KERNEL); + if (!shader) { + ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size); + return -ENOMEM; + } + + res = &shader->res; + shader->ctx = ctx; + shader->cotable = vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER); + shader->id = user_key; + shader->committed = false; + INIT_LIST_HEAD(&shader->cotable_head); + ret = vmw_resource_init(dev_priv, res, true, + vmw_dx_shader_res_free, &vmw_dx_shader_func); + if (ret) + goto out_resource_init; + + /* + * The user_key name-space is not per shader type for DX shaders, + * so when hashing, use a single zero shader type. + */ + ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader, + vmw_shader_key(user_key, 0), + res, list); + if (ret) + goto out_resource_init; + + res->id = shader->id; + vmw_resource_activate(res, vmw_hw_shader_destroy); + +out_resource_init: + vmw_resource_unreference(&res); + + return ret; +} + + + +/** * User-space shader management: */ @@ -341,6 +722,8 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv, size_t shader_size, size_t offset, SVGA3dShaderType shader_type, + uint8_t num_input_sig, + uint8_t num_output_sig, struct ttm_object_file *tfile, u32 *handle) { @@ -383,7 +766,8 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv, */ ret = vmw_gb_shader_init(dev_priv, res, shader_size, - offset, shader_type, buffer, + offset, shader_type, num_input_sig, + num_output_sig, buffer, vmw_user_shader_free); if (unlikely(ret != 0)) goto out; @@ -407,11 +791,11 @@ out: } -struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buffer, - size_t shader_size, - size_t offset, - SVGA3dShaderType shader_type) +static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, + struct vmw_dma_buffer *buffer, + size_t shader_size, + size_t offset, + SVGA3dShaderType shader_type) { struct vmw_shader *shader; struct vmw_resource *res; @@ -449,7 +833,7 @@ struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, * From here on, the destructor takes over resource freeing. 
*/ ret = vmw_gb_shader_init(dev_priv, res, shader_size, - offset, shader_type, buffer, + offset, shader_type, 0, 0, buffer, vmw_shader_free); out_err: @@ -457,19 +841,20 @@ out_err: } -int vmw_shader_define_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) +static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, + enum drm_vmw_shader_type shader_type_drm, + u32 buffer_handle, size_t size, size_t offset, + uint8_t num_input_sig, uint8_t num_output_sig, + uint32_t *shader_handle) { struct vmw_private *dev_priv = vmw_priv(dev); - struct drm_vmw_shader_create_arg *arg = - (struct drm_vmw_shader_create_arg *)data; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct vmw_dma_buffer *buffer = NULL; SVGA3dShaderType shader_type; int ret; - if (arg->buffer_handle != SVGA3D_INVALID_ID) { - ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle, + if (buffer_handle != SVGA3D_INVALID_ID) { + ret = vmw_user_dmabuf_lookup(tfile, buffer_handle, &buffer); if (unlikely(ret != 0)) { DRM_ERROR("Could not find buffer for shader " @@ -478,23 +863,20 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data, } if ((u64)buffer->base.num_pages * PAGE_SIZE < - (u64)arg->size + (u64)arg->offset) { + (u64)size + (u64)offset) { DRM_ERROR("Illegal buffer- or shader size.\n"); ret = -EINVAL; goto out_bad_arg; } } - switch (arg->shader_type) { + switch (shader_type_drm) { case drm_vmw_shader_type_vs: shader_type = SVGA3D_SHADERTYPE_VS; break; case drm_vmw_shader_type_ps: shader_type = SVGA3D_SHADERTYPE_PS; break; - case drm_vmw_shader_type_gs: - shader_type = SVGA3D_SHADERTYPE_GS; - break; default: DRM_ERROR("Illegal shader type.\n"); ret = -EINVAL; @@ -505,8 +887,9 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data, if (unlikely(ret != 0)) goto out_bad_arg; - ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset, - shader_type, tfile, &arg->shader_handle); + ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset, + shader_type, num_input_sig, + num_output_sig, tfile, shader_handle); ttm_read_unlock(&dev_priv->reservation_sem); out_bad_arg: @@ -515,7 +898,7 @@ out_bad_arg: } /** - * vmw_compat_shader_id_ok - Check whether a compat shader user key and + * vmw_shader_id_ok - Check whether a compat shader user key and * shader type are within valid bounds. * * @user_key: User space id of the shader. @@ -523,13 +906,13 @@ out_bad_arg: * * Returns true if valid false if not. */ -static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type) +static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type) { return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16; } /** - * vmw_compat_shader_key - Compute a hash key suitable for a compat shader. + * vmw_shader_key - Compute a hash key suitable for a compat shader. * * @user_key: User space id of the shader. * @shader_type: Shader type. @@ -537,13 +920,13 @@ static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type) * Returns a hash key suitable for a command buffer managed resource * manager hash table. */ -static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type) +static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type) { return user_key | (shader_type << 20); } /** - * vmw_compat_shader_remove - Stage a compat shader for removal. + * vmw_shader_remove - Stage a compat shader for removal. * * @man: Pointer to the compat shader manager identifying the shader namespace. 
* @user_key: The key that is used to identify the shader. The key is @@ -551,17 +934,18 @@ static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type) * @shader_type: Shader type. * @list: Caller's list of staged command buffer resource actions. */ -int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man, - u32 user_key, SVGA3dShaderType shader_type, - struct list_head *list) +int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man, + u32 user_key, SVGA3dShaderType shader_type, + struct list_head *list) { - if (!vmw_compat_shader_id_ok(user_key, shader_type)) + struct vmw_resource *dummy; + + if (!vmw_shader_id_ok(user_key, shader_type)) return -EINVAL; - return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader, - vmw_compat_shader_key(user_key, - shader_type), - list); + return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_shader, + vmw_shader_key(user_key, shader_type), + list, &dummy); } /** @@ -591,7 +975,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, int ret; struct vmw_resource *res; - if (!vmw_compat_shader_id_ok(user_key, shader_type)) + if (!vmw_shader_id_ok(user_key, shader_type)) return -EINVAL; /* Allocate and pin a DMA buffer */ @@ -628,8 +1012,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, if (unlikely(ret != 0)) goto no_reserve; - ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader, - vmw_compat_shader_key(user_key, shader_type), + ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader, + vmw_shader_key(user_key, shader_type), res, list); vmw_resource_unreference(&res); no_reserve: @@ -639,7 +1023,7 @@ out: } /** - * vmw_compat_shader_lookup - Look up a compat shader + * vmw_shader_lookup - Look up a compat shader * * @man: Pointer to the command buffer managed resource manager identifying * the shader namespace. @@ -650,14 +1034,26 @@ out: * found. An error pointer otherwise. */ struct vmw_resource * -vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man, - u32 user_key, - SVGA3dShaderType shader_type) +vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man, + u32 user_key, + SVGA3dShaderType shader_type) { - if (!vmw_compat_shader_id_ok(user_key, shader_type)) + if (!vmw_shader_id_ok(user_key, shader_type)) return ERR_PTR(-EINVAL); - return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader, - vmw_compat_shader_key(user_key, - shader_type)); + return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader, + vmw_shader_key(user_key, shader_type)); +} + +int vmw_shader_define_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_vmw_shader_create_arg *arg = + (struct drm_vmw_shader_create_arg *)data; + + return vmw_shader_define(dev, file_priv, arg->shader_type, + arg->buffer_handle, + arg->size, arg->offset, + 0, 0, + &arg->shader_handle); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c new file mode 100644 index 000000000000..5a73eebd0f35 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c @@ -0,0 +1,555 @@ +/************************************************************************** + * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#include "vmwgfx_drv.h" +#include "vmwgfx_resource_priv.h" +#include "vmwgfx_so.h" +#include "vmwgfx_binding.h" + +/* + * Currently, the only reason we need to keep track of views is that if we + * destroy a hardware surface, all views pointing to it must also be destroyed; + * otherwise the device will error. + * So in particular if a surface is evicted, we must destroy all views pointing + * to it, and all context bindings of that view. Similarly we must restore + * the view bindings, views and surfaces pointed to by the views when a + * context is referenced in the command stream. + */ + +/** + * struct vmw_view - view metadata + * + * @res: The struct vmw_resource we derive from + * @ctx: Non-refcounted pointer to the context this view belongs to. + * @srf: Refcounted pointer to the surface pointed to by this view. + * @cotable: Refcounted pointer to the cotable holding this view. + * @srf_head: List head for the surface-to-view list. + * @cotable_head: List head for the cotable-to-view list. + * @view_type: View type. + * @view_id: User-space per context view id. Currently used also as per + * context device view id. + * @cmd_size: Size of the SVGA3D define view command that we've copied from the + * command stream. + * @committed: Whether the view is actually created or pending creation at the + * device level. + * @cmd: The SVGA3D define view command copied from the command stream.
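+ *
+ * The define command is stored inline at the tail of the structure, with
+ * @cmd as a variable-sized trailing member, so an instance is allocated as
+ * offsetof(struct vmw_view, cmd) + @cmd_size bytes (see vmw_view_add()).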
+ */ +struct vmw_view { + struct rcu_head rcu; + struct vmw_resource res; + struct vmw_resource *ctx; /* Immutable */ + struct vmw_resource *srf; /* Immutable */ + struct vmw_resource *cotable; /* Immutable */ + struct list_head srf_head; /* Protected by binding_mutex */ + struct list_head cotable_head; /* Protected by binding_mutex */ + unsigned view_type; /* Immutable */ + unsigned view_id; /* Immutable */ + u32 cmd_size; /* Immutable */ + bool committed; /* Protected by binding_mutex */ + u32 cmd[1]; /* Immutable */ +}; + +static int vmw_view_create(struct vmw_resource *res); +static int vmw_view_destroy(struct vmw_resource *res); +static void vmw_hw_view_destroy(struct vmw_resource *res); +static void vmw_view_commit_notify(struct vmw_resource *res, + enum vmw_cmdbuf_res_state state); + +static const struct vmw_res_func vmw_view_func = { + .res_type = vmw_res_view, + .needs_backup = false, + .may_evict = false, + .type_name = "DX view", + .backup_placement = NULL, + .create = vmw_view_create, + .commit_notify = vmw_view_commit_notify, +}; + +/** + * struct vmw_view_define - view define command body stub + * + * @view_id: The device id of the view being defined + * @sid: The surface id of the view being defined + * + * This generic struct is used by the code to change @view_id and @sid of a + * saved view define command. + */ +struct vmw_view_define { + uint32 view_id; + uint32 sid; +}; + +/** + * vmw_view - Convert a struct vmw_resource to a struct vmw_view + * + * @res: Pointer to the resource to convert. + * + * Returns a pointer to a struct vmw_view. + */ +static struct vmw_view *vmw_view(struct vmw_resource *res) +{ + return container_of(res, struct vmw_view, res); +} + +/** + * vmw_view_commit_notify - Notify that a view operation has been committed to + * hardware from a user-supplied command stream. + * + * @res: Pointer to the view resource. + * @state: Indicating whether a creation or removal has been committed. + * + */ +static void vmw_view_commit_notify(struct vmw_resource *res, + enum vmw_cmdbuf_res_state state) +{ + struct vmw_view *view = vmw_view(res); + struct vmw_private *dev_priv = res->dev_priv; + + mutex_lock(&dev_priv->binding_mutex); + if (state == VMW_CMDBUF_RES_ADD) { + struct vmw_surface *srf = vmw_res_to_srf(view->srf); + + list_add_tail(&view->srf_head, &srf->view_list); + vmw_cotable_add_resource(view->cotable, &view->cotable_head); + view->committed = true; + res->id = view->view_id; + + } else { + list_del_init(&view->cotable_head); + list_del_init(&view->srf_head); + view->committed = false; + res->id = -1; + } + mutex_unlock(&dev_priv->binding_mutex); +} + +/** + * vmw_view_create - Create a hardware view. + * + * @res: Pointer to the view resource. + * + * Create a hardware view. Typically used if that view has previously been + * destroyed by an eviction operation.
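+ *
+ * Creation simply replays the define command saved in @cmd, patching up
+ * the surface id, which may have changed if the surface was evicted and
+ * recreated since the view was defined.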
+ */ +static int vmw_view_create(struct vmw_resource *res) +{ + struct vmw_view *view = vmw_view(res); + struct vmw_surface *srf = vmw_res_to_srf(view->srf); + struct vmw_private *dev_priv = res->dev_priv; + struct { + SVGA3dCmdHeader header; + struct vmw_view_define body; + } *cmd; + + mutex_lock(&dev_priv->binding_mutex); + if (!view->committed) { + mutex_unlock(&dev_priv->binding_mutex); + return 0; + } + + cmd = vmw_fifo_reserve_dx(res->dev_priv, view->cmd_size, + view->ctx->id); + if (!cmd) { + DRM_ERROR("Failed reserving FIFO space for view creation.\n"); + mutex_unlock(&dev_priv->binding_mutex); + return -ENOMEM; + } + memcpy(cmd, &view->cmd, view->cmd_size); + WARN_ON(cmd->body.view_id != view->view_id); + /* Sid may have changed due to surface eviction. */ + WARN_ON(view->srf->id == SVGA3D_INVALID_ID); + cmd->body.sid = view->srf->id; + vmw_fifo_commit(res->dev_priv, view->cmd_size); + res->id = view->view_id; + list_add_tail(&view->srf_head, &srf->view_list); + vmw_cotable_add_resource(view->cotable, &view->cotable_head); + mutex_unlock(&dev_priv->binding_mutex); + + return 0; +} + +/** + * vmw_view_destroy - Destroy a hardware view. + * + * @res: Pointer to the view resource. + * + * Destroy a hardware view. Typically used on unexpected termination of the + * owning process or if the surface the view is pointing to is destroyed. + */ +static int vmw_view_destroy(struct vmw_resource *res) +{ + struct vmw_private *dev_priv = res->dev_priv; + struct vmw_view *view = vmw_view(res); + struct { + SVGA3dCmdHeader header; + union vmw_view_destroy body; + } *cmd; + + WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); + vmw_binding_res_list_scrub(&res->binding_head); + + if (!view->committed || res->id == -1) + return 0; + + cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), view->ctx->id); + if (!cmd) { + DRM_ERROR("Failed reserving FIFO space for view " + "destruction.\n"); + return -ENOMEM; + } + + cmd->header.id = vmw_view_destroy_cmds[view->view_type]; + cmd->header.size = sizeof(cmd->body); + cmd->body.view_id = view->view_id; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + res->id = -1; + list_del_init(&view->cotable_head); + list_del_init(&view->srf_head); + + return 0; +} + +/** + * vmw_hw_view_destroy - Destroy a hardware view as part of resource cleanup. + * + * @res: Pointer to the view resource. + * + * Destroy a hardware view if it's still present. + */ +static void vmw_hw_view_destroy(struct vmw_resource *res) +{ + struct vmw_private *dev_priv = res->dev_priv; + + mutex_lock(&dev_priv->binding_mutex); + WARN_ON(vmw_view_destroy(res)); + res->id = -1; + mutex_unlock(&dev_priv->binding_mutex); +} + +/** + * vmw_view_key - Compute a view key suitable for the cmdbuf resource manager + * + * @user_key: The user-space id used for the view. + * @view_type: The view type. + * + * Returns a hash key suitable for the command buffer managed resource + * manager hash table. + */ +static u32 vmw_view_key(u32 user_key, enum vmw_view_type view_type) +{ + return user_key | (view_type << 20); +} + +/** + * vmw_view_id_ok - Basic view id and type range checks. + * + * @user_key: The user-space id used for the view. + * @view_type: The view type. + * + * Checks that the view id and type (typically provided by user-space) are + * valid.
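+ *
+ * A worked example of the key packing this check enables: user keys stay
+ * below 1 << 20 (SVGA_COTABLE_MAX_IDS), so vmw_view_key() can place the
+ * view type in the bits above them, e.g.
+ * vmw_view_key(7, vmw_view_rt) == 7 | (1 << 20) == 0x100007.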
+ */ +static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type) +{ + return (user_key < SVGA_COTABLE_MAX_IDS && + view_type < vmw_view_max); +} + +/** + * vmw_view_res_free - resource res_free callback for view resources + * + * @res: Pointer to a struct vmw_resource + * + * Frees memory and memory accounting held by a struct vmw_view. + */ +static void vmw_view_res_free(struct vmw_resource *res) +{ + struct vmw_view *view = vmw_view(res); + size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size; + struct vmw_private *dev_priv = res->dev_priv; + + vmw_resource_unreference(&view->cotable); + vmw_resource_unreference(&view->srf); + kfree_rcu(view, rcu); + ttm_mem_global_free(vmw_mem_glob(dev_priv), size); +} + +/** + * vmw_view_add - Create a view resource and stage it for addition + * as a command buffer managed resource. + * + * @man: Pointer to the compat shader manager identifying the shader namespace. + * @ctx: Pointer to a struct vmw_resource identifying the active context. + * @srf: Pointer to a struct vmw_resource identifying the surface the view + * points to. + * @view_type: The view type deduced from the view create command. + * @user_key: The key that is used to identify the shader. The key is + * unique to the view type and to the context. + * @cmd: Pointer to the view create command in the command stream. + * @cmd_size: Size of the view create command in the command stream. + * @list: Caller's list of staged command buffer resource actions. + */ +int vmw_view_add(struct vmw_cmdbuf_res_manager *man, + struct vmw_resource *ctx, + struct vmw_resource *srf, + enum vmw_view_type view_type, + u32 user_key, + const void *cmd, + size_t cmd_size, + struct list_head *list) +{ + static const size_t vmw_view_define_sizes[] = { + [vmw_view_sr] = sizeof(SVGA3dCmdDXDefineShaderResourceView), + [vmw_view_rt] = sizeof(SVGA3dCmdDXDefineRenderTargetView), + [vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView) + }; + + struct vmw_private *dev_priv = ctx->dev_priv; + struct vmw_resource *res; + struct vmw_view *view; + size_t size; + int ret; + + if (cmd_size != vmw_view_define_sizes[view_type] + + sizeof(SVGA3dCmdHeader)) { + DRM_ERROR("Illegal view create command size.\n"); + return -EINVAL; + } + + if (!vmw_view_id_ok(user_key, view_type)) { + DRM_ERROR("Illegal view add view id.\n"); + return -EINVAL; + } + + size = offsetof(struct vmw_view, cmd) + cmd_size; + + ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, false, true); + if (ret) { + if (ret != -ERESTARTSYS) + DRM_ERROR("Out of graphics memory for view" + " creation.\n"); + return ret; + } + + view = kmalloc(size, GFP_KERNEL); + if (!view) { + ttm_mem_global_free(vmw_mem_glob(dev_priv), size); + return -ENOMEM; + } + + res = &view->res; + view->ctx = ctx; + view->srf = vmw_resource_reference(srf); + view->cotable = vmw_context_cotable(ctx, vmw_view_cotables[view_type]); + view->view_type = view_type; + view->view_id = user_key; + view->cmd_size = cmd_size; + view->committed = false; + INIT_LIST_HEAD(&view->srf_head); + INIT_LIST_HEAD(&view->cotable_head); + memcpy(&view->cmd, cmd, cmd_size); + ret = vmw_resource_init(dev_priv, res, true, + vmw_view_res_free, &vmw_view_func); + if (ret) + goto out_resource_init; + + ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_view, + vmw_view_key(user_key, view_type), + res, list); + if (ret) + goto out_resource_init; + + res->id = view->view_id; + vmw_resource_activate(res, vmw_hw_view_destroy); + +out_resource_init: + vmw_resource_unreference(&res); + + return ret; +} + 
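+/*
+ * A usage sketch for vmw_view_add() (a hypothetical caller; the sw_context
+ * and node names below are assumptions for illustration, not part of this
+ * patch):
+ *
+ *	enum vmw_view_type vt = vmw_view_cmd_to_type(header->id);
+ *
+ *	if (vt == vmw_view_max)
+ *		return -EINVAL;
+ *	ret = vmw_view_add(sw_context->man, ctx_node->res, srf_node->res,
+ *			   vt, cmd->defined_id, header,
+ *			   header->size + sizeof(*header),
+ *			   &sw_context->staged_cmd_res);
+ */
+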
+/** + * vmw_view_remove - Stage a view for removal. + * + * @man: Pointer to the view manager identifying the shader namespace. + * @user_key: The key that is used to identify the view. The key is + * unique to the view type. + * @view_type: View type + * @list: Caller's list of staged command buffer resource actions. + * @res_p: If the resource is in an already committed state, points to the + * struct vmw_resource on successful return. The pointer will be + * non-refcounted. + */ +int vmw_view_remove(struct vmw_cmdbuf_res_manager *man, + u32 user_key, enum vmw_view_type view_type, + struct list_head *list, + struct vmw_resource **res_p) +{ + if (!vmw_view_id_ok(user_key, view_type)) { + DRM_ERROR("Illegal view remove view id.\n"); + return -EINVAL; + } + + return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_view, + vmw_view_key(user_key, view_type), + list, res_p); +} + +/** + * vmw_view_cotable_list_destroy - Evict all views belonging to a cotable. + * + * @dev_priv: Pointer to a device private struct. + * @list: List of views belonging to a cotable. + * @readback: Unused. Needed for function interface only. + * + * This function evicts all views belonging to a cotable. + * It must be called with the binding_mutex held, and the caller must hold + * a reference to the view resource. This is typically called before the + * cotable is paged out. + */ +void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv, + struct list_head *list, + bool readback) +{ + struct vmw_view *entry, *next; + + WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); + + list_for_each_entry_safe(entry, next, list, cotable_head) + WARN_ON(vmw_view_destroy(&entry->res)); +} + +/** + * vmw_view_surface_list_destroy - Evict all views pointing to a surface + * + * @dev_priv: Pointer to a device private struct. + * @list: List of views pointing to a surface. + * + * This function evicts all views pointing to a surface. This is typically + * called before the surface is evicted. + */ +void vmw_view_surface_list_destroy(struct vmw_private *dev_priv, + struct list_head *list) +{ + struct vmw_view *entry, *next; + + WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); + + list_for_each_entry_safe(entry, next, list, srf_head) + WARN_ON(vmw_view_destroy(&entry->res)); +} + +/** + * vmw_view_srf - Return a non-refcounted pointer to the surface a view is + * pointing to. + * + * @res: pointer to a view resource. + * + * Note that the view itself is holding a reference, so as long as + * the view resource is alive, the surface resource will be. + */ +struct vmw_resource *vmw_view_srf(struct vmw_resource *res) +{ + return vmw_view(res)->srf; +} + +/** + * vmw_view_lookup - Look up a view. + * + * @man: The context's cmdbuf ref manager. + * @view_type: The view type. + * @user_key: The view user id. + * + * Returns a refcounted pointer to a view or an error pointer if not found.
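+ *
+ * A minimal lookup sketch (hypothetical caller; note the returned pointer
+ * is refcounted and must be unreferenced when done):
+ *
+ *	res = vmw_view_lookup(man, vmw_view_sr, user_key);
+ *	if (IS_ERR(res))
+ *		return PTR_ERR(res);
+ *	srf = vmw_view_srf(res);
+ *	...
+ *	vmw_resource_unreference(&res);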
+ */ +struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man, + enum vmw_view_type view_type, + u32 user_key) +{ + return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_view, + vmw_view_key(user_key, view_type)); +} + +const u32 vmw_view_destroy_cmds[] = { + [vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW, + [vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW, + [vmw_view_ds] = SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW, +}; + +const SVGACOTableType vmw_view_cotables[] = { + [vmw_view_sr] = SVGA_COTABLE_SRVIEW, + [vmw_view_rt] = SVGA_COTABLE_RTVIEW, + [vmw_view_ds] = SVGA_COTABLE_DSVIEW, +}; + +const SVGACOTableType vmw_so_cotables[] = { + [vmw_so_el] = SVGA_COTABLE_ELEMENTLAYOUT, + [vmw_so_bs] = SVGA_COTABLE_BLENDSTATE, + [vmw_so_ds] = SVGA_COTABLE_DEPTHSTENCIL, + [vmw_so_rs] = SVGA_COTABLE_RASTERIZERSTATE, + [vmw_so_ss] = SVGA_COTABLE_SAMPLER, + [vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT +}; + + +/* To remove unused function warning */ +static void vmw_so_build_asserts(void) __attribute__((used)); + + +/* + * This function is unused at run-time, and only used to dump various build + * asserts important for code optimization assumptions. + */ +static void vmw_so_build_asserts(void) +{ + /* Assert that our vmw_view_cmd_to_type() function is correct. */ + BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW != + SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 1); + BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW != + SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 2); + BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW != + SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 3); + BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW != + SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 4); + BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW != + SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 5); + + /* Assert that our "one body fits all" assumption is valid */ + BUILD_BUG_ON(sizeof(union vmw_view_destroy) != sizeof(u32)); + + /* Assert that the view key space can hold all view ids. */ + BUILD_BUG_ON(SVGA_COTABLE_MAX_IDS >= ((1 << 20) - 1)); + + /* + * Assert that the offset of sid in all view define commands + * is what we assume it to be. + */ + BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) != + offsetof(SVGA3dCmdDXDefineShaderResourceView, sid)); + BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) != + offsetof(SVGA3dCmdDXDefineRenderTargetView, sid)); + BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) != + offsetof(SVGA3dCmdDXDefineDepthStencilView, sid)); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h new file mode 100644 index 000000000000..268738387b5e --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h @@ -0,0 +1,160 @@ +/************************************************************************** + * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +#ifndef VMW_SO_H +#define VMW_SO_H + +enum vmw_view_type { + vmw_view_sr, + vmw_view_rt, + vmw_view_ds, + vmw_view_max, +}; + +enum vmw_so_type { + vmw_so_el, + vmw_so_bs, + vmw_so_ds, + vmw_so_rs, + vmw_so_ss, + vmw_so_so, + vmw_so_max, +}; + +/** + * union vmw_view_destroy - view destruction command body + * + * @rtv: RenderTarget view destruction command body + * @srv: ShaderResource view destruction command body + * @dsv: DepthStencil view destruction command body + * @view_id: A single u32 view id. + * + * The assumption here is that all union members are really represented by a + * single u32 in the command stream. If that's not the case, + * the size of this union will not equal the size of a u32, and the + * assumption is invalid, and we detect that at compile time in the + * vmw_so_build_asserts() function. + */ +union vmw_view_destroy { + struct SVGA3dCmdDXDestroyRenderTargetView rtv; + struct SVGA3dCmdDXDestroyShaderResourceView srv; + struct SVGA3dCmdDXDestroyDepthStencilView dsv; + u32 view_id; +}; + +/* Map enum vmw_view_type to view destroy command ids */ +extern const u32 vmw_view_destroy_cmds[]; + +/* Map enum vmw_view_type to SVGACOTableType */ +extern const SVGACOTableType vmw_view_cotables[]; + +/* Map enum vmw_so_type to SVGACOTableType */ +extern const SVGACOTableType vmw_so_cotables[]; + +/* + * vmw_view_cmd_to_type - Return the view type for a create or destroy command + * + * @id: The SVGA3D command id. + * + * For a given view create or destroy command id, return the corresponding + * enum vmw_view_type. If the command is unknown, return vmw_view_max. + * The validity of the simplified calculation is verified in the + * vmw_so_build_asserts() function. + */ +static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id) +{ + u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) / 2; + + if (tmp > (u32)vmw_view_max) + return vmw_view_max; + + return (enum vmw_view_type) tmp; +} + +/* + * vmw_so_cmd_to_type - Return the state object type for a + * create or destroy command + * + * @id: The SVGA3D command id. + * + * For a given state object create or destroy command id, + * return the corresponding enum vmw_so_type. If the command is unknown, + * return vmw_so_max. We should perhaps optimize this function using + * a similar strategy to vmw_view_cmd_to_type().
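+ *
+ * For comparison, vmw_view_cmd_to_type() exploits the interleaving of the
+ * define and destroy command ids: for example,
+ * SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW is
+ * SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 2, so both it and its
+ * matching destroy command map to (id - base) / 2 == 1 == vmw_view_rt.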
+ */ +static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id) +{ + switch (id) { + case SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT: + case SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT: + return vmw_so_el; + case SVGA_3D_CMD_DX_DEFINE_BLEND_STATE: + case SVGA_3D_CMD_DX_DESTROY_BLEND_STATE: + return vmw_so_bs; + case SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE: + case SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE: + return vmw_so_ds; + case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE: + case SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE: + return vmw_so_rs; + case SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE: + case SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE: + return vmw_so_ss; + case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT: + case SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT: + return vmw_so_so; + default: + break; + } + return vmw_so_max; +} + +/* + * View management - vmwgfx_so.c + */ +extern int vmw_view_add(struct vmw_cmdbuf_res_manager *man, + struct vmw_resource *ctx, + struct vmw_resource *srf, + enum vmw_view_type view_type, + u32 user_key, + const void *cmd, + size_t cmd_size, + struct list_head *list); + +extern int vmw_view_remove(struct vmw_cmdbuf_res_manager *man, + u32 user_key, enum vmw_view_type view_type, + struct list_head *list, + struct vmw_resource **res_p); + +extern void vmw_view_surface_list_destroy(struct vmw_private *dev_priv, + struct list_head *view_list); +extern void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv, + struct list_head *list, + bool readback); +extern struct vmw_resource *vmw_view_srf(struct vmw_resource *res); +extern struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man, + enum vmw_view_type view_type, + u32 user_key); +#endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c new file mode 100644 index 000000000000..c22e2df1b336 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -0,0 +1,1266 @@ +/****************************************************************************** + * + * COPYRIGHT © 2014-2015 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ ******************************************************************************/
+
+#include "vmwgfx_kms.h"
+#include "device_include/svga3d_surfacedefs.h"
+#include <drm/drm_plane_helper.h>
+
+#define vmw_crtc_to_stdu(x) \
+ container_of(x, struct vmw_screen_target_display_unit, base.crtc)
+#define vmw_encoder_to_stdu(x) \
+ container_of(x, struct vmw_screen_target_display_unit, base.encoder)
+#define vmw_connector_to_stdu(x) \
+ container_of(x, struct vmw_screen_target_display_unit, base.connector)
+
+
+
+enum stdu_content_type {
+ SAME_AS_DISPLAY = 0,
+ SEPARATE_SURFACE,
+ SEPARATE_DMA
+};
+
+/**
+ * struct vmw_stdu_dirty - closure structure for the update functions
+ *
+ * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
+ * @transfer: Transfer direction for DMA command.
+ * @left: Left side of bounding box.
+ * @right: Right side of bounding box.
+ * @top: Top side of bounding box.
+ * @bottom: Bottom side of bounding box.
+ * @pitch: Buffer pitch when DMA-ing between buffer and screen targets.
+ * @buf: DMA buffer when DMA-ing between buffer and screen targets.
+ * @sid: Surface ID when copying between surface and screen targets.
+ */
+struct vmw_stdu_dirty {
+ struct vmw_kms_dirty base;
+ SVGA3dTransferType transfer;
+ s32 left, right, top, bottom;
+ u32 pitch;
+ union {
+ struct vmw_dma_buffer *buf;
+ u32 sid;
+ };
+};
+
+/*
+ * SVGA commands that are used by this code. Please see the device headers
+ * for explanation.
+ */
+struct vmw_stdu_update {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBScreenTarget body;
+};
+
+struct vmw_stdu_dma {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA body;
+};
+
+struct vmw_stdu_surface_copy {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceCopy body;
+};
+
+
+/**
+ * struct vmw_screen_target_display_unit
+ *
+ * @base: VMW specific DU structure
+ * @display_srf: surface to be displayed. The dimension of this will always
+ * match the display mode. If the display mode matches
+ * content_vfbs dimensions, then this is a pointer into the
+ * corresponding field in content_vfbs. If not, then this
+ * is a separate buffer to which content_vfbs will be blitted.
+ * @content_fb: holds the rendered content, can be a surface or DMA buffer
+ * @content_fb_type: content_fb type
+ * @defined: true if the current display unit has been initialized
+ */
+struct vmw_screen_target_display_unit {
+ struct vmw_display_unit base;
+
+ struct vmw_surface *display_srf;
+ struct drm_framebuffer *content_fb;
+
+ enum stdu_content_type content_fb_type;
+
+ bool defined;
+};
+
+
+
+static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu);
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit helper Functions
+ *****************************************************************************/
+
+/**
+ * vmw_stdu_pin_display - pins the resource associated with the display surface
+ *
+ * @stdu: contains the display surface
+ *
+ * Since the display surface can either be a private surface allocated by us,
+ * or it can point to the content surface, we use this function to not pin the
+ * same resource twice.
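+ *
+ * For example, with a SAME_AS_DISPLAY content framebuffer the display
+ * surface is the content surface itself, so both cases funnel through this
+ * single pin call on the underlying resource.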
+ */
+static int vmw_stdu_pin_display(struct vmw_screen_target_display_unit *stdu)
+{
+ return vmw_resource_pin(&stdu->display_srf->res, false);
+}
+
+
+
+/**
+ * vmw_stdu_unpin_display - unpins the resource associated with display surface
+ *
+ * @stdu: contains the display surface
+ *
+ * If the display surface was privately allocated by
+ * vmw_surface_gb_priv_define() and not registered as a framebuffer, then it
+ * won't be automatically cleaned up when all the framebuffers are freed. As
+ * such, we have to explicitly call vmw_resource_unreference() to get it freed.
+ */
+static void vmw_stdu_unpin_display(struct vmw_screen_target_display_unit *stdu)
+{
+ if (stdu->display_srf) {
+ struct vmw_resource *res = &stdu->display_srf->res;
+
+ vmw_resource_unpin(res);
+
+ if (stdu->content_fb_type != SAME_AS_DISPLAY) {
+ vmw_resource_unreference(&res);
+ stdu->content_fb_type = SAME_AS_DISPLAY;
+ }
+
+ stdu->display_srf = NULL;
+ }
+}
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit CRTC Functions
+ *****************************************************************************/
+
+
+/**
+ * vmw_stdu_crtc_destroy - cleans up the STDU
+ *
+ * @crtc: used to get a reference to the containing STDU
+ */
+static void vmw_stdu_crtc_destroy(struct drm_crtc *crtc)
+{
+ vmw_stdu_destroy(vmw_crtc_to_stdu(crtc));
+}
+
+/**
+ * vmw_stdu_define_st - Defines a Screen Target
+ *
+ * @dev_priv: VMW DRM device
+ * @stdu: display unit to create a Screen Target for
+ *
+ * Creates a Screen Target that we can use later. This function is called
+ * whenever the framebuffer size changes.
+ *
+ * RETURNS:
+ * 0 on success, error code on failure
+ */
+static int vmw_stdu_define_st(struct vmw_private *dev_priv,
+ struct vmw_screen_target_display_unit *stdu)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBScreenTarget body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Out of FIFO space defining Screen Target\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SCREENTARGET;
+ cmd->header.size = sizeof(cmd->body);
+
+ cmd->body.stid = stdu->base.unit;
+ cmd->body.width = stdu->display_srf->base_size.width;
+ cmd->body.height = stdu->display_srf->base_size.height;
+ cmd->body.flags = (0 == cmd->body.stid) ? SVGA_STFLAG_PRIMARY : 0;
+ cmd->body.dpi = 0;
+ cmd->body.xRoot = stdu->base.crtc.x;
+ cmd->body.yRoot = stdu->base.crtc.y;
+
+ if (!stdu->base.is_implicit) {
+ cmd->body.xRoot = stdu->base.gui_x;
+ cmd->body.yRoot = stdu->base.gui_y;
+ }
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ stdu->defined = true;
+
+ return 0;
+}
+
+
+
+/**
+ * vmw_stdu_bind_st - Binds a surface to a Screen Target
+ *
+ * @dev_priv: VMW DRM device
+ * @stdu: display unit affected
+ * @res: Buffer to bind to the screen target. Set to NULL to blank screen.
+ *
+ * Binding a surface to a Screen Target is the same as flipping
+ */
+static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
+ struct vmw_screen_target_display_unit *stdu,
+ struct vmw_resource *res)
+{
+ SVGA3dSurfaceImageId image;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBScreenTarget body;
+ } *cmd;
+
+
+ if (!stdu->defined) {
+ DRM_ERROR("No screen target defined\n");
+ return -EINVAL;
+ }
+
+ /* Set up image using information in vfb */
+ memset(&image, 0, sizeof(image));
+ image.sid = res ? res->id : SVGA3D_INVALID_ID;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Out of FIFO space binding a screen target\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_BIND_GB_SCREENTARGET;
+ cmd->header.size = sizeof(cmd->body);
+
+ cmd->body.stid = stdu->base.unit;
+ cmd->body.image = image;
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_stdu_populate_update - populate an UPDATE_GB_SCREENTARGET command with a
+ * bounding box.
+ *
+ * @cmd: Pointer to command stream.
+ * @unit: Screen target unit.
+ * @left: Left side of bounding box.
+ * @right: Right side of bounding box.
+ * @top: Top side of bounding box.
+ * @bottom: Bottom side of bounding box.
+ */
+static void vmw_stdu_populate_update(void *cmd, int unit,
+ s32 left, s32 right, s32 top, s32 bottom)
+{
+ struct vmw_stdu_update *update = cmd;
+
+ update->header.id = SVGA_3D_CMD_UPDATE_GB_SCREENTARGET;
+ update->header.size = sizeof(update->body);
+
+ update->body.stid = unit;
+ update->body.rect.x = left;
+ update->body.rect.y = top;
+ update->body.rect.w = right - left;
+ update->body.rect.h = bottom - top;
+}
+
+/**
+ * vmw_stdu_update_st - Full update of a Screen Target
+ *
+ * @dev_priv: VMW DRM device
+ * @stdu: display unit affected
+ *
+ * This function needs to be called whenever the content of a screen
+ * target has changed completely. Typically as a result of a backing
+ * surface change.
+ *
+ * RETURNS:
+ * 0 on success, error code on failure
+ */
+static int vmw_stdu_update_st(struct vmw_private *dev_priv,
+ struct vmw_screen_target_display_unit *stdu)
+{
+ struct vmw_stdu_update *cmd;
+ struct drm_crtc *crtc = &stdu->base.crtc;
+
+ if (!stdu->defined) {
+ DRM_ERROR("No screen target defined\n");
+ return -EINVAL;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Out of FIFO space updating a Screen Target\n");
+ return -ENOMEM;
+ }
+
+ vmw_stdu_populate_update(cmd, stdu->base.unit, 0, crtc->mode.hdisplay,
+ 0, crtc->mode.vdisplay);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+
+
+/**
+ * vmw_stdu_destroy_st - Destroy a Screen Target
+ *
+ * @dev_priv: VMW DRM device
+ * @stdu: display unit to destroy
+ */
+static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
+ struct vmw_screen_target_display_unit *stdu)
+{
+ int ret;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyGBScreenTarget body;
+ } *cmd;
+
+
+ /* Nothing to do if not successfully defined */
+ if (unlikely(!stdu->defined))
+ return 0;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Out of FIFO space, screen target not destroyed\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SCREENTARGET;
+ cmd->header.size = sizeof(cmd->body);
+
+ cmd->body.stid = stdu->base.unit;
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ /* Force sync */
+ ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
+ if (unlikely(ret != 0))
+ DRM_ERROR("Failed to sync with HW\n");
+
+ stdu->defined = false;
+
+ return ret;
+}
+
+
+
+/**
+ * vmw_stdu_crtc_set_config - Sets a mode
+ *
+ * @set: mode parameters
+ *
+ * This function is the device-specific portion of the DRM CRTC mode set.
+ * For the SVGA device, we do this by defining a Screen Target, binding a
+ * GB Surface to that target, and finally updating the screen target.
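+ *
+ * Roughly, ignoring error handling (a sketch, not the exact flow):
+ *
+ *   vmw_stdu_define_st(dev_priv, stdu);
+ *   vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
+ *
+ * The screen target update itself is deferred to the dirty/present code.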
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
+{
+ struct vmw_private *dev_priv;
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer *vfb;
+ struct vmw_framebuffer_surface *new_vfbs;
+ struct drm_display_mode *mode;
+ struct drm_framebuffer *new_fb;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+
+ if (!set || !set->crtc)
+ return -EINVAL;
+
+ crtc = set->crtc;
+ crtc->x = set->x;
+ crtc->y = set->y;
+ stdu = vmw_crtc_to_stdu(crtc);
+ mode = set->mode;
+ new_fb = set->fb;
+ dev_priv = vmw_priv(crtc->dev);
+
+
+ if (set->num_connectors > 1) {
+ DRM_ERROR("Too many connectors\n");
+ return -EINVAL;
+ }
+
+ if (set->num_connectors == 1 &&
+ set->connectors[0] != &stdu->base.connector) {
+ DRM_ERROR("Connectors don't match %p %p\n",
+ set->connectors[0], &stdu->base.connector);
+ return -EINVAL;
+ }
+
+
+ /* Since they always map one to one these are safe */
+ connector = &stdu->base.connector;
+ encoder = &stdu->base.encoder;
+
+
+ /*
+ * After this point the CRTC will be considered off unless a new fb
+ * is bound
+ */
+ if (stdu->defined) {
+ /* Unbind current surface by binding an invalid one */
+ ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ /* Update Screen Target, display will now be blank */
+ if (crtc->primary->fb) {
+ ret = vmw_stdu_update_st(dev_priv, stdu);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ crtc->primary->fb = NULL;
+ crtc->enabled = false;
+ encoder->crtc = NULL;
+ connector->encoder = NULL;
+
+ vmw_stdu_unpin_display(stdu);
+ stdu->content_fb = NULL;
+ stdu->content_fb_type = SAME_AS_DISPLAY;
+
+ ret = vmw_stdu_destroy_st(dev_priv, stdu);
+ /* The hardware is hung, give up */
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+
+ /* Any of these conditions means the caller wants CRTC off */
+ if (set->num_connectors == 0 || !mode || !new_fb)
+ return 0;
+
+
+ if (set->x + mode->hdisplay > new_fb->width ||
+ set->y + mode->vdisplay > new_fb->height) {
+ DRM_ERROR("Set outside of framebuffer\n");
+ return -EINVAL;
+ }
+
+ stdu->content_fb = new_fb;
+ vfb = vmw_framebuffer_to_vfb(stdu->content_fb);
+
+ if (vfb->dmabuf)
+ stdu->content_fb_type = SEPARATE_DMA;
+
+ /*
+ * If the requested mode is different from the width and height
+ * of the FB or if the content buffer is a DMA buf, then allocate
+ * a display FB that matches the dimension of the mode
+ */
+ if (mode->hdisplay != new_fb->width ||
+ mode->vdisplay != new_fb->height ||
+ stdu->content_fb_type != SAME_AS_DISPLAY) {
+ struct vmw_surface content_srf;
+ struct drm_vmw_size display_base_size = {0};
+ struct vmw_surface *display_srf;
+
+
+ display_base_size.width = mode->hdisplay;
+ display_base_size.height = mode->vdisplay;
+ display_base_size.depth = 1;
+
+ /*
+ * If content buffer is a DMA buf, then we have to construct
+ * surface info
+ */
+ if (stdu->content_fb_type == SEPARATE_DMA) {
+
+ switch (new_fb->bits_per_pixel) {
+ case 32:
+ content_srf.format = SVGA3D_X8R8G8B8;
+ break;
+
+ case 16:
+ content_srf.format = SVGA3D_R5G6B5;
+ break;
+
+ case 8:
+ content_srf.format = SVGA3D_P8;
+ break;
+
+ default:
+ DRM_ERROR("Invalid format\n");
+ ret = -EINVAL;
+ goto err_unref_content;
+ }
+
+ content_srf.flags = 0;
+ content_srf.mip_levels[0] = 1;
+ content_srf.multisample_count = 0;
+ } else {
+
+ stdu->content_fb_type = SEPARATE_SURFACE;
+
+ new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
+ content_srf = *new_vfbs->surface;
+ }
+
+
+ ret = vmw_surface_gb_priv_define(crtc->dev,
+ 0, /* because kernel visible only */
+ content_srf.flags,
+ content_srf.format,
+ true, /* a scanout buffer */
+ content_srf.mip_levels[0],
+ content_srf.multisample_count,
+ 0,
+ display_base_size,
+ &display_srf);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Cannot allocate a display FB.\n");
+ goto err_unref_content;
+ }
+
+ stdu->display_srf = display_srf;
+ } else {
+ new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
+ stdu->display_srf = new_vfbs->surface;
+ }
+
+
+ ret = vmw_stdu_pin_display(stdu);
+ if (unlikely(ret != 0)) {
+ stdu->display_srf = NULL;
+ goto err_unref_content;
+ }
+
+ vmw_svga_enable(dev_priv);
+
+ /*
+ * Steps to displaying a surface, assume surface is already
+ * bound:
+ * 1. define a screen target
+ * 2. bind a fb to the screen target
+ * 3. update that screen target (this is done later by
+ * vmw_kms_stdu_do_surface_dirty_or_present)
+ */
+ ret = vmw_stdu_define_st(dev_priv, stdu);
+ if (unlikely(ret != 0))
+ goto err_unpin_display_and_content;
+
+ ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
+ if (unlikely(ret != 0))
+ goto err_unpin_destroy_st;
+
+
+ connector->encoder = encoder;
+ encoder->crtc = crtc;
+
+ crtc->mode = *mode;
+ crtc->primary->fb = new_fb;
+ crtc->enabled = true;
+
+ return ret;
+
+err_unpin_destroy_st:
+ vmw_stdu_destroy_st(dev_priv, stdu);
+err_unpin_display_and_content:
+ vmw_stdu_unpin_display(stdu);
+err_unref_content:
+ stdu->content_fb = NULL;
+ return ret;
+}
+
+
+
+/**
+ * vmw_stdu_crtc_page_flip - Binds a buffer to a screen target
+ *
+ * @crtc: CRTC to attach FB to
+ * @new_fb: FB to attach
+ * @event: Event to be posted. This event should've been allocated with
+ * k[mz]alloc, and should've been completely initialized.
+ * @flags: Input flags.
+ *
+ * If the STDU uses the same display and content buffers, i.e. a true flip,
+ * this function will replace the existing display buffer with the new content
+ * buffer.
+ *
+ * If the STDU uses different display and content buffers, i.e. a blit, then
+ * only the content buffer will be updated.
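+ *
+ * When @event is non-NULL, flip completion is signalled through a fence;
+ * roughly (a sketch, error handling omitted):
+ *
+ *   vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ *   vmw_event_fence_action_queue(file_priv, fence, &event->base,
+ *                                &event->event.tv_sec,
+ *                                &event->event.tv_usec, true);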
+ *
+ * RETURNS:
+ * 0 on success, error code on failure
+ */
+static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *new_fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags)
+
+{
+ struct vmw_private *dev_priv;
+ struct vmw_screen_target_display_unit *stdu;
+ int ret;
+
+ if (crtc == NULL)
+ return -EINVAL;
+
+ dev_priv = vmw_priv(crtc->dev);
+ stdu = vmw_crtc_to_stdu(crtc);
+ crtc->primary->fb = new_fb;
+ stdu->content_fb = new_fb;
+
+ if (stdu->display_srf) {
+ /*
+ * If the display surface is the same as the content surface
+ * then remove the reference
+ */
+ if (stdu->content_fb_type == SAME_AS_DISPLAY) {
+ if (stdu->defined) {
+ /* Unbind the current surface */
+ ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
+ if (unlikely(ret != 0))
+ goto err_out;
+ }
+ vmw_stdu_unpin_display(stdu);
+ stdu->display_srf = NULL;
+ }
+ }
+
+
+ if (!new_fb) {
+ /* Blanks the display */
+ (void) vmw_stdu_update_st(dev_priv, stdu);
+
+ return 0;
+ }
+
+
+ if (stdu->content_fb_type == SAME_AS_DISPLAY) {
+ stdu->display_srf = vmw_framebuffer_to_vfbs(new_fb)->surface;
+ ret = vmw_stdu_pin_display(stdu);
+ if (ret) {
+ stdu->display_srf = NULL;
+ goto err_out;
+ }
+
+ /* Bind display surface */
+ ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
+ if (unlikely(ret != 0))
+ goto err_unpin_display_and_content;
+ }
+
+ /* Update display surface: after this point everything is bound */
+ ret = vmw_stdu_update_st(dev_priv, stdu);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (event) {
+ struct vmw_fence_obj *fence = NULL;
+ struct drm_file *file_priv = event->base.file_priv;
+
+ vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ if (!fence)
+ return -ENOMEM;
+
+ ret = vmw_event_fence_action_queue(file_priv, fence,
+ &event->base,
+ &event->event.tv_sec,
+ &event->event.tv_usec,
+ true);
+ vmw_fence_obj_unreference(&fence);
+ }
+
+ return ret;
+
+err_unpin_display_and_content:
+ vmw_stdu_unpin_display(stdu);
+err_out:
+ crtc->primary->fb = NULL;
+ stdu->content_fb = NULL;
+ return ret;
+}
+
+
+/**
+ * vmw_stdu_dmabuf_clip - Callback to encode a surface DMA command cliprect
+ *
+ * @dirty: The closure structure.
+ *
+ * Encodes a surface DMA command cliprect and updates the bounding box
+ * for the DMA.
+ */
+static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
+{
+ struct vmw_stdu_dirty *ddirty =
+ container_of(dirty, struct vmw_stdu_dirty, base);
+ struct vmw_stdu_dma *cmd = dirty->cmd;
+ struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+
+ blit += dirty->num_hits;
+ blit->srcx = dirty->fb_x;
+ blit->srcy = dirty->fb_y;
+ blit->x = dirty->unit_x1;
+ blit->y = dirty->unit_y1;
+ blit->d = 1;
+ blit->w = dirty->unit_x2 - dirty->unit_x1;
+ blit->h = dirty->unit_y2 - dirty->unit_y1;
+ dirty->num_hits++;
+
+ if (ddirty->transfer != SVGA3D_WRITE_HOST_VRAM)
+ return;
+
+ /* Destination bounding box */
+ ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
+ ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
+ ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
+ ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
+}
+
+/**
+ * vmw_stdu_dmabuf_fifo_commit - Callback to fill in and submit a DMA command.
+ *
+ * @dirty: The closure structure.
+ *
+ * Fills in the missing fields in a DMA command, and optionally encodes
+ * a screen target update command, depending on transfer direction.
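+ *
+ * The FIFO space reserved by the caller is assumed to be laid out as:
+ *
+ *   struct vmw_stdu_dma | num_hits * SVGA3dCopyBox |
+ *   SVGA3dCmdSurfaceDMASuffix | (for writes only) struct vmw_stdu_update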
+ */
+static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+ struct vmw_stdu_dirty *ddirty =
+ container_of(dirty, struct vmw_stdu_dirty, base);
+ struct vmw_screen_target_display_unit *stdu =
+ container_of(dirty->unit, typeof(*stdu), base);
+ struct vmw_stdu_dma *cmd = dirty->cmd;
+ struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+ SVGA3dCmdSurfaceDMASuffix *suffix =
+ (SVGA3dCmdSurfaceDMASuffix *) &blit[dirty->num_hits];
+ size_t blit_size = sizeof(*blit) * dirty->num_hits + sizeof(*suffix);
+
+ if (!dirty->num_hits) {
+ vmw_fifo_commit(dirty->dev_priv, 0);
+ return;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_SURFACE_DMA;
+ cmd->header.size = sizeof(cmd->body) + blit_size;
+ vmw_bo_get_guest_ptr(&ddirty->buf->base, &cmd->body.guest.ptr);
+ cmd->body.guest.pitch = ddirty->pitch;
+ cmd->body.host.sid = stdu->display_srf->res.id;
+ cmd->body.host.face = 0;
+ cmd->body.host.mipmap = 0;
+ cmd->body.transfer = ddirty->transfer;
+ suffix->suffixSize = sizeof(*suffix);
+ suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE;
+
+ if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
+ blit_size += sizeof(struct vmw_stdu_update);
+
+ vmw_stdu_populate_update(&suffix[1], stdu->base.unit,
+ ddirty->left, ddirty->right,
+ ddirty->top, ddirty->bottom);
+ }
+
+ vmw_fifo_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
+
+ ddirty->left = ddirty->top = S32_MAX;
+ ddirty->right = ddirty->bottom = S32_MIN;
+}
+
+/**
+ * vmw_kms_stdu_dma - Perform a DMA transfer between a dma-buffer backed
+ * framebuffer and the screen target system.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @file_priv: Pointer to a struct drm-file identifying the caller. May be
+ * set to NULL, but then @user_fence_rep must also be set to NULL.
+ * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @user_fence_rep: User-space provided structure for fence information.
+ * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
+ * @num_clips: Number of clip rects in @clips or @vclips.
+ * @increment: Increment to use when looping over @clips or @vclips.
+ * @to_surface: Whether to DMA to the screen target system as opposed to
+ * from the screen target system.
+ * @interruptible: Whether to perform waits interruptible if possible.
+ *
+ * If DMA-ing to the screen target system, the function will also notify
+ * the screen target system that a bounding box of the cliprects has been
+ * updated.
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips,
+ int increment,
+ bool to_surface,
+ bool interruptible)
+{
+ struct vmw_dma_buffer *buf =
+ container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+ struct vmw_stdu_dirty ddirty;
+ int ret;
+
+ ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
+ false);
+ if (ret)
+ return ret;
+
+ ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM :
+ SVGA3D_READ_HOST_VRAM;
+ ddirty.left = ddirty.top = S32_MAX;
+ ddirty.right = ddirty.bottom = S32_MIN;
+ ddirty.pitch = vfb->base.pitches[0];
+ ddirty.buf = buf;
+ ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
+ ddirty.base.clip = vmw_stdu_dmabuf_clip;
+ ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
+ num_clips * sizeof(SVGA3dCopyBox) +
+ sizeof(SVGA3dCmdSurfaceDMASuffix);
+ if (to_surface)
+ ddirty.base.fifo_reserve_size += sizeof(struct vmw_stdu_update);
+
+ ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips,
+ 0, 0, num_clips, increment, &ddirty.base);
+ vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
+ user_fence_rep);
+
+ return ret;
+}
+
+/**
+ * vmw_kms_stdu_surface_clip - Callback to encode a surface copy command
+ * cliprect
+ *
+ * @dirty: The closure structure.
+ *
+ * Encodes a surface copy command cliprect and updates the bounding box
+ * for the copy.
+ */
+static void vmw_kms_stdu_surface_clip(struct vmw_kms_dirty *dirty)
+{
+ struct vmw_stdu_dirty *sdirty =
+ container_of(dirty, struct vmw_stdu_dirty, base);
+ struct vmw_stdu_surface_copy *cmd = dirty->cmd;
+ struct vmw_screen_target_display_unit *stdu =
+ container_of(dirty->unit, typeof(*stdu), base);
+
+ if (sdirty->sid != stdu->display_srf->res.id) {
+ struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+
+ blit += dirty->num_hits;
+ blit->srcx = dirty->fb_x;
+ blit->srcy = dirty->fb_y;
+ blit->x = dirty->unit_x1;
+ blit->y = dirty->unit_y1;
+ blit->d = 1;
+ blit->w = dirty->unit_x2 - dirty->unit_x1;
+ blit->h = dirty->unit_y2 - dirty->unit_y1;
+ }
+
+ dirty->num_hits++;
+
+ /* Destination bounding box */
+ sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
+ sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
+ sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
+ sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);
+}
+
+/**
+ * vmw_kms_stdu_surface_fifo_commit - Callback to fill in and submit a surface
+ * copy command.
+ *
+ * @dirty: The closure structure.
+ *
+ * Fills in the missing fields in a surface copy command, and encodes a screen
+ * target update command.
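+ *
+ * The command layout mirrors the DMA path:
+ *
+ *   struct vmw_stdu_surface_copy | num_hits * SVGA3dCopyBox |
+ *   struct vmw_stdu_update
+ *
+ * When the framebuffer surface is the display surface itself, only the
+ * update command is emitted.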
+ */ +static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty) +{ + struct vmw_stdu_dirty *sdirty = + container_of(dirty, struct vmw_stdu_dirty, base); + struct vmw_screen_target_display_unit *stdu = + container_of(dirty->unit, typeof(*stdu), base); + struct vmw_stdu_surface_copy *cmd = dirty->cmd; + struct vmw_stdu_update *update; + size_t blit_size = sizeof(SVGA3dCopyBox) * dirty->num_hits; + size_t commit_size; + + if (!dirty->num_hits) { + vmw_fifo_commit(dirty->dev_priv, 0); + return; + } + + if (sdirty->sid != stdu->display_srf->res.id) { + struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1]; + + cmd->header.id = SVGA_3D_CMD_SURFACE_COPY; + cmd->header.size = sizeof(cmd->body) + blit_size; + cmd->body.src.sid = sdirty->sid; + cmd->body.dest.sid = stdu->display_srf->res.id; + update = (struct vmw_stdu_update *) &blit[dirty->num_hits]; + commit_size = sizeof(*cmd) + blit_size + sizeof(*update); + } else { + update = dirty->cmd; + commit_size = sizeof(*update); + } + + vmw_stdu_populate_update(update, stdu->base.unit, sdirty->left, + sdirty->right, sdirty->top, sdirty->bottom); + + vmw_fifo_commit(dirty->dev_priv, commit_size); + + sdirty->left = sdirty->top = S32_MAX; + sdirty->right = sdirty->bottom = S32_MIN; +} + +/** + * vmw_kms_stdu_surface_dirty - Dirty part of a surface backed framebuffer + * + * @dev_priv: Pointer to the device private structure. + * @framebuffer: Pointer to the surface-buffer backed framebuffer. + * @clips: Array of clip rects. Either @clips or @vclips must be NULL. + * @vclips: Alternate array of clip rects. Either @clips or @vclips must + * be NULL. + * @srf: Pointer to surface to blit from. If NULL, the surface attached + * to @framebuffer will be used. + * @dest_x: X coordinate offset to align @srf with framebuffer coordinates. + * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates. + * @num_clips: Number of clip rects in @clips. + * @inc: Increment to use when looping over @clips. + * @out_fence: If non-NULL, will return a ref-counted pointer to a + * struct vmw_fence_obj. The returned fence pointer may be NULL in which + * case the device has already synchronized. + * + * Returns 0 on success, negative error code on failure. -ERESTARTSYS if + * interrupted. 
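+ *
+ * A typical call prepares the surface resource, runs the generic dirty
+ * helper with the clip/commit callbacks above, and then finishes the
+ * resource, optionally returning a fence in @out_fence.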
+ */
+int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ struct vmw_resource *srf,
+ s32 dest_x,
+ s32 dest_y,
+ unsigned num_clips, int inc,
+ struct vmw_fence_obj **out_fence)
+{
+ struct vmw_framebuffer_surface *vfbs =
+ container_of(framebuffer, typeof(*vfbs), base);
+ struct vmw_stdu_dirty sdirty;
+ int ret;
+
+ if (!srf)
+ srf = &vfbs->surface->res;
+
+ ret = vmw_kms_helper_resource_prepare(srf, true);
+ if (ret)
+ return ret;
+
+ if (vfbs->is_dmabuf_proxy) {
+ ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
+ if (ret)
+ goto out_finish;
+ }
+
+ sdirty.base.fifo_commit = vmw_kms_stdu_surface_fifo_commit;
+ sdirty.base.clip = vmw_kms_stdu_surface_clip;
+ sdirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_surface_copy) +
+ sizeof(SVGA3dCopyBox) * num_clips +
+ sizeof(struct vmw_stdu_update);
+ sdirty.sid = srf->id;
+ sdirty.left = sdirty.top = S32_MAX;
+ sdirty.right = sdirty.bottom = S32_MIN;
+
+ ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
+ dest_x, dest_y, num_clips, inc,
+ &sdirty.base);
+out_finish:
+ vmw_kms_helper_resource_finish(srf, out_fence);
+
+ return ret;
+}
+
+
+/*
+ * Screen Target CRTC dispatch table
+ */
+static struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
+ .save = vmw_du_crtc_save,
+ .restore = vmw_du_crtc_restore,
+ .cursor_set = vmw_du_crtc_cursor_set,
+ .cursor_move = vmw_du_crtc_cursor_move,
+ .gamma_set = vmw_du_crtc_gamma_set,
+ .destroy = vmw_stdu_crtc_destroy,
+ .set_config = vmw_stdu_crtc_set_config,
+ .page_flip = vmw_stdu_crtc_page_flip,
+};
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit Encoder Functions
+ *****************************************************************************/
+
+/**
+ * vmw_stdu_encoder_destroy - cleans up the STDU
+ *
+ * @encoder: used to get the containing STDU
+ *
+ * vmwgfx cleans up crtc/encoder/connector all at the same time so technically
+ * this can be a no-op. Nevertheless, it doesn't hurt to have this in case
+ * the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't
+ * get called.
+ */
+static void vmw_stdu_encoder_destroy(struct drm_encoder *encoder)
+{
+ vmw_stdu_destroy(vmw_encoder_to_stdu(encoder));
+}
+
+static struct drm_encoder_funcs vmw_stdu_encoder_funcs = {
+ .destroy = vmw_stdu_encoder_destroy,
+};
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit Connector Functions
+ *****************************************************************************/
+
+/**
+ * vmw_stdu_connector_destroy - cleans up the STDU
+ *
+ * @connector: used to get the containing STDU
+ *
+ * vmwgfx cleans up crtc/encoder/connector all at the same time so technically
+ * this can be a no-op. Nevertheless, it doesn't hurt to have this in case
+ * the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't
+ * get called.
+ */
+static void vmw_stdu_connector_destroy(struct drm_connector *connector)
+{
+ vmw_stdu_destroy(vmw_connector_to_stdu(connector));
+}
+
+
+
+static struct drm_connector_funcs vmw_stdu_connector_funcs = {
+ .dpms = vmw_du_connector_dpms,
+ .save = vmw_du_connector_save,
+ .restore = vmw_du_connector_restore,
+ .detect = vmw_du_connector_detect,
+ .fill_modes = vmw_du_connector_fill_modes,
+ .set_property = vmw_du_connector_set_property,
+ .destroy = vmw_stdu_connector_destroy,
+};
+
+
+
+/**
+ * vmw_stdu_init - Sets up a Screen Target Display Unit
+ *
+ * @dev_priv: VMW DRM device
+ * @unit: unit number, in the range 0 to VMWGFX_NUM_DISPLAY_UNITS - 1
+ *
+ * This function is called once per CRTC, and allocates one Screen Target
+ * display unit to represent that CRTC. Since the SVGA device does not separate
+ * out encoder and connector, they are represented as part of the STDU as well.
+ */
+static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
+{
+ struct vmw_screen_target_display_unit *stdu;
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+
+
+ stdu = kzalloc(sizeof(*stdu), GFP_KERNEL);
+ if (!stdu)
+ return -ENOMEM;
+
+ stdu->base.unit = unit;
+ crtc = &stdu->base.crtc;
+ encoder = &stdu->base.encoder;
+ connector = &stdu->base.connector;
+
+ stdu->base.pref_active = (unit == 0);
+ stdu->base.pref_width = dev_priv->initial_width;
+ stdu->base.pref_height = dev_priv->initial_height;
+ stdu->base.is_implicit = true;
+
+ drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ connector->status = vmw_du_connector_detect(connector, false);
+
+ drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL);
+ drm_mode_connector_attach_encoder(connector, encoder);
+ encoder->possible_crtcs = (1 << unit);
+ encoder->possible_clones = 0;
+
+ (void) drm_connector_register(connector);
+
+ drm_crtc_init(dev, crtc, &vmw_stdu_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(crtc, 256);
+
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.dirty_info_property,
+ 1);
+
+ return 0;
+}
+
+
+
+/**
+ * vmw_stdu_destroy - Cleans up a vmw_screen_target_display_unit
+ *
+ * @stdu: Screen Target Display Unit to be destroyed
+ *
+ * Clean up after vmw_stdu_init
+ */
+static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu)
+{
+ vmw_stdu_unpin_display(stdu);
+
+ vmw_du_cleanup(&stdu->base);
+ kfree(stdu);
+}
+
+
+
+/******************************************************************************
+ * Screen Target Display KMS Functions
+ *
+ * These functions are called by the common KMS code in vmwgfx_kms.c
+ *****************************************************************************/
+
+/**
+ * vmw_kms_stdu_init_display - Initializes a Screen Target based display
+ *
+ * @dev_priv: VMW DRM device
+ *
+ * This function initializes a Screen Target based display device. It checks
+ * the capability bits to make sure the underlying hardware can support
+ * screen targets, and then creates the maximum number of CRTCs, a.k.a Display
+ * Units, as supported by the display hardware.
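+ *
+ * Screen targets require guest-backed objects (SVGA_CAP_GBOBJECTS);
+ * without that capability this function returns -ENOSYS.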
+ * + * RETURNS: + * 0 on success, error code otherwise + */ +int vmw_kms_stdu_init_display(struct vmw_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + int i, ret; + + + /* Do nothing if Screen Target support is turned off */ + if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) + return -ENOSYS; + + if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) + return -ENOSYS; + + ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS); + if (unlikely(ret != 0)) + return ret; + + ret = drm_mode_create_dirty_info_property(dev); + if (unlikely(ret != 0)) + goto err_vblank_cleanup; + + dev_priv->active_display_unit = vmw_du_screen_target; + + for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) { + ret = vmw_stdu_init(dev_priv, i); + + if (unlikely(ret != 0)) { + DRM_ERROR("Failed to initialize STDU %d", i); + goto err_vblank_cleanup; + } + } + + DRM_INFO("Screen Target Display device initialized\n"); + + return 0; + +err_vblank_cleanup: + drm_vblank_cleanup(dev); + return ret; +} + + + +/** + * vmw_kms_stdu_close_display - Cleans up after vmw_kms_stdu_init_display + * + * @dev_priv: VMW DRM device + * + * Frees up any resources allocated by vmw_kms_stdu_init_display + * + * RETURNS: + * 0 on success + */ +int vmw_kms_stdu_close_display(struct vmw_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + + drm_vblank_cleanup(dev); + + return 0; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 4ecdbf3e59da..5b8595b78429 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -27,8 +27,11 @@ #include "vmwgfx_drv.h" #include "vmwgfx_resource_priv.h" +#include "vmwgfx_so.h" +#include "vmwgfx_binding.h" #include <ttm/ttm_placement.h> -#include "svga3d_surfacedefs.h" +#include "device_include/svga3d_surfacedefs.h" + /** * struct vmw_user_surface - User-space visible surface resource @@ -36,7 +39,7 @@ * @base: The TTM base object handling user-space visibility. * @srf: The surface metadata. * @size: TTM accounting size for the surface. - * @master: master of the creating client. Used for security check. + * @master: master of the creating client. Used for security check. */ struct vmw_user_surface { struct ttm_prime_object prime; @@ -220,7 +223,7 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf, cmd->header.size = cmd_len; cmd->body.sid = srf->res.id; cmd->body.surfaceFlags = srf->flags; - cmd->body.format = cpu_to_le32(srf->format); + cmd->body.format = srf->format; for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) cmd->body.face[i].numMipLevels = srf->mip_levels[i]; @@ -340,7 +343,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) dev_priv->used_memory_size -= res->backup_size; mutex_unlock(&dev_priv->cmdbuf_mutex); } - vmw_3d_resource_dec(dev_priv, false); + vmw_fifo_resource_dec(dev_priv); } /** @@ -576,14 +579,14 @@ static int vmw_surface_init(struct vmw_private *dev_priv, BUG_ON(res_free == NULL); if (!dev_priv->has_mob) - (void) vmw_3d_resource_inc(dev_priv, false); + vmw_fifo_resource_inc(dev_priv); ret = vmw_resource_init(dev_priv, res, true, res_free, (dev_priv->has_mob) ? 
&vmw_gb_surface_func : &vmw_legacy_surface_func); if (unlikely(ret != 0)) { if (!dev_priv->has_mob) - vmw_3d_resource_dec(dev_priv, false); + vmw_fifo_resource_dec(dev_priv); res_free(res); return ret; } @@ -593,6 +596,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv, * surface validate. */ + INIT_LIST_HEAD(&srf->view_list); vmw_resource_activate(res, vmw_hw_surface_destroy); return ret; } @@ -723,6 +727,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, desc = svga3dsurface_get_desc(req->format); if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) { DRM_ERROR("Invalid surface format for surface creation.\n"); + DRM_ERROR("Format requested is: %d\n", req->format); return -EINVAL; } @@ -1018,17 +1023,21 @@ static int vmw_gb_surface_create(struct vmw_resource *res) { struct vmw_private *dev_priv = res->dev_priv; struct vmw_surface *srf = vmw_res_to_srf(res); - uint32_t cmd_len, submit_len; + uint32_t cmd_len, cmd_id, submit_len; int ret; struct { SVGA3dCmdHeader header; SVGA3dCmdDefineGBSurface body; } *cmd; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDefineGBSurface_v2 body; + } *cmd2; if (likely(res->id != -1)) return 0; - (void) vmw_3d_resource_inc(dev_priv, false); + vmw_fifo_resource_inc(dev_priv); ret = vmw_resource_alloc_id(res); if (unlikely(ret != 0)) { DRM_ERROR("Failed to allocate a surface id.\n"); @@ -1040,9 +1049,19 @@ static int vmw_gb_surface_create(struct vmw_resource *res) goto out_no_fifo; } - cmd_len = sizeof(cmd->body); - submit_len = sizeof(*cmd); + if (srf->array_size > 0) { + /* has_dx checked on creation time. */ + cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2; + cmd_len = sizeof(cmd2->body); + submit_len = sizeof(*cmd2); + } else { + cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE; + cmd_len = sizeof(cmd->body); + submit_len = sizeof(*cmd); + } + cmd = vmw_fifo_reserve(dev_priv, submit_len); + cmd2 = (typeof(cmd2))cmd; if (unlikely(cmd == NULL)) { DRM_ERROR("Failed reserving FIFO space for surface " "creation.\n"); @@ -1050,17 +1069,33 @@ static int vmw_gb_surface_create(struct vmw_resource *res) goto out_no_fifo; } - cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE; - cmd->header.size = cmd_len; - cmd->body.sid = srf->res.id; - cmd->body.surfaceFlags = srf->flags; - cmd->body.format = cpu_to_le32(srf->format); - cmd->body.numMipLevels = srf->mip_levels[0]; - cmd->body.multisampleCount = srf->multisample_count; - cmd->body.autogenFilter = srf->autogen_filter; - cmd->body.size.width = srf->base_size.width; - cmd->body.size.height = srf->base_size.height; - cmd->body.size.depth = srf->base_size.depth; + if (srf->array_size > 0) { + cmd2->header.id = cmd_id; + cmd2->header.size = cmd_len; + cmd2->body.sid = srf->res.id; + cmd2->body.surfaceFlags = srf->flags; + cmd2->body.format = cpu_to_le32(srf->format); + cmd2->body.numMipLevels = srf->mip_levels[0]; + cmd2->body.multisampleCount = srf->multisample_count; + cmd2->body.autogenFilter = srf->autogen_filter; + cmd2->body.size.width = srf->base_size.width; + cmd2->body.size.height = srf->base_size.height; + cmd2->body.size.depth = srf->base_size.depth; + cmd2->body.arraySize = srf->array_size; + } else { + cmd->header.id = cmd_id; + cmd->header.size = cmd_len; + cmd->body.sid = srf->res.id; + cmd->body.surfaceFlags = srf->flags; + cmd->body.format = cpu_to_le32(srf->format); + cmd->body.numMipLevels = srf->mip_levels[0]; + cmd->body.multisampleCount = srf->multisample_count; + cmd->body.autogenFilter = srf->autogen_filter; + cmd->body.size.width = srf->base_size.width; + cmd->body.size.height = 
srf->base_size.height; + cmd->body.size.depth = srf->base_size.depth; + } + vmw_fifo_commit(dev_priv, submit_len); return 0; @@ -1068,7 +1103,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res) out_no_fifo: vmw_resource_release_id(res); out_no_id: - vmw_3d_resource_dec(dev_priv, false); + vmw_fifo_resource_dec(dev_priv); return ret; } @@ -1188,6 +1223,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res, static int vmw_gb_surface_destroy(struct vmw_resource *res) { struct vmw_private *dev_priv = res->dev_priv; + struct vmw_surface *srf = vmw_res_to_srf(res); struct { SVGA3dCmdHeader header; SVGA3dCmdDestroyGBSurface body; @@ -1197,7 +1233,8 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res) return 0; mutex_lock(&dev_priv->binding_mutex); - vmw_context_binding_res_list_scrub(&res->binding_head); + vmw_view_surface_list_destroy(dev_priv, &srf->view_list); + vmw_binding_res_list_scrub(&res->binding_head); cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) { @@ -1213,11 +1250,12 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res) vmw_fifo_commit(dev_priv, sizeof(*cmd)); mutex_unlock(&dev_priv->binding_mutex); vmw_resource_release_id(res); - vmw_3d_resource_dec(dev_priv, false); + vmw_fifo_resource_dec(dev_priv); return 0; } + /** * vmw_gb_surface_define_ioctl - Ioctl function implementing * the user surface define functionality. @@ -1241,77 +1279,51 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; int ret; uint32_t size; - const struct svga3d_surface_desc *desc; uint32_t backup_handle; + if (unlikely(vmw_user_surface_size == 0)) vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + 128; size = vmw_user_surface_size + 128; - desc = svga3dsurface_get_desc(req->format); - if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) { - DRM_ERROR("Invalid surface format for surface creation.\n"); - return -EINVAL; - } - - ret = ttm_read_lock(&dev_priv->reservation_sem, true); + /* Define a surface based on the parameters. 
*/ + ret = vmw_surface_gb_priv_define(dev, + size, + req->svga3d_flags, + req->format, + req->drm_surface_flags & drm_vmw_surface_flag_scanout, + req->mip_levels, + req->multisample_count, + req->array_size, + req->base_size, + &srf); if (unlikely(ret != 0)) return ret; - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - size, false, true); - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for surface" - " creation.\n"); - goto out_unlock; - } - - user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); - if (unlikely(user_srf == NULL)) { - ret = -ENOMEM; - goto out_no_user_srf; - } - - srf = &user_srf->srf; - res = &srf->res; - - srf->flags = req->svga3d_flags; - srf->format = req->format; - srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout; - srf->mip_levels[0] = req->mip_levels; - srf->num_sizes = 1; - srf->sizes = NULL; - srf->offsets = NULL; - user_srf->size = size; - srf->base_size = req->base_size; - srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; - srf->multisample_count = req->multisample_count; - res->backup_size = svga3dsurface_get_serialized_size - (srf->format, srf->base_size, srf->mip_levels[0], - srf->flags & SVGA3D_SURFACE_CUBEMAP); - - user_srf->prime.base.shareable = false; - user_srf->prime.base.tfile = NULL; + user_srf = container_of(srf, struct vmw_user_surface, srf); if (drm_is_primary_client(file_priv)) user_srf->master = drm_master_get(file_priv->master); - /** - * From this point, the generic resource management functions - * destroy the object on failure. - */ - - ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); + ret = ttm_read_lock(&dev_priv->reservation_sem, true); if (unlikely(ret != 0)) - goto out_unlock; + return ret; + + res = &user_srf->srf.res; + if (req->buffer_handle != SVGA3D_INVALID_ID) { ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, &res->backup); - } else if (req->drm_surface_flags & - drm_vmw_surface_flag_create_buffer) + if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < + res->backup_size) { + DRM_ERROR("Surface backup buffer is too small.\n"); + vmw_dmabuf_unreference(&res->backup); + ret = -EINVAL; + goto out_unlock; + } + } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer) ret = vmw_user_dmabuf_alloc(dev_priv, tfile, res->backup_size, req->drm_surface_flags & @@ -1324,7 +1336,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, goto out_unlock; } - tmp = vmw_resource_reference(&srf->res); + tmp = vmw_resource_reference(res); ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, req->drm_surface_flags & drm_vmw_surface_flag_shareable, @@ -1337,7 +1349,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, goto out_unlock; } - rep->handle = user_srf->prime.base.hash.key; + rep->handle = user_srf->prime.base.hash.key; rep->backup_size = res->backup_size; if (res->backup) { rep->buffer_map_handle = @@ -1352,10 +1364,6 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, vmw_resource_unreference(&res); - ttm_read_unlock(&dev_priv->reservation_sem); - return 0; -out_no_user_srf: - ttm_mem_global_free(vmw_mem_glob(dev_priv), size); out_unlock: ttm_read_unlock(&dev_priv->reservation_sem); return ret; @@ -1415,6 +1423,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, rep->creq.drm_surface_flags = 0; rep->creq.multisample_count = srf->multisample_count; rep->creq.autogen_filter = srf->autogen_filter; + rep->creq.array_size = srf->array_size; 
rep->creq.buffer_handle = backup_handle;
 rep->creq.base_size = srf->base_size;
 rep->crep.handle = user_srf->prime.base.hash.key;
@@ -1429,3 +1438,137 @@ out_bad_resource:
 return ret;
 }
+
+/**
+ * vmw_surface_gb_priv_define - Define a private GB surface
+ *
+ * @dev: Pointer to a struct drm_device
+ * @user_accounting_size: Used to track user-space memory usage, set
+ * to 0 for kernel mode only memory
+ * @svga3d_flags: SVGA3d surface flags for the device
+ * @format: requested surface format
+ * @for_scanout: true if intended to be used as a scanout buffer
+ * @num_mip_levels: number of MIP levels
+ * @multisample_count: Surface multisample count
+ * @array_size: Surface array size.
+ * @size: width, height, depth of the surface requested
+ * @srf_out: allocated surface. Set to NULL on failure.
+ *
+ * GB surfaces allocated by this function will not have a user mode handle, and
+ * thus will only be visible to vmwgfx. For optimization reasons the
+ * surface may later be given a user mode handle by another function to make
+ * it available to user mode drivers.
+ */
+int vmw_surface_gb_priv_define(struct drm_device *dev,
+ uint32_t user_accounting_size,
+ uint32_t svga3d_flags,
+ SVGA3dSurfaceFormat format,
+ bool for_scanout,
+ uint32_t num_mip_levels,
+ uint32_t multisample_count,
+ uint32_t array_size,
+ struct drm_vmw_size size,
+ struct vmw_surface **srf_out)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_surface *user_srf;
+ struct vmw_surface *srf;
+ int ret;
+ u32 num_layers;
+
+ *srf_out = NULL;
+
+ if (for_scanout) {
+ if (!svga3dsurface_is_screen_target_format(format)) {
+ DRM_ERROR("Invalid Screen Target surface format.\n");
+ return -EINVAL;
+ }
+ } else {
+ const struct svga3d_surface_desc *desc;
+
+ desc = svga3dsurface_get_desc(format);
+ if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+ DRM_ERROR("Invalid surface format.\n");
+ return -EINVAL;
+ }
+ }
+
+ /* array_size must be zero for non-GL3 hosts.
*/ + if (array_size > 0 && !dev_priv->has_dx) { + DRM_ERROR("Tried to create DX surface on non-DX host.\n"); + return -EINVAL; + } + + ret = ttm_read_lock(&dev_priv->reservation_sem, true); + if (unlikely(ret != 0)) + return ret; + + ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), + user_accounting_size, false, true); + if (unlikely(ret != 0)) { + if (ret != -ERESTARTSYS) + DRM_ERROR("Out of graphics memory for surface" + " creation.\n"); + goto out_unlock; + } + + user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); + if (unlikely(user_srf == NULL)) { + ret = -ENOMEM; + goto out_no_user_srf; + } + + *srf_out = &user_srf->srf; + user_srf->size = user_accounting_size; + user_srf->prime.base.shareable = false; + user_srf->prime.base.tfile = NULL; + + srf = &user_srf->srf; + srf->flags = svga3d_flags; + srf->format = format; + srf->scanout = for_scanout; + srf->mip_levels[0] = num_mip_levels; + srf->num_sizes = 1; + srf->sizes = NULL; + srf->offsets = NULL; + srf->base_size = size; + srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; + srf->array_size = array_size; + srf->multisample_count = multisample_count; + + if (array_size) + num_layers = array_size; + else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP) + num_layers = SVGA3D_MAX_SURFACE_FACES; + else + num_layers = 1; + + srf->res.backup_size = + svga3dsurface_get_serialized_size(srf->format, + srf->base_size, + srf->mip_levels[0], + num_layers); + + if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT) + srf->res.backup_size += sizeof(SVGA3dDXSOState); + + if (dev_priv->active_display_unit == vmw_du_screen_target && + for_scanout) + srf->flags |= SVGA3D_SURFACE_SCREENTARGET; + + /* + * From this point, the generic resource management functions + * destroy the object on failure. + */ + ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); + + ttm_read_unlock(&dev_priv->reservation_sem); + return ret; + +out_no_user_srf: + ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size); + +out_unlock: + ttm_read_unlock(&dev_priv->reservation_sem); + return ret; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c index 98d6bfb3a997..e771091d2cd3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a |