Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
94 files changed, 6665 insertions, 4357 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig index b30fcfa4b1f2..7335c0420c70 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -15,3 +15,13 @@ config DRM_AMDGPU_USERPTR help This option selects CONFIG_MMU_NOTIFIER if it isn't already selected to enabled full userptr support. + +config DRM_AMDGPU_GART_DEBUGFS + bool "Allow GART access through debugfs" + depends on DRM_AMDGPU + depends on DEBUG_FS + default n + help + Selecting this option creates a debugfs file to inspect the mapped + pages. Uses more memory for housekeeping, enable only for debugging. + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c4a21c6428f5..8ebc5f1eb4c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -85,8 +85,12 @@ extern int amdgpu_vm_debug; extern int amdgpu_sched_jobs; extern int amdgpu_sched_hw_submission; extern int amdgpu_powerplay; +extern int amdgpu_powercontainment; extern unsigned amdgpu_pcie_gen_cap; extern unsigned amdgpu_pcie_lane_cap; +extern unsigned amdgpu_cg_mask; +extern unsigned amdgpu_pg_mask; +extern char *amdgpu_disable_cu; #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ @@ -183,6 +187,10 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev, int amdgpu_set_powergating_state(struct amdgpu_device *adev, enum amd_ip_block_type block_type, enum amd_powergating_state state); +int amdgpu_wait_for_idle(struct amdgpu_device *adev, + enum amd_ip_block_type block_type); +bool amdgpu_is_idle(struct amdgpu_device *adev, + enum amd_ip_block_type block_type); struct amdgpu_ip_block_version { enum amd_ip_block_type type; @@ -283,7 +291,8 @@ struct amdgpu_ring_funcs { int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx); /* command emit functions */ void (*emit_ib)(struct amdgpu_ring *ring, - struct amdgpu_ib *ib); + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch); void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr, uint64_t seq, unsigned flags); void (*emit_pipeline_sync)(struct amdgpu_ring *ring); @@ -297,11 +306,16 @@ struct amdgpu_ring_funcs { uint32_t oa_base, uint32_t oa_size); /* testing functions */ int (*test_ring)(struct amdgpu_ring *ring); - int (*test_ib)(struct amdgpu_ring *ring); + int (*test_ib)(struct amdgpu_ring *ring, long timeout); /* insert NOP packets */ void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count); /* pad the indirect buffer to the necessary number of dw */ void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib); + unsigned (*init_cond_exec)(struct amdgpu_ring *ring); + void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset); + /* note usage for clock and power gating */ + void (*begin_use)(struct amdgpu_ring *ring); + void (*end_use)(struct amdgpu_ring *ring); }; /* @@ -365,13 +379,6 @@ struct amdgpu_fence_driver { #define AMDGPU_FENCE_FLAG_64BIT (1 << 0) #define AMDGPU_FENCE_FLAG_INT (1 << 1) -struct amdgpu_user_fence { - /* write-back bo */ - struct amdgpu_bo *bo; - /* write-back address offset to bo start */ - uint32_t offset; -}; - int amdgpu_fence_driver_init(struct amdgpu_device *adev); void amdgpu_fence_driver_fini(struct amdgpu_device *adev); void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev); @@ -391,6 +398,14 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); /* * TTM. 
*/ + +#define AMDGPU_TTM_LRU_SIZE 20 + +struct amdgpu_mman_lru { + struct list_head *lru[TTM_NUM_MEM_TYPES]; + struct list_head *swap_lru; +}; + struct amdgpu_mman { struct ttm_bo_global_ref bo_global_ref; struct drm_global_reference mem_global_ref; @@ -408,6 +423,9 @@ struct amdgpu_mman { struct amdgpu_ring *buffer_funcs_ring; /* Scheduler entity for buffer moves */ struct amd_sched_entity entity; + + /* custom LRU management */ + struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE]; }; int amdgpu_copy_buffer(struct amdgpu_ring *ring, @@ -494,9 +512,10 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, struct drm_file *file_priv); unsigned long amdgpu_gem_timeout(uint64_t timeout_ns); struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); -struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev, - struct dma_buf_attachment *attach, - struct sg_table *sg); +struct drm_gem_object * +amdgpu_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sg); struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, struct drm_gem_object *gobj, int flags); @@ -586,11 +605,14 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct reservation_object *resv, void *owner); +struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, + struct amdgpu_ring *ring); struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); -int amdgpu_sync_wait(struct amdgpu_sync *sync); void amdgpu_sync_free(struct amdgpu_sync *sync); int amdgpu_sync_init(void); void amdgpu_sync_fini(void); +int amdgpu_fence_slab_init(void); +void amdgpu_fence_slab_fini(void); /* * GART structures, functions & helpers @@ -609,8 +631,9 @@ struct amdgpu_gart { unsigned num_gpu_pages; unsigned num_cpu_pages; unsigned table_size; +#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS struct page **pages; - dma_addr_t *pages_addr; +#endif bool ready; const struct amdgpu_gart_funcs *gart_funcs; }; @@ -709,6 +732,7 @@ struct amdgpu_flip_work { unsigned shared_count; struct fence **shared; struct fence_cb cb; + bool async; }; @@ -721,17 +745,7 @@ struct amdgpu_ib { uint32_t length_dw; uint64_t gpu_addr; uint32_t *ptr; - struct amdgpu_user_fence *user; - struct amdgpu_vm *vm; - unsigned vm_id; - uint64_t vm_pd_addr; - struct amdgpu_ctx *ctx; - uint32_t gds_base, gds_size; - uint32_t gws_base, gws_size; - uint32_t oa_base, oa_size; uint32_t flags; - /* resulting sequence number */ - uint64_t sequence; }; enum amdgpu_ring_type { @@ -742,12 +756,14 @@ enum amdgpu_ring_type { AMDGPU_RING_TYPE_VCE }; -extern struct amd_sched_backend_ops amdgpu_sched_ops; +extern const struct amd_sched_backend_ops amdgpu_sched_ops; int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, - struct amdgpu_job **job); + struct amdgpu_job **job, struct amdgpu_vm *vm); int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, struct amdgpu_job **job); + +void amdgpu_job_free_resources(struct amdgpu_job *job); void amdgpu_job_free(struct amdgpu_job *job); int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, struct amd_sched_entity *entity, void *owner, @@ -757,14 +773,11 @@ struct amdgpu_ring { struct amdgpu_device *adev; const struct amdgpu_ring_funcs *funcs; struct amdgpu_fence_driver fence_drv; - struct amd_gpu_scheduler sched; + struct amd_gpu_scheduler sched; - spinlock_t fence_lock; struct amdgpu_bo *ring_obj; volatile uint32_t *ring; unsigned rptr_offs; - u64 next_rptr_gpu_addr; - volatile u32 
*next_rptr_cpu_addr; unsigned wptr; unsigned wptr_old; unsigned ring_size; @@ -783,11 +796,16 @@ struct amdgpu_ring { u32 doorbell_index; bool use_doorbell; unsigned wptr_offs; - unsigned next_rptr_offs; unsigned fence_offs; - struct amdgpu_ctx *current_ctx; + uint64_t current_ctx; enum amdgpu_ring_type type; char name[16]; + unsigned cond_exe_offs; + u64 cond_exe_gpu_addr; + volatile u32 *cond_exe_cpu_addr; +#if defined(CONFIG_DEBUG_FS) + struct dentry *ent; +#endif }; /* @@ -830,13 +848,6 @@ struct amdgpu_vm_pt { uint64_t addr; }; -struct amdgpu_vm_id { - struct amdgpu_vm_manager_id *mgr_id; - uint64_t pd_gpu_addr; - /* last flushed PD/PT update */ - struct fence *flushed_updates; -}; - struct amdgpu_vm { /* tree of virtual addresses mapped */ struct rb_root va; @@ -857,24 +868,36 @@ struct amdgpu_vm { struct amdgpu_bo *page_directory; unsigned max_pde_used; struct fence *page_directory_fence; + uint64_t last_eviction_counter; /* array of page tables, one for each page directory entry */ struct amdgpu_vm_pt *page_tables; /* for id and flush management per ring */ - struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS]; + struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS]; /* protecting freed */ spinlock_t freed_lock; /* Scheduler entity for page table updates */ struct amd_sched_entity entity; + + /* client id */ + u64 client_id; }; -struct amdgpu_vm_manager_id { +struct amdgpu_vm_id { struct list_head list; - struct fence *active; - atomic_long_t owner; + struct fence *first; + struct amdgpu_sync active; + struct fence *last_flush; + atomic64_t owner; + + uint64_t pd_gpu_addr; + /* last flushed PD/PT update */ + struct fence *flushed_updates; + + uint32_t current_gpu_reset_count; uint32_t gds_base; uint32_t gds_size; @@ -889,7 +912,11 @@ struct amdgpu_vm_manager { struct mutex lock; unsigned num_ids; struct list_head ids_lru; - struct amdgpu_vm_manager_id ids[AMDGPU_NUM_VM]; + struct amdgpu_vm_id ids[AMDGPU_NUM_VM]; + + /* Handling of VM fences */ + u64 fence_context; + unsigned seqno[AMDGPU_MAX_RINGS]; uint32_t max_pfn; /* vram base address for page table entry */ @@ -901,6 +928,8 @@ struct amdgpu_vm_manager { struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS]; unsigned vm_pte_num_rings; atomic_t vm_pte_next_ring; + /* client id counter */ + atomic64_t client_counter; }; void amdgpu_vm_manager_init(struct amdgpu_device *adev); @@ -910,17 +939,14 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct list_head *validated, struct amdgpu_bo_list_entry *entry); -void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates); +void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct list_head *duplicates); void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_sync *sync, struct fence *fence, - unsigned *vm_id, uint64_t *vm_pd_addr); -void amdgpu_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr, - uint32_t gds_base, uint32_t gds_size, - uint32_t gws_base, uint32_t gws_size, - uint32_t oa_base, uint32_t oa_size); + struct amdgpu_job *job); +int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, @@ -1026,6 +1052,11 @@ void 
amdgpu_bo_list_free(struct amdgpu_bo_list *list); */ #include "clearstate_defs.h" +struct amdgpu_rlc_funcs { + void (*enter_safe_mode)(struct amdgpu_device *adev); + void (*exit_safe_mode)(struct amdgpu_device *adev); +}; + struct amdgpu_rlc { /* for power gating */ struct amdgpu_bo *save_restore_obj; @@ -1044,6 +1075,24 @@ struct amdgpu_rlc { uint64_t cp_table_gpu_addr; volatile uint32_t *cp_table_ptr; u32 cp_table_size; + + /* safe mode for updating CG/PG state */ + bool in_safe_mode; + const struct amdgpu_rlc_funcs *funcs; + + /* for firmware data */ + u32 save_and_restore_offset; + u32 clear_state_descriptor_offset; + u32 avail_scratch_ram_locations; + u32 reg_restore_list_size; + u32 reg_list_format_start; + u32 reg_list_format_separate_start; + u32 starting_offsets_start; + u32 reg_list_format_size_bytes; + u32 reg_list_size_bytes; + + u32 *register_list_format; + u32 *register_restore; }; struct amdgpu_mec { @@ -1097,6 +1146,18 @@ struct amdgpu_gca_config { uint32_t macrotile_mode_array[16]; }; +struct amdgpu_cu_info { + uint32_t number; /* total active CU number */ + uint32_t ao_cu_mask; + uint32_t bitmap[4][4]; +}; + +struct amdgpu_gfx_funcs { + /* get the gpu clock counter */ + uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); + void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance); +}; + struct amdgpu_gfx { struct mutex gpu_clock_mutex; struct amdgpu_gca_config config; @@ -1129,17 +1190,20 @@ struct amdgpu_gfx { struct amdgpu_irq_src priv_reg_irq; struct amdgpu_irq_src priv_inst_irq; /* gfx status */ - uint32_t gfx_current_status; + uint32_t gfx_current_status; /* ce ram size*/ - unsigned ce_ram_size; + unsigned ce_ram_size; + struct amdgpu_cu_info cu_info; + const struct amdgpu_gfx_funcs *funcs; }; int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned size, struct amdgpu_ib *ib); -void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f); +void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, + struct fence *f); int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, struct amdgpu_ib *ib, struct fence *last_vm_update, - struct fence **f); + struct amdgpu_job *job, struct fence **f); int amdgpu_ib_pool_init(struct amdgpu_device *adev); void amdgpu_ib_pool_fini(struct amdgpu_device *adev); int amdgpu_ib_ring_tests(struct amdgpu_device *adev); @@ -1148,10 +1212,6 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); void amdgpu_ring_commit(struct amdgpu_ring *ring); void amdgpu_ring_undo(struct amdgpu_ring *ring); -unsigned amdgpu_ring_backup(struct amdgpu_ring *ring, - uint32_t **data); -int amdgpu_ring_restore(struct amdgpu_ring *ring, - unsigned size, uint32_t *data); int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned ring_size, u32 nop, u32 align_mask, struct amdgpu_irq_src *irq_src, unsigned irq_type, @@ -1164,7 +1224,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring); struct amdgpu_cs_chunk { uint32_t chunk_id; uint32_t length_dw; - uint32_t *kdata; + void *kdata; }; struct amdgpu_cs_parser { @@ -1195,13 +1255,25 @@ struct amdgpu_cs_parser { struct amdgpu_job { struct amd_sched_job base; struct amdgpu_device *adev; + struct amdgpu_vm *vm; struct amdgpu_ring *ring; struct amdgpu_sync sync; struct amdgpu_ib *ibs; struct fence *fence; /* the hw fence */ uint32_t num_ibs; void *owner; - struct amdgpu_user_fence uf; + 
uint64_t ctx; + bool vm_needs_flush; + unsigned vm_id; + uint64_t vm_pd_addr; + uint32_t gds_base, gds_size; + uint32_t gws_base, gws_size; + uint32_t oa_base, oa_size; + + /* user fence handling */ + uint64_t uf_addr; + uint64_t uf_sequence; + }; #define to_amdgpu_job(sched_job) \ container_of((sched_job), struct amdgpu_job, base) @@ -1501,6 +1573,12 @@ struct amdgpu_dpm_funcs { u32 (*get_fan_control_mode)(struct amdgpu_device *adev); int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed); int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed); + int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask); + int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf); + int (*get_sclk_od)(struct amdgpu_device *adev); + int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value); + int (*get_mclk_od)(struct amdgpu_device *adev); + int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value); }; struct amdgpu_dpm { @@ -1582,15 +1660,20 @@ void amdgpu_get_pcie_info(struct amdgpu_device *adev); /* * UVD */ -#define AMDGPU_MAX_UVD_HANDLES 10 -#define AMDGPU_UVD_STACK_SIZE (1024*1024) -#define AMDGPU_UVD_HEAP_SIZE (1024*1024) -#define AMDGPU_UVD_FIRMWARE_OFFSET 256 +#define AMDGPU_DEFAULT_UVD_HANDLES 10 +#define AMDGPU_MAX_UVD_HANDLES 40 +#define AMDGPU_UVD_STACK_SIZE (200*1024) +#define AMDGPU_UVD_HEAP_SIZE (256*1024) +#define AMDGPU_UVD_SESSION_SIZE (50*1024) +#define AMDGPU_UVD_FIRMWARE_OFFSET 256 struct amdgpu_uvd { struct amdgpu_bo *vcpu_bo; void *cpu_addr; uint64_t gpu_addr; + unsigned fw_version; + void *saved_bo; + unsigned max_handles; atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; struct delayed_work idle_work; @@ -1598,6 +1681,7 @@ struct amdgpu_uvd { struct amdgpu_ring ring; struct amdgpu_irq_src irq; bool address_64_bit; + bool use_ctx_buf; struct amd_sched_entity entity; }; @@ -1619,6 +1703,7 @@ struct amdgpu_vce { struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES]; uint32_t img_size[AMDGPU_MAX_VCE_HANDLES]; struct delayed_work idle_work; + struct mutex idle_mutex; const struct firmware *fw; /* VCE firmware */ struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; struct amdgpu_irq_src irq; @@ -1643,7 +1728,7 @@ struct amdgpu_sdma { struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES]; struct amdgpu_irq_src trap_irq; struct amdgpu_irq_src illegal_inst_irq; - int num_instances; + int num_instances; }; /* @@ -1689,12 +1774,12 @@ static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {} * Debugfs */ struct amdgpu_debugfs { - struct drm_info_list *files; + const struct drm_info_list *files; unsigned num_files; }; int amdgpu_debugfs_add_files(struct amdgpu_device *adev, - struct drm_info_list *files, + const struct drm_info_list *files, unsigned nfiles); int amdgpu_debugfs_fence_init(struct amdgpu_device *adev); @@ -1703,6 +1788,8 @@ int amdgpu_debugfs_init(struct drm_minor *minor); void amdgpu_debugfs_cleanup(struct drm_minor *minor); #endif +int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev); + /* * amdgpu smumgr functions */ @@ -1736,13 +1823,6 @@ struct amdgpu_allowed_register_entry { bool grbm_indexed; }; -struct amdgpu_cu_info { - uint32_t number; /* total active CU number */ - uint32_t ao_cu_mask; - uint32_t bitmap[4][4]; -}; - - /* * ASIC specific functions. 
*/ @@ -1754,16 +1834,13 @@ struct amdgpu_asic_funcs { u32 sh_num, u32 reg_offset, u32 *value); void (*set_vga_state)(struct amdgpu_device *adev, bool state); int (*reset)(struct amdgpu_device *adev); - /* wait for mc_idle */ - int (*wait_for_mc_idle)(struct amdgpu_device *adev); /* get the reference clock */ u32 (*get_xclk)(struct amdgpu_device *adev); - /* get the gpu clock counter */ - uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); - int (*get_cu_info)(struct amdgpu_device *adev, struct amdgpu_cu_info *info); /* MM block clocks */ int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); + /* query virtual capabilities */ + u32 (*get_virtual_caps)(struct amdgpu_device *adev); }; /* @@ -1853,20 +1930,17 @@ struct amdgpu_atcs { /* * CGS */ -void *amdgpu_cgs_create_device(struct amdgpu_device *adev); -void amdgpu_cgs_destroy_device(void *cgs_device); - - -/* - * CGS - */ -void *amdgpu_cgs_create_device(struct amdgpu_device *adev); -void amdgpu_cgs_destroy_device(void *cgs_device); +struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev); +void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device); /* GPU virtualization */ +#define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0) +#define AMDGPU_VIRT_CAPS_IS_VF (1 << 1) struct amdgpu_virtualization { bool supports_sr_iov; + bool is_virtual; + u32 caps; }; /* @@ -1902,16 +1976,15 @@ struct amdgpu_device { int usec_timeout; const struct amdgpu_asic_funcs *asic_funcs; bool shutdown; - bool suspend; bool need_dma32; bool accel_working; - struct work_struct reset_work; + struct work_struct reset_work; struct notifier_block acpi_nb; struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS]; struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; - unsigned debugfs_count; + unsigned debugfs_count; #if defined(CONFIG_DEBUG_FS) - struct dentry *debugfs_regs; + struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; #endif struct amdgpu_atif atif; struct amdgpu_atcs atcs; @@ -1924,7 +1997,6 @@ struct amdgpu_device { /* BIOS */ uint8_t *bios; bool is_atom_bios; - uint16_t bios_header_start; struct amdgpu_bo *stollen_vga_memory; uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH]; @@ -1950,6 +2022,10 @@ struct amdgpu_device { spinlock_t didt_idx_lock; amdgpu_rreg_t didt_rreg; amdgpu_wreg_t didt_wreg; + /* protects concurrent gc_cac register access */ + spinlock_t gc_cac_idx_lock; + amdgpu_rreg_t gc_cac_rreg; + amdgpu_wreg_t gc_cac_wreg; /* protects concurrent ENDPOINT (audio) register access */ spinlock_t audio_endpt_idx_lock; amdgpu_block_rreg_t audio_endpt_rreg; @@ -1975,6 +2051,7 @@ struct amdgpu_device { atomic64_t vram_vis_usage; atomic64_t gtt_usage; atomic64_t num_bytes_moved; + atomic64_t num_evictions; atomic_t gpu_reset_counter; /* display */ @@ -1985,7 +2062,7 @@ struct amdgpu_device { struct amdgpu_irq_src hpd_irq; /* rings */ - unsigned fence_context; + u64 fence_context; unsigned num_rings; struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; bool ib_pool_ready; @@ -2033,6 +2110,7 @@ struct amdgpu_device { /* tracking pinned memory */ u64 vram_pin_size; + u64 invisible_pin_size; u64 gart_pin_size; /* amdkfd interface */ @@ -2077,6 +2155,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v); #define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v)) #define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg)) #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v)) +#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, 
(reg)) +#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v)) #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg)) #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v)) #define WREG32_P(reg, val, mask) \ @@ -2152,15 +2232,13 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) */ #define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state)) #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev)) -#define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev)) #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) -#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) +#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev))) #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) -#define amdgpu_asic_get_cu_info(adev, info) (adev)->asic_funcs->get_cu_info((adev), (info)) #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid)) #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) @@ -2168,11 +2246,11 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) -#define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r)) +#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t)) #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) -#define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib)) +#define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c)) #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r)) #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) @@ -2180,6 +2258,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r)) #define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r)) #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib))) +#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) +#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o)) #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) #define amdgpu_ih_decode_iv(adev, iv) 
(adev)->irq.ih_funcs->decode_iv((adev), (iv)) #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) @@ -2193,7 +2273,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h)) #define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev)) #define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev)) -#define amdgpu_display_page_flip(adev, crtc, base) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base)) +#define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async)) #define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos)) #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c)) #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r)) @@ -2208,6 +2288,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps)) #define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev)) #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e)) +#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) +#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance)) #define amdgpu_dpm_get_temperature(adev) \ ((adev)->pp_enabled ? \ @@ -2286,6 +2368,18 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_dpm_force_clock_level(adev, type, level) \ (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level) +#define amdgpu_dpm_get_sclk_od(adev) \ + (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle) + +#define amdgpu_dpm_set_sclk_od(adev, value) \ + (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value) + +#define amdgpu_dpm_get_mclk_od(adev) \ + ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle)) + +#define amdgpu_dpm_set_mclk_od(adev, value) \ + ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value)) + #define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \ (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output)) @@ -2327,16 +2421,20 @@ bool amdgpu_device_is_px(struct drm_device *dev); #if defined(CONFIG_VGA_SWITCHEROO) void amdgpu_register_atpx_handler(void); void amdgpu_unregister_atpx_handler(void); +bool amdgpu_has_atpx_dgpu_power_cntl(void); +bool amdgpu_is_atpx_hybrid(void); #else static inline void amdgpu_register_atpx_handler(void) {} static inline void amdgpu_unregister_atpx_handler(void) {} +static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; } +static inline bool amdgpu_is_atpx_hybrid(void) { return false; } #endif /* * KMS */ extern const struct drm_ioctl_desc amdgpu_ioctls_kms[]; -extern int amdgpu_max_kms_ioctl; +extern const int amdgpu_max_kms_ioctl; int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags); int amdgpu_driver_unload_kms(struct drm_device *dev); @@ -2395,5 +2493,4 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, uint64_t addr, struct 
amdgpu_bo **bo); #include "amdgpu_object.h" - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index d6b0bff510aa..892d60fb225b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -421,25 +421,6 @@ static int acp_suspend(void *handle) static int acp_resume(void *handle) { - int i, ret; - struct acp_pm_domain *apd; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - /* SMU block will power on ACP irrespective of ACP runtime status. - * Power off explicitly based on genpd ACP runtime status so that ACP - * hw and ACP-genpd status are in sync. - * 'suspend_power_off' represents "Power status before system suspend" - */ - if (adev->acp.acp_genpd->gpd.suspend_power_off == true) { - apd = container_of(&adev->acp.acp_genpd->gpd, - struct acp_pm_domain, gpd); - - for (i = 4; i >= 0 ; i--) { - ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i); - if (ret) - pr_err("ACP tile %d tile suspend failed\n", i); - } - } return 0; } @@ -463,13 +444,6 @@ static int acp_soft_reset(void *handle) return 0; } -static void acp_print_status(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - dev_info(adev->dev, "ACP STATUS\n"); -} - static int acp_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -483,6 +457,7 @@ static int acp_set_powergating_state(void *handle, } const struct amd_ip_funcs acp_ip_funcs = { + .name = "acp_ip", .early_init = acp_early_init, .late_init = NULL, .sw_init = acp_sw_init, @@ -494,7 +469,6 @@ const struct amd_ip_funcs acp_ip_funcs = { .is_idle = acp_is_idle, .wait_for_idle = acp_wait_for_idle, .soft_reset = acp_soft_reset, - .print_status = acp_print_status, .set_clockgating_state = acp_set_clockgating_state, .set_powergating_state = acp_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h index f6e32a639107..8a396313c86f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h @@ -30,7 +30,7 @@ struct amdgpu_acp { struct device *parent; - void *cgs_device; + struct cgs_device *cgs_device; struct amd_acp_private *private; struct mfd_cell *acp_cell; struct resource *acp_res; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 32809f749903..d080d0807a5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -240,8 +240,8 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd) { struct amdgpu_device *rdev = (struct amdgpu_device *)kgd; - if (rdev->asic_funcs->get_gpu_clock_counter) - return rdev->asic_funcs->get_gpu_clock_counter(rdev); + if (rdev->gfx.funcs->get_gpu_clock_counter) + return rdev->gfx.funcs->get_gpu_clock_counter(rdev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 84b0ce39ee14..983175363b06 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -234,16 +234,6 @@ amdgpu_atombios_get_hpd_info_from_gpio(struct amdgpu_device *adev, return hpd; } -static bool amdgpu_atombios_apply_quirks(struct amdgpu_device *adev, - uint32_t supported_device, - int *connector_type, - struct amdgpu_i2c_bus_rec *i2c_bus, - uint16_t *line_mux, - struct amdgpu_hpd *hpd) -{ - return true; -} - static const int object_connector_convert[] = { DRM_MODE_CONNECTOR_Unknown, 
DRM_MODE_CONNECTOR_DVII, @@ -514,11 +504,6 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device * conn_id = le16_to_cpu(path->usConnObjectId); - if (!amdgpu_atombios_apply_quirks - (adev, le16_to_cpu(path->usDeviceTag), &connector_type, - &ddc_bus, &conn_id, &hpd)) - continue; - amdgpu_display_add_connector(adev, conn_id, le16_to_cpu(path->usDeviceTag), @@ -566,28 +551,19 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) le16_to_cpu(firmware_info->info.usReferenceClock); ppll->reference_div = 0; - if (crev < 2) - ppll->pll_out_min = - le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output); - else - ppll->pll_out_min = - le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output); + ppll->pll_out_min = + le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output); ppll->pll_out_max = le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); - if (crev >= 4) { - ppll->lcd_pll_out_min = - le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; - if (ppll->lcd_pll_out_min == 0) - ppll->lcd_pll_out_min = ppll->pll_out_min; - ppll->lcd_pll_out_max = - le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100; - if (ppll->lcd_pll_out_max == 0) - ppll->lcd_pll_out_max = ppll->pll_out_max; - } else { + ppll->lcd_pll_out_min = + le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; + if (ppll->lcd_pll_out_min == 0) ppll->lcd_pll_out_min = ppll->pll_out_min; + ppll->lcd_pll_out_max = + le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100; + if (ppll->lcd_pll_out_max == 0) ppll->lcd_pll_out_max = ppll->pll_out_max; - } if (ppll->pll_out_min == 0) ppll->pll_out_min = 64800; @@ -699,6 +675,36 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) return ret; } +union gfx_info { + ATOM_GFX_INFO_V2_1 info; +}; + +int amdgpu_atombios_get_gfx_info(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + int index = GetIndexIntoMasterTable(DATA, GFX_Info); + uint8_t frev, crev; + uint16_t data_offset; + int ret = -EINVAL; + + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + union gfx_info *gfx_info = (union gfx_info *) + (mode_info->atom_context->bios + data_offset); + + adev->gfx.config.max_shader_engines = gfx_info->info.max_shader_engines; + adev->gfx.config.max_tile_pipes = gfx_info->info.max_tile_pipes; + adev->gfx.config.max_cu_per_sh = gfx_info->info.max_cu_per_sh; + adev->gfx.config.max_sh_per_se = gfx_info->info.max_sh_per_se; + adev->gfx.config.max_backends_per_se = gfx_info->info.max_backends_per_se; + adev->gfx.config.max_texture_channel_caches = + gfx_info->info.max_texture_channel_caches; + + ret = 0; + } + return ret; +} + union igp_info { struct _ATOM_INTEGRATED_SYSTEM_INFO info; struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 9e1442053fe4..8c2e69661799 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -144,6 +144,8 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device * int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev); +int amdgpu_atombios_get_gfx_info(struct amdgpu_device *adev); + bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev, struct amdgpu_atom_ss *ss, int id, u32 clock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 0020a0ea43ff..49de92600074 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -10,6 +10,7 @@ #include <linux/slab.h> #include <linux/acpi.h> #include <linux/pci.h> +#include <linux/delay.h> #include "amd_acpi.h" @@ -27,6 +28,7 @@ struct amdgpu_atpx_functions { struct amdgpu_atpx { acpi_handle handle; struct amdgpu_atpx_functions functions; + bool is_hybrid; }; static struct amdgpu_atpx_priv { @@ -67,6 +69,10 @@ bool amdgpu_has_atpx_dgpu_power_cntl(void) { return amdgpu_atpx_priv.atpx.functions.power_cntl; } +bool amdgpu_is_atpx_hybrid(void) { + return amdgpu_atpx_priv.atpx.is_hybrid; +} + /** * amdgpu_atpx_call - call an ATPX method * @@ -146,11 +152,12 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas */ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) { + u32 valid_bits = 0; + if (atpx->functions.px_params) { union acpi_object *info; struct atpx_px_params output; size_t size; - u32 valid_bits; info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL); if (!info) @@ -169,19 +176,43 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) memcpy(&output, info->buffer.pointer, size); valid_bits = output.flags & output.valid_flags; - /* if separate mux flag is set, mux controls are required */ - if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) { - atpx->functions.i2c_mux_cntl = true; - atpx->functions.disp_mux_cntl = true; - } - /* if any outputs are muxed, mux controls are required */ - if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED | - ATPX_TV_SIGNAL_MUXED | - ATPX_DFP_SIGNAL_MUXED)) - atpx->functions.disp_mux_cntl = true; kfree(info); } + + /* if separate mux flag is set, mux controls are required */ + if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) { + atpx->functions.i2c_mux_cntl = true; + atpx->functions.disp_mux_cntl = true; + } + /* if any outputs are muxed, mux controls are required */ + if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED | + ATPX_TV_SIGNAL_MUXED | + ATPX_DFP_SIGNAL_MUXED)) + atpx->functions.disp_mux_cntl = true; + + + /* some bioses set these bits rather than flagging power_cntl as supported */ + if (valid_bits & (ATPX_DYNAMIC_PX_SUPPORTED | + ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED)) + atpx->functions.power_cntl = true; + + atpx->is_hybrid = false; + if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { + printk("ATPX Hybrid Graphics\n"); +#if 1 + /* This is a temporary hack until the D3 cold support + * makes it upstream. The ATPX power_control method seems + * to still work on even if the system should be using + * the new standardized hybrid D3 cold ACPI interface. 
+ */ + atpx->functions.power_cntl = true; +#else + atpx->functions.power_cntl = false; +#endif + atpx->is_hybrid = true; + } + return 0; } @@ -256,6 +287,10 @@ static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state) if (!info) return -EIO; kfree(info); + + /* 200ms delay is required after off */ + if (state == 0) + msleep(200); } return 0; } @@ -504,7 +539,6 @@ static int amdgpu_atpx_get_client_id(struct pci_dev *pdev) static const struct vga_switcheroo_handler amdgpu_atpx_handler = { .switchto = amdgpu_atpx_switchto, .power_state = amdgpu_atpx_power_state, - .init = amdgpu_atpx_init, .get_client_id = amdgpu_atpx_get_client_id, }; @@ -539,6 +573,7 @@ static bool amdgpu_atpx_detect(void) printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n", acpi_method_name); amdgpu_atpx_priv.atpx_detected = true; + amdgpu_atpx_init(); return true; } return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c index cd639c362df3..33e47a43ae32 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c @@ -141,7 +141,7 @@ out_cleanup: void amdgpu_benchmark(struct amdgpu_device *adev, int test_number) { int i; - int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = { + static const int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = { 640 * 480 * 4, 720 * 480 * 4, 800 * 600 * 4, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 80add22375ee..2b6afe123f3d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -349,25 +349,25 @@ static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev) bool amdgpu_get_bios(struct amdgpu_device *adev) { bool r; - uint16_t tmp; + uint16_t tmp, bios_header_start; r = amdgpu_atrm_get_bios(adev); - if (r == false) + if (!r) r = amdgpu_acpi_vfct_bios(adev); - if (r == false) + if (!r) r = igp_read_bios_from_vram(adev); - if (r == false) + if (!r) r = amdgpu_read_bios(adev); - if (r == false) { + if (!r) { r = amdgpu_read_bios_from_rom(adev); } - if (r == false) { + if (!r) { r = amdgpu_read_disabled_bios(adev); } - if (r == false) { + if (!r) { r = amdgpu_read_platform_bios(adev); } - if (r == false || adev->bios == NULL) { + if (!r || adev->bios == NULL) { DRM_ERROR("Unable to locate a BIOS ROM\n"); adev->bios = NULL; return false; @@ -383,11 +383,11 @@ bool amdgpu_get_bios(struct amdgpu_device *adev) goto free_bios; } - adev->bios_header_start = RBIOS16(0x48); - if (!adev->bios_header_start) { + bios_header_start = RBIOS16(0x48); + if (!bios_header_start) { goto free_bios; } - tmp = adev->bios_header_start + 4; + tmp = bios_header_start + 4; if (!memcmp(adev->bios + tmp, "ATOM", 4) || !memcmp(adev->bios + tmp, "MOTA", 4)) { adev->is_atom_bios = true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index eacd810fc09b..651115dcce12 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -94,6 +94,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, unsigned last_entry = 0, first_userptr = num_entries; unsigned i; int r; + unsigned long total_size = 0; array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry)); if (!array) @@ -106,7 +107,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, struct amdgpu_bo *bo; struct mm_struct *usermm; - gobj = drm_gem_object_lookup(adev->ddev, filp, 
info[i].bo_handle); + gobj = drm_gem_object_lookup(filp, info[i].bo_handle); if (!gobj) { r = -ENOENT; goto error_free; @@ -140,6 +141,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA) oa_obj = entry->robj; + total_size += amdgpu_bo_size(entry->robj); trace_amdgpu_bo_list_set(list, entry->robj); } @@ -155,6 +157,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, list->array = array; list->num_entries = num_entries; + trace_amdgpu_cs_bo_status(list->num_entries, total_size); return 0; error_free: @@ -263,7 +266,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, for (i = 0; i < args->in.bo_number; ++i) { if (copy_from_user(&info[i], uptr, bytes)) goto error_free; - + uptr += args->in.bo_info_size; } } @@ -271,7 +274,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, switch (args->in.operation) { case AMDGPU_BO_LIST_OP_CREATE: r = amdgpu_bo_list_create(fpriv, &list, &handle); - if (r) + if (r) goto error_free; r = amdgpu_bo_list_set(adev, filp, list, info, @@ -281,7 +284,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, goto error_free; break; - + case AMDGPU_BO_LIST_OP_DESTROY: amdgpu_bo_list_destroy(fpriv, handle); handle = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 7a4b101e10c6..bc0440f7a31d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -42,7 +42,7 @@ struct amdgpu_cgs_device { struct amdgpu_device *adev = \ ((struct amdgpu_cgs_device *)cgs_device)->adev -static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type, +static int amdgpu_cgs_gpu_mem_info(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type, uint64_t *mc_start, uint64_t *mc_size, uint64_t *mem_size) { @@ -73,7 +73,7 @@ static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type, return 0; } -static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem, +static int amdgpu_cgs_gmap_kmem(struct cgs_device *cgs_device, void *kmem, uint64_t size, uint64_t min_offset, uint64_t max_offset, cgs_handle_t *kmem_handle, uint64_t *mcaddr) @@ -102,7 +102,7 @@ static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem, return ret; } -static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle) +static int amdgpu_cgs_gunmap_kmem(struct cgs_device *cgs_device, cgs_handle_t kmem_handle) { struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle; @@ -118,7 +118,7 @@ static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle) return 0; } -static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device, +static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type, uint64_t size, uint64_t align, uint64_t min_offset, uint64_t max_offset, @@ -208,7 +208,7 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device, return ret; } -static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle) +static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle) { struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; @@ -225,7 +225,7 @@ static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle) return 0; } -static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle, +static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle, uint64_t *mcaddr) { int r; @@ -246,7 +246,7 @@ static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, 
cgs_handle_t handle, return r; } -static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle) +static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle) { int r; struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; @@ -258,7 +258,7 @@ static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle) return r; } -static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle, +static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle, void **map) { int r; @@ -271,7 +271,7 @@ static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle, return r; } -static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle) +static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle) { int r; struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; @@ -283,20 +283,20 @@ static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle) return r; } -static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset) +static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset) { CGS_FUNC_ADEV; return RREG32(offset); } -static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset, +static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset, uint32_t value) { CGS_FUNC_ADEV; WREG32(offset, value); } -static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device, +static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device, enum cgs_ind_reg space, unsigned index) { @@ -312,6 +312,8 @@ static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device, return RREG32_UVD_CTX(index); case CGS_IND_REG__DIDT: return RREG32_DIDT(index); + case CGS_IND_REG_GC_CAC: + return RREG32_GC_CAC(index); case CGS_IND_REG__AUDIO_ENDPT: DRM_ERROR("audio endpt register access not implemented.\n"); return 0; @@ -320,7 +322,7 @@ static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device, return 0; } -static void amdgpu_cgs_write_ind_register(void *cgs_device, +static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device, enum cgs_ind_reg space, unsigned index, uint32_t value) { @@ -336,6 +338,8 @@ static void amdgpu_cgs_write_ind_register(void *cgs_device, return WREG32_UVD_CTX(index, value); case CGS_IND_REG__DIDT: return WREG32_DIDT(index, value); + case CGS_IND_REG_GC_CAC: + return WREG32_GC_CAC(index, value); case CGS_IND_REG__AUDIO_ENDPT: DRM_ERROR("audio endpt register access not implemented.\n"); return; @@ -343,7 +347,7 @@ static void amdgpu_cgs_write_ind_register(void *cgs_device, WARN(1, "Invalid indirect register space"); } -static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr) +static uint8_t amdgpu_cgs_read_pci_config_byte(struct cgs_device *cgs_device, unsigned addr) { CGS_FUNC_ADEV; uint8_t val; @@ -353,7 +357,7 @@ static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr) return val; } -static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr) +static uint16_t amdgpu_cgs_read_pci_config_word(struct cgs_device *cgs_device, unsigned addr) { CGS_FUNC_ADEV; uint16_t val; @@ -363,7 +367,7 @@ static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr) return val; } -static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device, +static uint32_t amdgpu_cgs_read_pci_config_dword(struct cgs_device *cgs_device, unsigned addr) { CGS_FUNC_ADEV; @@ -374,7 +378,7 @@ static uint32_t 
amdgpu_cgs_read_pci_config_dword(void *cgs_device, return val; } -static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr, +static void amdgpu_cgs_write_pci_config_byte(struct cgs_device *cgs_device, unsigned addr, uint8_t value) { CGS_FUNC_ADEV; @@ -382,7 +386,7 @@ static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr, WARN(ret, "pci_write_config_byte error"); } -static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr, +static void amdgpu_cgs_write_pci_config_word(struct cgs_device *cgs_device, unsigned addr, uint16_t value) { CGS_FUNC_ADEV; @@ -390,7 +394,7 @@ static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr, WARN(ret, "pci_write_config_word error"); } -static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr, +static void amdgpu_cgs_write_pci_config_dword(struct cgs_device *cgs_device, unsigned addr, uint32_t value) { CGS_FUNC_ADEV; @@ -399,7 +403,7 @@ static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr, } -static int amdgpu_cgs_get_pci_resource(void *cgs_device, +static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device, enum cgs_resource_type resource_type, uint64_t size, uint64_t offset, @@ -433,7 +437,7 @@ static int amdgpu_cgs_get_pci_resource(void *cgs_device, } } -static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device, +static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device, unsigned table, uint16_t *size, uint8_t *frev, uint8_t *crev) { @@ -449,7 +453,7 @@ static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device, return NULL; } -static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table, +static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table, uint8_t *frev, uint8_t *crev) { CGS_FUNC_ADEV; @@ -462,7 +466,7 @@ static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table, return -EINVAL; } -static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table, +static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table, void *args) { CGS_FUNC_ADEV; @@ -471,33 +475,33 @@ static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table, adev->mode_info.atom_context, table, args); } -static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request) +static int amdgpu_cgs_create_pm_request(struct cgs_device *cgs_device, cgs_handle_t *request) { /* TODO */ return 0; } -static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request) +static int amdgpu_cgs_destroy_pm_request(struct cgs_device *cgs_device, cgs_handle_t request) { /* TODO */ return 0; } -static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request, +static int amdgpu_cgs_set_pm_request(struct cgs_device *cgs_device, cgs_handle_t request, int active) { /* TODO */ return 0; } -static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request, +static int amdgpu_cgs_pm_request_clock(struct cgs_device *cgs_device, cgs_handle_t request, enum cgs_clock clock, unsigned freq) { /* TODO */ return 0; } -static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request, +static int amdgpu_cgs_pm_request_engine(struct cgs_device *cgs_device, cgs_handle_t request, enum cgs_engine engine, int powered) { /* TODO */ @@ -506,7 +510,7 @@ static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request, -static int 
amdgpu_cgs_pm_query_clock_limits(void *cgs_device, +static int amdgpu_cgs_pm_query_clock_limits(struct cgs_device *cgs_device, enum cgs_clock clock, struct cgs_clock_limits *limits) { @@ -514,7 +518,7 @@ static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device, return 0; } -static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask, +static int amdgpu_cgs_set_camera_voltages(struct cgs_device *cgs_device, uint32_t mask, const uint32_t *voltages) { DRM_ERROR("not implemented"); @@ -565,7 +569,7 @@ static const struct amdgpu_irq_src_funcs cgs_irq_funcs = { .process = cgs_process_irq, }; -static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id, +static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src_id, unsigned num_types, cgs_irq_source_set_func_t set, cgs_irq_handler_func_t handler, @@ -600,19 +604,19 @@ static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id, return ret; } -static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type) +static int amdgpu_cgs_irq_get(struct cgs_device *cgs_device, unsigned src_id, unsigned type) { CGS_FUNC_ADEV; return amdgpu_irq_get(adev, adev->irq.sources[src_id], type); } -static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type) +static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, unsigned type) { CGS_FUNC_ADEV; return amdgpu_irq_put(adev, adev->irq.sources[src_id], type); } -int amdgpu_cgs_set_clockgating_state(void *cgs_device, +int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device, enum amd_ip_block_type block_type, enum amd_clockgating_state state) { @@ -633,7 +637,7 @@ int amdgpu_cgs_set_clockgating_state(void *cgs_device, return r; } -int amdgpu_cgs_set_powergating_state(void *cgs_device, +int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device, enum amd_ip_block_type block_type, enum amd_powergating_state state) { @@ -655,7 +659,7 @@ int amdgpu_cgs_set_powergating_state(void *cgs_device, } -static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type) +static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type) { CGS_FUNC_ADEV; enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM; @@ -681,9 +685,10 @@ static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type) result = AMDGPU_UCODE_ID_CP_MEC1; break; case CGS_UCODE_ID_CP_MEC_JT2: - if (adev->asic_type == CHIP_TONGA) + if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11 + || adev->asic_type == CHIP_POLARIS10) result = AMDGPU_UCODE_ID_CP_MEC2; - else if (adev->asic_type == CHIP_CARRIZO) + else result = AMDGPU_UCODE_ID_CP_MEC1; break; case CGS_UCODE_ID_RLC_G: @@ -695,13 +700,24 @@ static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type) return result; } -static int amdgpu_cgs_get_firmware_info(void *cgs_device, +static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type) +{ + CGS_FUNC_ADEV; + if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) { + release_firmware(adev->pm.fw); + return 0; + } + /* cannot release other firmware because they are not created by cgs */ + return -EINVAL; +} + +static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, enum cgs_ucode_id type, struct cgs_firmware_info *info) { CGS_FUNC_ADEV; - if (CGS_UCODE_ID_SMU != type) { + if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) { uint64_t gpu_addr; uint32_t data_size; const struct gfx_firmware_header_v1_0 *header; @@ -734,33 
+750,51 @@ static int amdgpu_cgs_get_firmware_info(void *cgs_device, const uint8_t *src; const struct smc_firmware_header_v1_0 *hdr; - switch (adev->asic_type) { - case CHIP_TONGA: - strcpy(fw_name, "amdgpu/tonga_smc.bin"); - break; - case CHIP_FIJI: - strcpy(fw_name, "amdgpu/fiji_smc.bin"); - break; - default: - DRM_ERROR("SMC firmware not supported\n"); - return -EINVAL; - } + if (!adev->pm.fw) { + switch (adev->asic_type) { + case CHIP_TOPAZ: + strcpy(fw_name, "amdgpu/topaz_smc.bin"); + break; + case CHIP_TONGA: + strcpy(fw_name, "amdgpu/tonga_smc.bin"); + break; + case CHIP_FIJI: + strcpy(fw_name, "amdgpu/fiji_smc.bin"); + break; + case CHIP_POLARIS11: + if (type == CGS_UCODE_ID_SMU) + strcpy(fw_name, "amdgpu/polaris11_smc.bin"); + else if (type == CGS_UCODE_ID_SMU_SK) + strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin"); + break; + case CHIP_POLARIS10: + if (type == CGS_UCODE_ID_SMU) + strcpy(fw_name, "amdgpu/polaris10_smc.bin"); + else if (type == CGS_UCODE_ID_SMU_SK) + strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin"); + break; + default: + DRM_ERROR("SMC firmware not supported\n"); + return -EINVAL; + } - err = request_firmware(&adev->pm.fw, fw_name, adev->dev); - if (err) { - DRM_ERROR("Failed to request firmware\n"); - return err; - } + err = request_firmware(&adev->pm.fw, fw_name, adev->dev); + if (err) { + DRM_ERROR("Failed to request firmware\n"); + return err; + } - err = amdgpu_ucode_validate(adev->pm.fw); - if (err) { - DRM_ERROR("Failed to load firmware \"%s\"", fw_name); - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - return err; + err = amdgpu_ucode_validate(adev->pm.fw); + if (err) { + DRM_ERROR("Failed to load firmware \"%s\"", fw_name); + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + return err; + } } hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; + amdgpu_ucode_print_smc_hdr(&hdr->header); adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); @@ -769,13 +803,14 @@ static int amdgpu_cgs_get_firmware_info(void *cgs_device, info->version = adev->pm.fw_version; info->image_size = ucode_size; + info->ucode_start_address = ucode_start_address; info->kptr = (void *)src; } return 0; } -static int amdgpu_cgs_query_system_info(void *cgs_device, - struct cgs_system_info *sys_info) +static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device, + struct cgs_system_info *sys_info) { CGS_FUNC_ADEV; @@ -795,12 +830,24 @@ static int amdgpu_cgs_query_system_info(void *cgs_device, case CGS_SYSTEM_INFO_PCIE_MLW: sys_info->value = adev->pm.pcie_mlw_mask; break; + case CGS_SYSTEM_INFO_PCIE_DEV: + sys_info->value = adev->pdev->device; + break; + case CGS_SYSTEM_INFO_PCIE_REV: + sys_info->value = adev->pdev->revision; + break; case CGS_SYSTEM_INFO_CG_FLAGS: sys_info->value = adev->cg_flags; break; case CGS_SYSTEM_INFO_PG_FLAGS: sys_info->value = adev->pg_flags; break; + case CGS_SYSTEM_INFO_GFX_CU_INFO: + sys_info->value = adev->gfx.cu_info.number; + break; + case CGS_SYSTEM_INFO_GFX_SE_INFO: + sys_info->value = adev->gfx.config.max_shader_engines; + break; default: return -ENODEV; } @@ -808,7 +855,7 @@ static int amdgpu_cgs_query_system_info(void *cgs_device, return 0; } -static int amdgpu_cgs_get_active_displays_info(void *cgs_device, +static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device, struct cgs_display_info *info) { CGS_FUNC_ADEV; @@ -816,10 +863,13 @@ static int 
amdgpu_cgs_get_active_displays_info(void *cgs_device, struct drm_device *ddev = adev->ddev; struct drm_crtc *crtc; uint32_t line_time_us, vblank_lines; + struct cgs_mode_info *mode_info; if (info == NULL) return -EINVAL; + mode_info = info->mode_info; + if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { @@ -828,7 +878,7 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device, info->active_display_mask |= (1 << amdgpu_crtc->crtc_id); info->display_count++; } - if (info->mode_info != NULL && + if (mode_info != NULL && crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) / @@ -836,10 +886,10 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device, vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end - amdgpu_crtc->hw_mode.crtc_vdisplay + (amdgpu_crtc->v_border * 2); - info->mode_info->vblank_time_us = vblank_lines * line_time_us; - info->mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); - info->mode_info->ref_clock = adev->clock.spll.reference_freq; - info->mode_info++; + mode_info->vblank_time_us = vblank_lines * line_time_us; + mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); + mode_info->ref_clock = adev->clock.spll.reference_freq; + mode_info = NULL; } } } @@ -847,6 +897,16 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device, return 0; } + +static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled) +{ + CGS_FUNC_ADEV; + + adev->pm.dpm_enabled = enabled; + + return 0; +} + /** \brief evaluate acpi namespace object, handle or pathname must be valid * \param cgs_device * \param info input/output arguments for the control method @@ -854,21 +914,19 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device, */ #if defined(CONFIG_ACPI) -static int amdgpu_cgs_acpi_eval_object(void *cgs_device, +static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, struct cgs_acpi_method_info *info) { CGS_FUNC_ADEV; acpi_handle handle; struct acpi_object_list input; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *params = NULL; - union acpi_object *obj = NULL; + union acpi_object *params, *obj; uint8_t name[5] = {'\0'}; - struct cgs_acpi_method_argument *argument = NULL; + struct cgs_acpi_method_argument *argument; uint32_t i, count; acpi_status status; int result; - uint32_t func_no = 0xFFFFFFFF; handle = ACPI_HANDLE(&adev->pdev->dev); if (!handle) @@ -885,7 +943,6 @@ static int amdgpu_cgs_acpi_eval_object(void *cgs_device, if (info->pinput_argument == NULL) return -EINVAL; argument = info->pinput_argument; - func_no = argument->value; for (i = 0; i < info->input_count; i++) { if (((argument->type == ACPI_TYPE_STRING) || (argument->type == ACPI_TYPE_BUFFER)) && @@ -930,11 +987,11 @@ static int amdgpu_cgs_acpi_eval_object(void *cgs_device, params->integer.value = argument->value; break; case ACPI_TYPE_STRING: - params->string.length = argument->method_length; + params->string.length = argument->data_length; params->string.pointer = argument->pointer; break; case ACPI_TYPE_BUFFER: - params->buffer.length = argument->method_length; + params->buffer.length = argument->data_length; params->buffer.pointer = argument->pointer; break; default: @@ -954,7 +1011,7 @@ static int amdgpu_cgs_acpi_eval_object(void *cgs_device, if (ACPI_FAILURE(status)) { result = -EIO; - goto error; + goto free_input; } /* return 
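In amdgpu_cgs_get_active_displays_info() the mode details are now written through a local mode_info pointer that is set to NULL after the first active CRTC, so exactly one entry is filled in even when several CRTCs are lit. The vblank-time arithmetic itself is unchanged; the divisor of line_time_us, cut off at the hunk boundary above, is the mode's pixel clock in kHz. The same math as a stand-alone program, using CEA 1080p60 timings:

#include <stdio.h>

/* Field names follow struct drm_display_mode; values are CEA 1080p60. */
struct mode {
        unsigned clock;                 /* pixel clock in kHz */
        unsigned crtc_htotal;
        unsigned crtc_vdisplay;
        unsigned crtc_vblank_end;
};

static unsigned vblank_time_us(const struct mode *m, unsigned v_border)
{
        unsigned line_time_us = (m->crtc_htotal * 1000) / m->clock;
        unsigned vblank_lines = m->crtc_vblank_end - m->crtc_vdisplay +
                                (v_border * 2);
        return vblank_lines * line_time_us;
}

int main(void)
{
        struct mode m = { 148500, 2200, 1080, 1125 };

        printf("%u us\n", vblank_time_us(&m, 0));   /* 45 lines * 14 us = 630 us */
        return 0;
}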
the output info */ @@ -964,7 +1021,7 @@ static int amdgpu_cgs_acpi_eval_object(void *cgs_device, if ((obj->type != ACPI_TYPE_PACKAGE) || (obj->package.count != count)) { result = -EIO; - goto error; + goto free_obj; } params = obj->package.elements; } else @@ -972,13 +1029,13 @@ static int amdgpu_cgs_acpi_eval_object(void *cgs_device, if (params == NULL) { result = -EIO; - goto error; + goto free_obj; } for (i = 0; i < count; i++) { if (argument->type != params->type) { result = -EIO; - goto error; + goto free_obj; } switch (params->type) { case ACPI_TYPE_INTEGER: @@ -988,7 +1045,7 @@ static int amdgpu_cgs_acpi_eval_object(void *cgs_device, if ((params->string.length != argument->data_length) || (params->string.pointer == NULL)) { result = -EIO; - goto error; + goto free_obj; } strncpy(argument->pointer, params->string.pointer, @@ -997,7 +1054,7 @@ static int amdgpu_cgs_acpi_eval_object(void *cgs_device, case ACPI_TYPE_BUFFER: if (params->buffer.pointer == NULL) { result = -EIO; - goto error; + goto free_obj; } memcpy(argument->pointer, params->buffer.pointer, @@ -1010,21 +1067,22 @@ static int amdgpu_cgs_acpi_eval_object(void *cgs_device, params++; } -error: - if (obj != NULL) - kfree(obj); + result = 0; +free_obj: + kfree(obj); +free_input: kfree((void *)input.pointer); return result; } #else -static int amdgpu_cgs_acpi_eval_object(void *cgs_device, +static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, struct cgs_acpi_method_info *info) { return -EIO; } #endif -int amdgpu_cgs_call_acpi_method(void *cgs_device, +static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device, uint32_t acpi_method, uint32_t acpi_function, void *pinput, void *poutput, @@ -1037,17 +1095,14 @@ int amdgpu_cgs_call_acpi_method(void *cgs_device, struct cgs_acpi_method_info info = {0}; acpi_input[0].type = CGS_ACPI_TYPE_INTEGER; - acpi_input[0].method_length = sizeof(uint32_t); acpi_input[0].data_length = sizeof(uint32_t); acpi_input[0].value = acpi_function; acpi_input[1].type = CGS_ACPI_TYPE_BUFFER; - acpi_input[1].method_length = CGS_ACPI_MAX_BUFFER_SIZE; acpi_input[1].data_length = input_size; acpi_input[1].pointer = pinput; acpi_output.type = CGS_ACPI_TYPE_BUFFER; - acpi_output.method_length = CGS_ACPI_MAX_BUFFER_SIZE; acpi_output.data_length = output_size; acpi_output.pointer = poutput; @@ -1094,9 +1149,11 @@ static const struct cgs_ops amdgpu_cgs_ops = { amdgpu_cgs_pm_query_clock_limits, amdgpu_cgs_set_camera_voltages, amdgpu_cgs_get_firmware_info, + amdgpu_cgs_rel_firmware, amdgpu_cgs_set_powergating_state, amdgpu_cgs_set_clockgating_state, amdgpu_cgs_get_active_displays_info, + amdgpu_cgs_notify_dpm_enabled, amdgpu_cgs_call_acpi_method, amdgpu_cgs_query_system_info, }; @@ -1107,7 +1164,7 @@ static const struct cgs_os_ops amdgpu_cgs_os_ops = { amdgpu_cgs_irq_put }; -void *amdgpu_cgs_create_device(struct amdgpu_device *adev) +struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev) { struct amdgpu_cgs_device *cgs_device = kmalloc(sizeof(*cgs_device), GFP_KERNEL); @@ -1121,10 +1178,10 @@ void *amdgpu_cgs_create_device(struct amdgpu_device *adev) cgs_device->base.os_ops = &amdgpu_cgs_os_ops; cgs_device->adev = adev; - return cgs_device; + return (struct cgs_device *)cgs_device; } -void amdgpu_cgs_destroy_device(void *cgs_device) +void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device) { kfree(cgs_device); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 119cdc2c43e7..ff0b55a65ca3 100644 --- 
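The ACPI error handling above is worth a second look: a single catch-all error: label is replaced by ordered free_obj/free_input labels, the standard kernel unwind idiom where each failure site jumps to the label that frees exactly what has been allocated so far (the unconditional kfree(obj) is safe because kfree(NULL) is a no-op). Skeleton of the idiom with placeholder names, not amdgpu functions:

#include <linux/slab.h>

static int my_check(void *p) { return 0; }      /* placeholder for real work */

static int my_eval(void)
{
        void *input, *obj;
        int result;

        input = kzalloc(64, GFP_KERNEL);
        if (!input)
                return -ENOMEM;

        obj = kzalloc(64, GFP_KERNEL);
        if (!obj) {
                result = -ENOMEM;
                goto free_input;                /* obj was never allocated */
        }

        if (my_check(obj)) {
                result = -EIO;
                goto free_obj;                  /* frees both, innermost first */
        }
        result = 0;

free_obj:
        kfree(obj);
free_input:
        kfree(input);
        return result;
}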
a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -194,12 +194,12 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector) bpc = 8; DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n", connector->name, bpc); - } else if (bpc > 8) { - /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */ - DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n", - connector->name); - bpc = 8; } + } else if (bpc > 8) { + /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */ + DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n", + connector->name); + bpc = 8; } } @@ -439,7 +439,7 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder, struct drm_display_mode *mode = NULL; struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; int i; - struct mode_size { + static const struct mode_size { int w; int h; } common_modes[17] = { @@ -1690,7 +1690,6 @@ amdgpu_connector_add(struct amdgpu_device *adev, DRM_MODE_SCALE_NONE); /* no HPD on analog connectors */ amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE; - connector->polled = DRM_CONNECTOR_POLL_CONNECT; connector->interlace_allowed = true; connector->doublescan_allowed = true; break; @@ -1893,8 +1892,10 @@ amdgpu_connector_add(struct amdgpu_device *adev, } if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) { - if (i2c_bus->valid) - connector->polled = DRM_CONNECTOR_POLL_CONNECT; + if (i2c_bus->valid) { + connector->polled = DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT; + } } else connector->polled = DRM_CONNECTOR_POLL_HPD; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 9392e50a7ba4..0307ff5887c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -24,7 +24,6 @@ * Authors: * Jerome Glisse <glisse@freedesktop.org> */ -#include <linux/list_sort.h> #include <linux/pagemap.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> @@ -88,44 +87,41 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, } static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, - struct amdgpu_user_fence *uf, - struct drm_amdgpu_cs_chunk_fence *fence_data) + struct drm_amdgpu_cs_chunk_fence *data, + uint32_t *offset) { struct drm_gem_object *gobj; - uint32_t handle; - handle = fence_data->handle; - gobj = drm_gem_object_lookup(p->adev->ddev, p->filp, - fence_data->handle); + gobj = drm_gem_object_lookup(p->filp, data->handle); if (gobj == NULL) return -EINVAL; - uf->bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); - uf->offset = fence_data->offset; - - if (amdgpu_ttm_tt_get_usermm(uf->bo->tbo.ttm)) { - drm_gem_object_unreference_unlocked(gobj); - return -EINVAL; - } - - p->uf_entry.robj = amdgpu_bo_ref(uf->bo); + p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); p->uf_entry.priority = 0; p->uf_entry.tv.bo = &p->uf_entry.robj->tbo; p->uf_entry.tv.shared = true; p->uf_entry.user_pages = NULL; + *offset = data->offset; drm_gem_object_unreference_unlocked(gobj); + + if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) { + amdgpu_bo_unref(&p->uf_entry.robj); + return -EINVAL; + } + return 0; } int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) { struct amdgpu_fpriv *fpriv = p->filp->driver_priv; + struct amdgpu_vm *vm = &fpriv->vm; union drm_amdgpu_cs *cs = data; uint64_t *chunk_array_user; uint64_t *chunk_array; - struct 
amdgpu_user_fence uf = {}; unsigned size, num_ibs = 0; + uint32_t uf_offset = 0; int i; int ret; @@ -200,7 +196,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) goto free_partial_kdata; } - ret = amdgpu_cs_user_fence_chunk(p, &uf, (void *)p->chunks[i].kdata); + ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata, + &uf_offset); if (ret) goto free_partial_kdata; @@ -215,12 +212,12 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) } } - ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job); + ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm); if (ret) goto free_all_kdata; - p->job->uf = uf; - + if (p->uf_entry.robj) + p->job->uf_addr = uf_offset; kfree(chunk_array); return 0; @@ -377,7 +374,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, INIT_LIST_HEAD(&duplicates); amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); - if (p->job->uf.bo) + if (p->uf_entry.robj) list_add(&p->uf_entry.tv.head, &p->validated); if (need_mmap_lock) @@ -459,7 +456,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, list_splice(&need_pages, &p->validated); } - amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates); + amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates); p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev); p->bytes_moved = 0; @@ -472,7 +469,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, if (r) goto error_validate; + fpriv->vm.last_eviction_counter = + atomic64_read(&p->adev->num_evictions); + if (p->bo_list) { + struct amdgpu_bo *gds = p->bo_list->gds_obj; + struct amdgpu_bo *gws = p->bo_list->gws_obj; + struct amdgpu_bo *oa = p->bo_list->oa_obj; struct amdgpu_vm *vm = &fpriv->vm; unsigned i; @@ -481,8 +484,24 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo); } + + if (gds) { + p->job->gds_base = amdgpu_bo_gpu_offset(gds); + p->job->gds_size = amdgpu_bo_size(gds); + } + if (gws) { + p->job->gws_base = amdgpu_bo_gpu_offset(gws); + p->job->gws_size = amdgpu_bo_size(gws); + } + if (oa) { + p->job->oa_base = amdgpu_bo_gpu_offset(oa); + p->job->oa_size = amdgpu_bo_size(oa); + } } + if (p->uf_entry.robj) + p->job->uf_addr += amdgpu_bo_gpu_offset(p->uf_entry.robj); + error_validate: if (r) { amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm); @@ -527,16 +546,6 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) return 0; } -static int cmp_size_smaller_first(void *priv, struct list_head *a, - struct list_head *b) -{ - struct amdgpu_bo_list_entry *la = list_entry(a, struct amdgpu_bo_list_entry, tv.head); - struct amdgpu_bo_list_entry *lb = list_entry(b, struct amdgpu_bo_list_entry, tv.head); - - /* Sort A before B if A is smaller. */ - return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; -} - /** * cs_parser_fini() - clean parser states * @parser: parser structure holding parsing context. @@ -553,18 +562,6 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo if (!error) { amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm); - /* Sort the buffer list from the smallest to largest buffer, - * which affects the order of buffers in the LRU list. - * This assures that the smallest buffers are added first - * to the LRU list, so they are likely to be later evicted - * first, instead of large buffers whose eviction is more - * expensive. - * - * This slightly lowers the number of bytes moved by TTM - * per frame under memory pressure. 
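With struct amdgpu_user_fence gone, the job carries a plain uf_addr instead of a BO-plus-offset pair, assembled in two steps: the fence chunk contributes only a byte offset at parse time, and amdgpu_bo_gpu_offset() is added after the buffer list has been validated, once the write-back BO has a final placement. A toy rendition of the two-step sum:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t uf_addr = 0x40;                /* data->offset from the chunk */
        uint64_t bo_gpu_offset = 0x100000;      /* known only after validation */

        uf_addr += bo_gpu_offset;               /* as in amdgpu_cs_parser_bos() */
        assert(uf_addr == 0x100040);
        return 0;
}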
- */ - list_sort(NULL, &parser->validated, cmp_size_smaller_first); - ttm_eu_fence_buffer_objects(&parser->ticket, &parser->validated, parser->fence); @@ -659,18 +656,21 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, /* Only for UVD/VCE VM emulation */ if (ring->funcs->parse_cs) { + p->job->vm = NULL; for (i = 0; i < p->job->num_ibs; i++) { r = amdgpu_ring_parse_cs(ring, p, i); if (r) return r; } - } + } else { + p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); - r = amdgpu_bo_vm_update_pte(p, vm); - if (!r) - amdgpu_cs_sync_rings(p); + r = amdgpu_bo_vm_update_pte(p, vm); + if (r) + return r; + } - return r; + return amdgpu_cs_sync_rings(p); } static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r) @@ -763,41 +763,14 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, ib->length_dw = chunk_ib->ib_bytes / 4; ib->flags = chunk_ib->flags; - ib->ctx = parser->ctx; j++; } - /* add GDS resources to first IB */ - if (parser->bo_list) { - struct amdgpu_bo *gds = parser->bo_list->gds_obj; - struct amdgpu_bo *gws = parser->bo_list->gws_obj; - struct amdgpu_bo *oa = parser->bo_list->oa_obj; - struct amdgpu_ib *ib = &parser->job->ibs[0]; - - if (gds) { - ib->gds_base = amdgpu_bo_gpu_offset(gds); - ib->gds_size = amdgpu_bo_size(gds); - } - if (gws) { - ib->gws_base = amdgpu_bo_gpu_offset(gws); - ib->gws_size = amdgpu_bo_size(gws); - } - if (oa) { - ib->oa_base = amdgpu_bo_gpu_offset(oa); - ib->oa_size = amdgpu_bo_size(oa); - } - } - /* wrap the last IB with user fence */ - if (parser->job->uf.bo) { - struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1]; - - /* UVD & VCE fw doesn't support user fences */ - if (parser->job->ring->type == AMDGPU_RING_TYPE_UVD || - parser->job->ring->type == AMDGPU_RING_TYPE_VCE) - return -EINVAL; - - ib->user = &parser->job->uf; - } + /* UVD & VCE fw doesn't support user fences */ + if (parser->job->uf_addr && ( + parser->job->ring->type == AMDGPU_RING_TYPE_UVD || + parser->job->ring->type == AMDGPU_RING_TYPE_VCE)) + return -EINVAL; return 0; } @@ -862,28 +835,25 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs) { struct amdgpu_ring *ring = p->job->ring; - struct amd_sched_fence *fence; + struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity; struct amdgpu_job *job; + int r; job = p->job; p->job = NULL; - job->base.sched = &ring->sched; - job->base.s_entity = &p->ctx->rings[ring->idx].entity; - job->owner = p->filp; - - fence = amd_sched_fence_create(job->base.s_entity, p->filp); - if (!fence) { + r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp); + if (r) { amdgpu_job_free(job); - return -ENOMEM; + return r; } - job->base.s_fence = fence; - p->fence = fence_get(&fence->base); - - cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, - &fence->base); - job->ibs[job->num_ibs - 1].sequence = cs->out.handle; + job->owner = p->filp; + job->ctx = entity->fence_context; + p->fence = fence_get(&job->base.s_fence->finished); + cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence); + job->uf_sequence = cs->out.handle; + amdgpu_job_free_resources(job); trace_amdgpu_cs_ioctl(job); amd_sched_entity_push_job(&job->base); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 612117478b57..df7ab2458e50 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -25,6 +25,7 @@ * Alex Deucher * Jerome Glisse */ +#include <linux/kthread.h> #include 
<linux/console.h> #include <linux/slab.h> #include <linux/debugfs.h> @@ -35,6 +36,7 @@ #include <linux/vga_switcheroo.h> #include <linux/efi.h> #include "amdgpu.h" +#include "amdgpu_trace.h" #include "amdgpu_i2c.h" #include "atom.h" #include "amdgpu_atombios.h" @@ -59,15 +61,11 @@ static const char *amdgpu_asic_name[] = { "FIJI", "CARRIZO", "STONEY", + "POLARIS10", + "POLARIS11", "LAST", }; -#if defined(CONFIG_VGA_SWITCHEROO) -bool amdgpu_has_atpx_dgpu_power_cntl(void); -#else -static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; } -#endif - bool amdgpu_device_is_px(struct drm_device *dev) { struct amdgpu_device *adev = dev->dev_private; @@ -83,24 +81,27 @@ bool amdgpu_device_is_px(struct drm_device *dev) uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, bool always_indirect) { + uint32_t ret; + if ((reg * 4) < adev->rmmio_size && !always_indirect) - return readl(((void __iomem *)adev->rmmio) + (reg * 4)); + ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); else { unsigned long flags; - uint32_t ret; spin_lock_irqsave(&adev->mmio_idx_lock, flags); writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); - - return ret; } + trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret); + return ret; } void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, bool always_indirect) { + trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); + if ((reg * 4) < adev->rmmio_size && !always_indirect) writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); else { @@ -352,7 +353,7 @@ static int amdgpu_doorbell_init(struct amdgpu_device *adev) adev->doorbell.base = pci_resource_start(adev->pdev, 2); adev->doorbell.size = pci_resource_len(adev->pdev, 2); - adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32), + adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32), AMDGPU_DOORBELL_MAX_ASSIGNMENT+1); if (adev->doorbell.num_doorbells == 0) return -EINVAL; @@ -831,8 +832,10 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) */ static void amdgpu_atombios_fini(struct amdgpu_device *adev) { - if (adev->mode_info.atom_context) + if (adev->mode_info.atom_context) { kfree(adev->mode_info.atom_context->scratch); + kfree(adev->mode_info.atom_context->iio); + } kfree(adev->mode_info.atom_context); adev->mode_info.atom_context = NULL; kfree(adev->mode_info.atom_card_info); @@ -942,15 +945,11 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) } if (amdgpu_gart_size != -1) { - /* gtt size must be power of two and greater or equal to 32M */ + /* gtt size must be greater or equal to 32M */ if (amdgpu_gart_size < 32) { dev_warn(adev->dev, "gart size (%d) too small\n", amdgpu_gart_size); amdgpu_gart_size = -1; - } else if (!amdgpu_check_pot_argument(amdgpu_gart_size)) { - dev_warn(adev->dev, "gart size (%d) must be a power of 2\n", - amdgpu_gart_size); - amdgpu_gart_size = -1; } } @@ -1076,11 +1075,14 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev, int i, r = 0; for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; if (adev->ip_blocks[i].type == block_type) { r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, state); if (r) return r; + break; } } return r; @@ -1093,16 +1095,53 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev, int i, r = 0; for (i = 0; i < adev->num_ip_blocks; i++) { + if 
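amdgpu_mm_rreg() is restructured above so both the direct and the indirect path fall through to a single return, letting the new trace_amdgpu_mm_rreg tracepoint observe every read. The indirect path is the classic INDEX/DATA pair: registers beyond the mapped MMIO window are reached by writing the target offset to mmMM_INDEX and reading mmMM_DATA back, and the irqsave spinlock keeps the two accesses from interleaving. Sketch of the pattern with placeholder register offsets (MY_INDEX/MY_DATA stand in for mmMM_INDEX/mmMM_DATA):

#include <linux/io.h>
#include <linux/spinlock.h>

#define MY_INDEX 0x0
#define MY_DATA  0x4

static u32 indexed_read(void __iomem *mmio, spinlock_t *lock, u32 reg)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(lock, flags);         /* index+data must not interleave */
        writel(reg * 4, mmio + MY_INDEX);       /* select the register */
        val = readl(mmio + MY_DATA);            /* fetch its value */
        spin_unlock_irqrestore(lock, flags);
        return val;
}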
(!adev->ip_block_status[i].valid) + continue; if (adev->ip_blocks[i].type == block_type) { r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev, state); if (r) return r; + break; } } return r; } +int amdgpu_wait_for_idle(struct amdgpu_device *adev, + enum amd_ip_block_type block_type) +{ + int i, r; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + if (adev->ip_blocks[i].type == block_type) { + r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev); + if (r) + return r; + break; + } + } + return 0; + +} + +bool amdgpu_is_idle(struct amdgpu_device *adev, + enum amd_ip_block_type block_type) +{ + int i; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + if (adev->ip_blocks[i].type == block_type) + return adev->ip_blocks[i].funcs->is_idle((void *)adev); + } + return true; + +} + const struct amdgpu_ip_block_version * amdgpu_get_ip_block( struct amdgpu_device *adev, enum amd_ip_block_type type) @@ -1150,6 +1189,8 @@ static int amdgpu_early_init(struct amdgpu_device *adev) case CHIP_TOPAZ: case CHIP_TONGA: case CHIP_FIJI: + case CHIP_POLARIS11: + case CHIP_POLARIS10: case CHIP_CARRIZO: case CHIP_STONEY: if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) @@ -1202,7 +1243,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev) if (r == -ENOENT) { adev->ip_block_status[i].valid = false; } else if (r) { - DRM_ERROR("early_init %d failed %d\n", i, r); + DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); return r; } else { adev->ip_block_status[i].valid = true; @@ -1213,6 +1254,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev) } } + adev->cg_flags &= amdgpu_cg_mask; + adev->pg_flags &= amdgpu_pg_mask; + return 0; } @@ -1225,7 +1269,7 @@ static int amdgpu_init(struct amdgpu_device *adev) continue; r = adev->ip_blocks[i].funcs->sw_init((void *)adev); if (r) { - DRM_ERROR("sw_init %d failed %d\n", i, r); + DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); return r; } adev->ip_block_status[i].sw = true; @@ -1258,7 +1302,7 @@ static int amdgpu_init(struct amdgpu_device *adev) continue; r = adev->ip_blocks[i].funcs->hw_init((void *)adev); if (r) { - DRM_ERROR("hw_init %d failed %d\n", i, r); + DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); return r; } adev->ip_block_status[i].hw = true; @@ -1278,13 +1322,13 @@ static int amdgpu_late_init(struct amdgpu_device *adev) r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, AMD_CG_STATE_GATE); if (r) { - DRM_ERROR("set_clockgating_state(gate) %d failed %d\n", i, r); + DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); return r; } if (adev->ip_blocks[i].funcs->late_init) { r = adev->ip_blocks[i].funcs->late_init((void *)adev); if (r) { - DRM_ERROR("late_init %d failed %d\n", i, r); + DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); return r; } } @@ -1308,13 +1352,13 @@ static int amdgpu_fini(struct amdgpu_device *adev) r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, AMD_CG_STATE_UNGATE); if (r) { - DRM_ERROR("set_clockgating_state(ungate) %d failed %d\n", i, r); + DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); return r; } r = adev->ip_blocks[i].funcs->hw_fini((void *)adev); /* XXX handle errors */ if (r) { - 
DRM_DEBUG("hw_fini %d failed %d\n", i, r); + DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); } adev->ip_block_status[i].hw = false; } @@ -1325,12 +1369,17 @@ static int amdgpu_fini(struct amdgpu_device *adev) r = adev->ip_blocks[i].funcs->sw_fini((void *)adev); /* XXX handle errors */ if (r) { - DRM_DEBUG("sw_fini %d failed %d\n", i, r); + DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); } adev->ip_block_status[i].sw = false; adev->ip_block_status[i].valid = false; } + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { + if (adev->ip_blocks[i].funcs->late_fini) + adev->ip_blocks[i].funcs->late_fini((void *)adev); + } + return 0; } @@ -1338,20 +1387,29 @@ static int amdgpu_suspend(struct amdgpu_device *adev) { int i, r; + /* ungate SMC block first */ + r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC, + AMD_CG_STATE_UNGATE); + if (r) { + DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r); + } + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_block_status[i].valid) continue; /* ungate blocks so that suspend can properly shut them down */ - r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, - AMD_CG_STATE_UNGATE); - if (r) { - DRM_ERROR("set_clockgating_state(ungate) %d failed %d\n", i, r); + if (i != AMD_IP_BLOCK_TYPE_SMC) { + r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, + AMD_CG_STATE_UNGATE); + if (r) { + DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); + } } /* XXX handle errors */ r = adev->ip_blocks[i].funcs->suspend(adev); /* XXX handle errors */ if (r) { - DRM_ERROR("suspend %d failed %d\n", i, r); + DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); } } @@ -1367,7 +1425,7 @@ static int amdgpu_resume(struct amdgpu_device *adev) continue; r = adev->ip_blocks[i].funcs->resume(adev); if (r) { - DRM_ERROR("resume %d failed %d\n", i, r); + DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); return r; } } @@ -1375,6 +1433,15 @@ static int amdgpu_resume(struct amdgpu_device *adev) return 0; } +static bool amdgpu_device_is_virtual(void) +{ +#ifdef CONFIG_X86 + return boot_cpu_has(X86_FEATURE_HYPERVISOR); +#else + return false; +#endif +} + /** * amdgpu_device_init - initialize the driver * @@ -1421,9 +1488,12 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; adev->didt_rreg = &amdgpu_invalid_rreg; adev->didt_wreg = &amdgpu_invalid_wreg; + adev->gc_cac_rreg = &amdgpu_invalid_rreg; + adev->gc_cac_wreg = &amdgpu_invalid_wreg; adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; + DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); @@ -1448,6 +1518,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, spin_lock_init(&adev->pcie_idx_lock); spin_lock_init(&adev->uvd_ctx_idx_lock); spin_lock_init(&adev->didt_idx_lock); + spin_lock_init(&adev->gc_cac_idx_lock); spin_lock_init(&adev->audio_endpt_idx_lock); adev->rmmio_base = pci_resource_start(adev->pdev, 5); @@ -1485,36 +1556,45 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (amdgpu_runtime_pm == 1) runtime = true; - if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl()) + if 
(amdgpu_device_is_px(ddev)) runtime = true; vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime); if (runtime) vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); /* Read BIOS */ - if (!amdgpu_get_bios(adev)) - return -EINVAL; + if (!amdgpu_get_bios(adev)) { + r = -EINVAL; + goto failed; + } /* Must be an ATOMBIOS */ if (!adev->is_atom_bios) { dev_err(adev->dev, "Expecting atombios for GPU\n"); - return -EINVAL; + r = -EINVAL; + goto failed; } r = amdgpu_atombios_init(adev); if (r) { dev_err(adev->dev, "amdgpu_atombios_init failed\n"); - return r; + goto failed; } /* See if the asic supports SR-IOV */ adev->virtualization.supports_sr_iov = amdgpu_atombios_has_gpu_virtualization_table(adev); + /* Check if we are executing in a virtualized environment */ + adev->virtualization.is_virtual = amdgpu_device_is_virtual(); + adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev); + /* Post card if necessary */ if (!amdgpu_card_posted(adev) || - adev->virtualization.supports_sr_iov) { + (adev->virtualization.is_virtual && + !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) { if (!adev->bios) { dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); - return -EINVAL; + r = -EINVAL; + goto failed; } DRM_INFO("GPU not posted. posting now...\n"); amdgpu_atom_asic_init(adev->mode_info.atom_context); @@ -1524,7 +1604,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_atombios_get_clock_info(adev); if (r) { dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); - return r; + goto failed; } /* init i2c buses */ amdgpu_atombios_i2c_init(adev); @@ -1533,7 +1613,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_fence_driver_init(adev); if (r) { dev_err(adev->dev, "amdgpu_fence_driver_init failed\n"); - return r; + goto failed; } /* init the mode config */ @@ -1543,7 +1623,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (r) { dev_err(adev->dev, "amdgpu_init failed\n"); amdgpu_fini(adev); - return r; + goto failed; } adev->accel_working = true; @@ -1553,7 +1633,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_ib_pool_init(adev); if (r) { dev_err(adev->dev, "IB initialization failed (%d).\n", r); - return r; + goto failed; } r = amdgpu_ib_ring_tests(adev); @@ -1570,6 +1650,12 @@ int amdgpu_device_init(struct amdgpu_device *adev, DRM_ERROR("registering register debugfs failed (%d).\n", r); } + r = amdgpu_debugfs_firmware_init(adev); + if (r) { + DRM_ERROR("registering firmware debugfs failed (%d).\n", r); + return r; + } + if ((amdgpu_testing & 1)) { if (adev->accel_working) amdgpu_test_moves(adev); @@ -1595,10 +1681,15 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_late_init(adev); if (r) { dev_err(adev->dev, "amdgpu_late_init failed\n"); - return r; + goto failed; } return 0; + +failed: + if (runtime) + vga_switcheroo_fini_domain_pm_ops(adev->dev); + return r; } static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev); @@ -1621,6 +1712,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev) amdgpu_bo_evict_vram(adev); amdgpu_ib_pool_fini(adev); amdgpu_fence_driver_fini(adev); + drm_crtc_force_disable_all(adev->ddev); amdgpu_fbdev_fini(adev); r = amdgpu_fini(adev); kfree(adev->ip_block_status); @@ -1632,6 +1724,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev) kfree(adev->bios); adev->bios = NULL; vga_switcheroo_unregister_client(adev->pdev); + if (adev->flags & AMD_IS_PX) + vga_switcheroo_fini_domain_pm_ops(adev->dev); 
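amdgpu_device_is_virtual() above keys off X86_FEATURE_HYPERVISOR, i.e. CPUID leaf 1, ECX bit 31, a bit hypervisors set for their guests; combined with the ATOM virtualization caps it decides whether the card must be posted manually. The same probe is easy to reproduce from userspace on x86 (gcc/clang):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return 1;                       /* CPUID leaf 1 unavailable */
        printf("running under a hypervisor: %s\n",
               (ecx & (1u << 31)) ? "yes" : "no");
        return 0;
}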
vga_client_register(adev->pdev, NULL, NULL, NULL); if (adev->rio_mem) pci_iounmap(adev->pdev, adev->rio_mem); @@ -1817,7 +1911,23 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) } drm_kms_helper_poll_enable(dev); + + /* + * Most of the connector probing functions try to acquire runtime pm + * refs to ensure that the GPU is powered on when connector polling is + * performed. Since we're calling this from a runtime PM callback, + * trying to acquire rpm refs will cause us to deadlock. + * + * Since we're guaranteed to be holding the rpm lock, it's safe to + * temporarily disable the rpm helpers so this doesn't deadlock us. + */ +#ifdef CONFIG_PM + dev->dev->power.disable_depth++; +#endif drm_helper_hpd_irq_event(dev); +#ifdef CONFIG_PM + dev->dev->power.disable_depth--; +#endif if (fbcon) { amdgpu_fbdev_set_suspend(adev, 0); @@ -1837,11 +1947,6 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) */ int amdgpu_gpu_reset(struct amdgpu_device *adev) { - unsigned ring_sizes[AMDGPU_MAX_RINGS]; - uint32_t *ring_data[AMDGPU_MAX_RINGS]; - - bool saved = false; - int i, r; int resched; @@ -1850,22 +1955,30 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); - r = amdgpu_suspend(adev); - + /* block scheduler */ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; + if (!ring) continue; - - ring_sizes[i] = amdgpu_ring_backup(ring, &ring_data[i]); - if (ring_sizes[i]) { - saved = true; - dev_info(adev->dev, "Saved %d dwords of commands " - "on ring %d.\n", ring_sizes[i], i); - } + kthread_park(ring->sched.thread); + amd_sched_hw_job_reset(&ring->sched); } + /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ + amdgpu_fence_driver_force_completion(adev); + + /* save scratch */ + amdgpu_atombios_scratch_regs_save(adev); + r = amdgpu_suspend(adev); retry: + /* Disable fb access */ + if (adev->mode_info.num_crtc) { + struct amdgpu_mode_mc_save save; + amdgpu_display_stop_mc_access(adev, &save); + amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC); + } + r = amdgpu_asic_reset(adev); /* post card */ amdgpu_atom_asic_init(adev->mode_info.atom_context); @@ -1874,32 +1987,29 @@ retry: dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); r = amdgpu_resume(adev); } - + /* restore scratch */ + amdgpu_atombios_scratch_regs_restore(adev); if (!r) { + r = amdgpu_ib_ring_tests(adev); + if (r) { + dev_err(adev->dev, "ib ring test failed (%d).\n", r); + r = amdgpu_suspend(adev); + goto retry; + } + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; if (!ring) continue; - - amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]); - ring_sizes[i] = 0; - ring_data[i] = NULL; - } - - r = amdgpu_ib_ring_tests(adev); - if (r) { - dev_err(adev->dev, "ib ring test failed (%d).\n", r); - if (saved) { - saved = false; - r = amdgpu_suspend(adev); - goto retry; - } + amd_sched_job_recovery(&ring->sched); + kthread_unpark(ring->sched.thread); } } else { - amdgpu_fence_driver_force_completion(adev); + dev_err(adev->dev, "asic resume failed (%d).\n", r); for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - if (adev->rings[i]) - kfree(ring_data[i]); + if (adev->rings[i]) { + kthread_unpark(adev->rings[i]->sched.thread); + } } } @@ -1910,13 +2020,11 @@ retry: /* bad news, how to tell it to userspace ? 
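The reset path above drops the old save-and-replay of ring contents in favour of the scheduler: parked kthreads stop feeding the rings, amd_sched_hw_job_reset() detaches the hardware fences, and once the ASIC is back amd_sched_job_recovery() resubmits the unfinished jobs. A condensed shape of the new sequence; every call appears in the patch, but the retry loop, error handling and the display MC-access quiescing are elided:

static int gpu_reset_sketch(struct amdgpu_device *adev)
{
        int i, r;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {        /* stop feeding the hw */
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring)
                        continue;
                kthread_park(ring->sched.thread);
                amd_sched_hw_job_reset(&ring->sched);
        }
        amdgpu_fence_driver_force_completion(adev);     /* hw fences are void now */

        amdgpu_atombios_scratch_regs_save(adev);
        amdgpu_suspend(adev);
        r = amdgpu_asic_reset(adev);
        amdgpu_atom_asic_init(adev->mode_info.atom_context);    /* re-post the card */
        if (!r)
                r = amdgpu_resume(adev);
        amdgpu_atombios_scratch_regs_restore(adev);

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {        /* replay, then restart */
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring)
                        continue;
                amd_sched_job_recovery(&ring->sched);
                kthread_unpark(ring->sched.thread);
        }
        amdgpu_irq_gpu_reset_resume_helper(adev);
        return r;
}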
*/ dev_info(adev->dev, "GPU reset failed\n"); } + amdgpu_irq_gpu_reset_resume_helper(adev); return r; } -#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007 /* gen: chipset 1/2, asic 1/2/3 */ -#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */ - void amdgpu_get_pcie_info(struct amdgpu_device *adev) { u32 mask; @@ -2013,7 +2121,7 @@ void amdgpu_get_pcie_info(struct amdgpu_device *adev) * Debugfs */ int amdgpu_debugfs_add_files(struct amdgpu_device *adev, - struct drm_info_list *files, + const struct drm_info_list *files, unsigned nfiles) { unsigned i; @@ -2070,20 +2178,43 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, struct amdgpu_device *adev = f->f_inode->i_private; ssize_t result = 0; int r; + bool use_bank; + unsigned instance_bank, sh_bank, se_bank; if (size & 0x3 || *pos & 0x3) return -EINVAL; + if (*pos & (1ULL << 62)) { + se_bank = (*pos >> 24) & 0x3FF; + sh_bank = (*pos >> 34) & 0x3FF; + instance_bank = (*pos >> 44) & 0x3FF; + use_bank = 1; + *pos &= 0xFFFFFF; + } else { + use_bank = 0; + } + + if (use_bank) { + if (sh_bank >= adev->gfx.config.max_sh_per_se || + se_bank >= adev->gfx.config.max_shader_engines) + return -EINVAL; + mutex_lock(&adev->grbm_idx_mutex); + amdgpu_gfx_select_se_sh(adev, se_bank, + sh_bank, instance_bank); + } + while (size) { uint32_t value; if (*pos > adev->rmmio_size) - return result; + goto end; value = RREG32(*pos >> 2); r = put_user(value, (uint32_t *)buf); - if (r) - return r; + if (r) { + result = r; + goto end; + } result += 4; buf += 4; @@ -2091,6 +2222,12 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, size -= 4; } +end: + if (use_bank) { + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + } + return result; } @@ -2125,32 +2262,316 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, return result; } +static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = f->f_inode->i_private; + ssize_t result = 0; + int r; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + while (size) { + uint32_t value; + + value = RREG32_PCIE(*pos >> 2); + r = put_user(value, (uint32_t *)buf); + if (r) + return r; + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + return result; +} + +static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = f->f_inode->i_private; + ssize_t result = 0; + int r; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + while (size) { + uint32_t value; + + r = get_user(value, (uint32_t *)buf); + if (r) + return r; + + WREG32_PCIE(*pos >> 2, value); + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + return result; +} + +static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = f->f_inode->i_private; + ssize_t result = 0; + int r; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + while (size) { + uint32_t value; + + value = RREG32_DIDT(*pos >> 2); + r = put_user(value, (uint32_t *)buf); + if (r) + return r; + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + return result; +} + +static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = f->f_inode->i_private; + ssize_t result = 0; + int r; + + if (size & 
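The register debugfs file gains an optional GRBM bank select packed into the file offset: bit 62 flags a banked read, the SE/SH/instance indices sit in bits 24/34/44 (10 bits each), and the low 24 bits remain the register byte offset. From userspace that looks like the following; the debugfs path is the usual per-device one, and register 0x1234 and the bank values are arbitrary examples (the SE index must exist on the GPU at hand):

#define _FILE_OFFSET_BITS 64
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        uint64_t off = (1ULL << 62) |   /* enable the bank select */
                       (1ULL << 24) |   /* se_bank = 1 */
                       (0ULL << 34) |   /* sh_bank = 0 */
                       (2ULL << 44) |   /* instance_bank = 2 */
                       0x1234;          /* register byte offset (low 24 bits) */
        uint32_t val;
        int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);

        if (fd < 0 || pread(fd, &val, sizeof(val), (off_t)off) != sizeof(val))
                return 1;
        printf("reg = 0x%08x\n", (unsigned)val);
        close(fd);
        return 0;
}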
0x3 || *pos & 0x3) + return -EINVAL; + + while (size) { + uint32_t value; + + r = get_user(value, (uint32_t *)buf); + if (r) + return r; + + WREG32_DIDT(*pos >> 2, value); + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + return result; +} + +static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = f->f_inode->i_private; + ssize_t result = 0; + int r; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + while (size) { + uint32_t value; + + value = RREG32_SMC(*pos >> 2); + r = put_user(value, (uint32_t *)buf); + if (r) + return r; + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + return result; +} + +static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = f->f_inode->i_private; + ssize_t result = 0; + int r; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + while (size) { + uint32_t value; + + r = get_user(value, (uint32_t *)buf); + if (r) + return r; + + WREG32_SMC(*pos >> 2, value); + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + return result; +} + +static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = f->f_inode->i_private; + ssize_t result = 0; + int r; + uint32_t *config, no_regs = 0; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + config = kmalloc(256 * sizeof(*config), GFP_KERNEL); + if (!config) + return -ENOMEM; + + /* version, increment each time something is added */ + config[no_regs++] = 0; + config[no_regs++] = adev->gfx.config.max_shader_engines; + config[no_regs++] = adev->gfx.config.max_tile_pipes; + config[no_regs++] = adev->gfx.config.max_cu_per_sh; + config[no_regs++] = adev->gfx.config.max_sh_per_se; + config[no_regs++] = adev->gfx.config.max_backends_per_se; + config[no_regs++] = adev->gfx.config.max_texture_channel_caches; + config[no_regs++] = adev->gfx.config.max_gprs; + config[no_regs++] = adev->gfx.config.max_gs_threads; + config[no_regs++] = adev->gfx.config.max_hw_contexts; + config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend; + config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend; + config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size; + config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size; + config[no_regs++] = adev->gfx.config.num_tile_pipes; + config[no_regs++] = adev->gfx.config.backend_enable_mask; + config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes; + config[no_regs++] = adev->gfx.config.mem_row_size_in_kb; + config[no_regs++] = adev->gfx.config.shader_engine_tile_size; + config[no_regs++] = adev->gfx.config.num_gpus; + config[no_regs++] = adev->gfx.config.multi_gpu_tile_size; + config[no_regs++] = adev->gfx.config.mc_arb_ramcfg; + config[no_regs++] = adev->gfx.config.gb_addr_config; + config[no_regs++] = adev->gfx.config.num_rbs; + + while (size && (*pos < no_regs * 4)) { + uint32_t value; + + value = config[*pos >> 2]; + r = put_user(value, (uint32_t *)buf); + if (r) { + kfree(config); + return r; + } + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + kfree(config); + return result; +} + + static const struct file_operations amdgpu_debugfs_regs_fops = { .owner = THIS_MODULE, .read = amdgpu_debugfs_regs_read, .write = amdgpu_debugfs_regs_write, .llseek = default_llseek }; +static const struct file_operations amdgpu_debugfs_regs_didt_fops = { + .owner = THIS_MODULE, + .read = 
amdgpu_debugfs_regs_didt_read, + .write = amdgpu_debugfs_regs_didt_write, + .llseek = default_llseek +}; +static const struct file_operations amdgpu_debugfs_regs_pcie_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_regs_pcie_read, + .write = amdgpu_debugfs_regs_pcie_write, + .llseek = default_llseek +}; +static const struct file_operations amdgpu_debugfs_regs_smc_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_regs_smc_read, + .write = amdgpu_debugfs_regs_smc_write, + .llseek = default_llseek +}; + +static const struct file_operations amdgpu_debugfs_gca_config_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_gca_config_read, + .llseek = default_llseek +}; + +static const struct file_operations *debugfs_regs[] = { + &amdgpu_debugfs_regs_fops, + &amdgpu_debugfs_regs_didt_fops, + &amdgpu_debugfs_regs_pcie_fops, + &amdgpu_debugfs_regs_smc_fops, + &amdgpu_debugfs_gca_config_fops, +}; + +static const char *debugfs_regs_names[] = { + "amdgpu_regs", + "amdgpu_regs_didt", + "amdgpu_regs_pcie", + "amdgpu_regs_smc", + "amdgpu_gca_config", +}; static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) { struct drm_minor *minor = adev->ddev->primary; struct dentry *ent, *root = minor->debugfs_root; + unsigned i, j; + + for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { + ent = debugfs_create_file(debugfs_regs_names[i], + S_IFREG | S_IRUGO, root, + adev, debugfs_regs[i]); + if (IS_ERR(ent)) { + for (j = 0; j < i; j++) { + debugfs_remove(adev->debugfs_regs[j]); + adev->debugfs_regs[j] = NULL; + } + return PTR_ERR(ent); + } - ent = debugfs_create_file("amdgpu_regs", S_IFREG | S_IRUGO, root, - adev, &amdgpu_debugfs_regs_fops); - if (IS_ERR(ent)) - return PTR_ERR(ent); - i_size_write(ent->d_inode, adev->rmmio_size); - adev->debugfs_regs = ent; + if (!i) + i_size_write(ent->d_inode, adev->rmmio_size); + adev->debugfs_regs[i] = ent; + } return 0; } static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { - debugfs_remove(adev->debugfs_regs); - adev->debugfs_regs = NULL; + unsigned i; + + for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { + if (adev->debugfs_regs[i]) { + debugfs_remove(adev->debugfs_regs[i]); + adev->debugfs_regs[i] = NULL; + } + } } int amdgpu_debugfs_init(struct drm_minor *minor) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index f0ed974bd4e0..76f96028313d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -57,7 +57,7 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work, if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback)) return true; - fence_put(*f); + fence_put(fence); return false; } @@ -122,7 +122,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work) spin_unlock_irqrestore(&crtc->dev->event_lock, flags); usleep_range(min_udelay, 2 * min_udelay); spin_lock_irqsave(&crtc->dev->event_lock, flags); - }; + } if (!repcnt) DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, " @@ -131,12 +131,17 @@ static void amdgpu_flip_work_func(struct work_struct *__work) vblank->framedur_ns / 1000, vblank->linedur_ns / 1000, stat, vpos, hpos); - /* set the flip status */ + /* Do the flip (mmio) */ + adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async); + + /* Set the flip status */ amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - /* Do the flip (mmio) */ - adev->mode_info.funcs->page_flip(adev, work->crtc_id,
work->base); + + DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n", + amdgpuCrtc->crtc_id, amdgpuCrtc, work); + } /* @@ -192,6 +197,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, work->event = event; work->adev = adev; work->crtc_id = amdgpu_crtc->crtc_id; + work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0; /* schedule unpin of the old buffer */ old_amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); @@ -214,19 +220,17 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base); if (unlikely(r != 0)) { - amdgpu_bo_unreserve(new_rbo); r = -EINVAL; DRM_ERROR("failed to pin new rbo buffer before flip\n"); - goto cleanup; + goto unreserve; } r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl, &work->shared_count, &work->shared); if (unlikely(r != 0)) { - amdgpu_bo_unreserve(new_rbo); DRM_ERROR("failed to get fences for buffer\n"); - goto cleanup; + goto unpin; } amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags); @@ -234,7 +238,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, work->base = base; - r = drm_vblank_get(crtc->dev, amdgpu_crtc->crtc_id); + r = drm_crtc_vblank_get(crtc); if (r) { DRM_ERROR("failed to get vblank before flip\n"); goto pflip_cleanup; @@ -252,6 +256,9 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING; amdgpu_crtc->pflip_works = work; + + DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n", + amdgpu_crtc->crtc_id, amdgpu_crtc, work); /* update crtc fb */ crtc->primary->fb = fb; spin_unlock_irqrestore(&crtc->dev->event_lock, flags); @@ -259,16 +266,18 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, return 0; vblank_cleanup: - drm_vblank_put(crtc->dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_put(crtc); pflip_cleanup: if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) { DRM_ERROR("failed to reserve new rbo in error path\n"); goto cleanup; } +unpin: if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) { DRM_ERROR("failed to unpin new rbo in error path\n"); } +unreserve: amdgpu_bo_unreserve(new_rbo); cleanup: @@ -507,9 +516,7 @@ static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb); - if (amdgpu_fb->obj) { - drm_gem_object_unreference_unlocked(amdgpu_fb->obj); - } + drm_gem_object_unreference_unlocked(amdgpu_fb->obj); drm_framebuffer_cleanup(fb); kfree(amdgpu_fb); } @@ -554,7 +561,7 @@ amdgpu_user_framebuffer_create(struct drm_device *dev, struct amdgpu_framebuffer *amdgpu_fb; int ret; - obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); + obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); if (obj == NULL) { dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, " "can't create framebuffer\n", mode_cmd->handles[0]); @@ -588,20 +595,20 @@ const struct drm_mode_config_funcs amdgpu_mode_funcs = { .output_poll_changed = amdgpu_output_poll_changed }; -static struct drm_prop_enum_list amdgpu_underscan_enum_list[] = +static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] = { { UNDERSCAN_OFF, "off" }, { UNDERSCAN_ON, "on" }, { UNDERSCAN_AUTO, "auto" }, }; -static struct drm_prop_enum_list amdgpu_audio_enum_list[] = +static const struct drm_prop_enum_list amdgpu_audio_enum_list[] = { { AMDGPU_AUDIO_DISABLE, "off" }, { AMDGPU_AUDIO_ENABLE, "on" }, { AMDGPU_AUDIO_AUTO, "auto" }, }; /* XXX support different dither options? 
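Two things change in the flip path above: the MMIO flip is now issued before the state moves to AMDGPU_FLIP_SUBMITTED, while the event lock is still held, and DRM_MODE_PAGE_FLIP_ASYNC is plumbed through work->async down to the page_flip hook. From userspace the flag arrives via the long-standing libdrm entry point; a minimal call shape (fd, crtc_id and fb_id must come from a real modeset sequence, link with -ldrm):

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int flip_async(int fd, uint32_t crtc_id, uint32_t fb_id, void *user)
{
        return drmModePageFlip(fd, crtc_id, fb_id,
                               DRM_MODE_PAGE_FLIP_EVENT |
                               DRM_MODE_PAGE_FLIP_ASYNC,  /* don't wait for vblank */
                               user);
}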
spatial, temporal, both, etc. */ -static struct drm_prop_enum_list amdgpu_dither_enum_list[] = +static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = { { AMDGPU_FMT_DITHER_DISABLE, "off" }, { AMDGPU_FMT_DITHER_ENABLE, "on" }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c index 7b7f4aba60c0..fe36caf1b7d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c @@ -150,7 +150,7 @@ u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev) list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { amdgpu_crtc = to_amdgpu_crtc(crtc); if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { - vrefresh = amdgpu_crtc->hw_mode.vrefresh; + vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); break; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f1e17d60055a..9aa533cf4ad1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -50,9 +50,12 @@ * KMS wrapper. * - 3.0.0 - initial driver * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP) + * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same + * at the end of IBs. + * - 3.3.0 - Add VM support for UVD on supported hardware. */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 1 +#define KMS_DRIVER_MINOR 3 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; @@ -80,8 +83,12 @@ int amdgpu_exp_hw_support = 0; int amdgpu_sched_jobs = 32; int amdgpu_sched_hw_submission = 2; int amdgpu_powerplay = -1; +int amdgpu_powercontainment = 1; unsigned amdgpu_pcie_gen_cap = 0; unsigned amdgpu_pcie_lane_cap = 0; +unsigned amdgpu_cg_mask = 0xffffffff; +unsigned amdgpu_pg_mask = 0xffffffff; +char *amdgpu_disable_cu = NULL; MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); @@ -158,6 +165,9 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); #ifdef CONFIG_DRM_AMD_POWERPLAY MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))"); module_param_named(powerplay, amdgpu_powerplay, int, 0444); + +MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)"); +module_param_named(powercontainment, amdgpu_powercontainment, int, 0444); #endif MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))"); @@ -166,7 +176,16 @@ module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444); MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))"); module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444); -static struct pci_device_id pciidlist[] = { +MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)"); +module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444); + +MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)"); +module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444); + +MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)"); +module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444); + +static const struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_CIK /* Kaveri */ {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, @@ -277,6 +296,28 @@ static struct pci_device_id pciidlist[] = { {0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, /* stoney */ 
{0x1002, 0x98E4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_STONEY|AMD_IS_APU}, + /* Polaris11 */ + {0x1002, 0x67E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, + {0x1002, 0x67E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, + {0x1002, 0x67E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, + {0x1002, 0x67EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, + {0x1002, 0x67EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, + {0x1002, 0x67FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, + {0x1002, 0x67E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, + {0x1002, 0x67E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, + {0x1002, 0x67E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, + /* Polaris10 */ + {0x1002, 0x67C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x67C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x67C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x67C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x67C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x67DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x67C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x67C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, {0, 0, 0} }; @@ -389,7 +430,10 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) pci_save_state(pdev); pci_disable_device(pdev); pci_ignore_hotplug(pdev); - pci_set_power_state(pdev, PCI_D3cold); + if (amdgpu_is_atpx_hybrid()) + pci_set_power_state(pdev, PCI_D3cold); + else if (!amdgpu_has_atpx_dgpu_power_cntl()) + pci_set_power_state(pdev, PCI_D3hot); drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; return 0; @@ -406,7 +450,9 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - pci_set_power_state(pdev, PCI_D0); + if (amdgpu_is_atpx_hybrid() || + !amdgpu_has_atpx_dgpu_power_cntl()) + pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); ret = pci_enable_device(pdev); if (ret) @@ -491,7 +537,7 @@ static struct drm_driver kms_driver = { .driver_features = DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | - DRIVER_PRIME | DRIVER_RENDER, + DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET, .dev_priv_size = 0, .load = amdgpu_driver_load_kms, .open = amdgpu_driver_open_kms, @@ -514,7 +560,7 @@ static struct drm_driver kms_driver = { .irq_uninstall = amdgpu_irq_uninstall, .irq_handler = amdgpu_irq_handler, .ioctls = amdgpu_ioctls_kms, - .gem_free_object = amdgpu_gem_object_free, + .gem_free_object_unlocked = amdgpu_gem_object_free, .gem_open_object = amdgpu_gem_object_open, .gem_close_object = amdgpu_gem_object_close, .dumb_create = amdgpu_mode_dumb_create, @@ -553,22 +599,21 @@ static struct pci_driver amdgpu_kms_pci_driver = { .driver.pm = &amdgpu_pm_ops, }; + + static int __init amdgpu_init(void) { amdgpu_sync_init(); -#ifdef CONFIG_VGA_CONSOLE + amdgpu_fence_slab_init(); if (vgacon_text_force()) { DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n"); return -EINVAL; } -#endif DRM_INFO("amdgpu kernel modesetting enabled.\n"); driver = &kms_driver; pdriver = &amdgpu_kms_pci_driver; - driver->driver_features |= DRIVER_MODESET; driver->num_ioctls = amdgpu_max_kms_ioctl; amdgpu_register_atpx_handler(); - /* let modprobe override vga console setting */ return drm_pci_init(driver, 
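The new module parameters compose as follows: cg_mask and pg_mask are ANDed into the flags the IP blocks report during early init, so a value of 0 strips every clock- or powergating feature, and disable_cu takes a comma-separated list of se.sh.cu triples. A hypothetical invocation, values chosen purely for illustration:

# illustrative usage only; these values disable all CG/PG and CU 1.0.3:
modprobe amdgpu cg_mask=0x0 pg_mask=0x0 disable_cu=1.0.3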
pdriver); } @@ -579,6 +624,7 @@ static void __exit amdgpu_exit(void) drm_pci_exit(driver, pdriver); amdgpu_unregister_atpx_handler(); amdgpu_sync_fini(); + amdgpu_fence_slab_fini(); } module_init(amdgpu_init); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 4303b447efe8..0b109aebfec6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -55,8 +55,21 @@ struct amdgpu_fence { }; static struct kmem_cache *amdgpu_fence_slab; -static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0); +int amdgpu_fence_slab_init(void) +{ + amdgpu_fence_slab = kmem_cache_create( + "amdgpu_fence", sizeof(struct amdgpu_fence), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!amdgpu_fence_slab) + return -ENOMEM; + return 0; +} + +void amdgpu_fence_slab_fini(void) +{ + kmem_cache_destroy(amdgpu_fence_slab); +} /* * Cast helper */ @@ -121,7 +134,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f) { struct amdgpu_device *adev = ring->adev; struct amdgpu_fence *fence; - struct fence **ptr; + struct fence *old, **ptr; uint32_t seq; fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); @@ -141,7 +154,11 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f) /* This function can't be called concurrently anyway, otherwise * emitting the fence would mess up the hardware ring buffer. */ - BUG_ON(rcu_dereference_protected(*ptr, 1)); + old = rcu_dereference_protected(*ptr, 1); + if (old && !fence_is_signaled(old)) { + DRM_INFO("rcu slot is busy\n"); + fence_wait(old, false); + } rcu_assign_pointer(*ptr, fence_get(&fence->base)); @@ -187,16 +204,25 @@ void amdgpu_fence_process(struct amdgpu_ring *ring) if (seq != ring->fence_drv.sync_seq) amdgpu_fence_schedule_fallback(ring); - while (last_seq != seq) { + if (unlikely(seq == last_seq)) + return; + + last_seq &= drv->num_fences_mask; + seq &= drv->num_fences_mask; + + do { struct fence *fence, **ptr; - ptr = &drv->fences[++last_seq & drv->num_fences_mask]; + ++last_seq; + last_seq &= drv->num_fences_mask; + ptr = &drv->fences[last_seq]; /* There is always exactly one thread signaling this fence slot */ fence = rcu_dereference_protected(*ptr, 1); - rcu_assign_pointer(*ptr, NULL); + RCU_INIT_POINTER(*ptr, NULL); - BUG_ON(!fence); + if (!fence) + continue; r = fence_signal(fence); if (!r) @@ -205,7 +231,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring) BUG(); fence_put(fence); - } + } while (last_seq != seq); } /** @@ -348,9 +374,9 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, (unsigned long)ring); - ring->fence_drv.num_fences_mask = num_hw_submission - 1; + ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1; spin_lock_init(&ring->fence_drv.lock); - ring->fence_drv.fences = kcalloc(num_hw_submission, sizeof(void *), + ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *), GFP_KERNEL); if (!ring->fence_drv.fences) return -ENOMEM; @@ -392,13 +418,6 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, */ int amdgpu_fence_driver_init(struct amdgpu_device *adev) { - if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) { - amdgpu_fence_slab = kmem_cache_create( - "amdgpu_fence", sizeof(struct amdgpu_fence), 0, - SLAB_HWCACHE_ALIGN, NULL); - if (!amdgpu_fence_slab) - return -ENOMEM; - } if (amdgpu_debugfs_fence_init(adev)) dev_err(adev->dev, "fence debugfs file creation failed\n"); @@ -433,13 +452,10 @@ void 
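The fence bookkeeping above sizes the slot array at twice the hardware submission count and keeps num_fences_mask = size - 1, so with a power-of-two size a sequence number picks its slot with a single AND; doubling the slots makes the new wait-if-slot-still-busy path in amdgpu_fence_emit() a rare event rather than the norm. The indexing in isolation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const unsigned num_hw_submission = 2;   /* must be a power of two */
        const uint32_t mask = num_hw_submission * 2 - 1;
        uint32_t seq;

        for (seq = 0; seq < 10; seq++)          /* wraps over 4 slots */
                printf("seq %u -> slot %u\n", (unsigned)seq,
                       (unsigned)(seq & mask));
        return 0;
}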
amdgpu_fence_driver_fini(struct amdgpu_device *adev) amd_sched_fini(&ring->sched); del_timer_sync(&ring->fence_drv.fallback_timer); for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) - fence_put(ring->fence_drv.fences[i]); + fence_put(ring->fence_drv.fences[j]); kfree(ring->fence_drv.fences); ring->fence_drv.initialized = false; } - - if (atomic_dec_and_test(&amdgpu_fence_slab_ref)) - kmem_cache_destroy(amdgpu_fence_slab); } /** @@ -635,7 +651,7 @@ static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data) return 0; } -static struct drm_info_list amdgpu_debugfs_fence_list[] = { +static const struct drm_info_list amdgpu_debugfs_fence_list[] = { {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL}, {"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL} }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 7312d729d300..921bce2df0b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -238,18 +238,17 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset, t = offset / AMDGPU_GPU_PAGE_SIZE; p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); for (i = 0; i < pages; i++, p++) { - if (adev->gart.pages[p]) { - adev->gart.pages[p] = NULL; - adev->gart.pages_addr[p] = adev->dummy_page.addr; - page_base = adev->gart.pages_addr[p]; - if (!adev->gart.ptr) - continue; +#ifdef CONFIG_AMDGPU_GART_DEBUGFS + adev->gart.pages[p] = NULL; +#endif + page_base = adev->dummy_page.addr; + if (!adev->gart.ptr) + continue; - for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) { - amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, - t, page_base, flags); - page_base += AMDGPU_GPU_PAGE_SIZE; - } + for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) { + amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, + t, page_base, flags); + page_base += AMDGPU_GPU_PAGE_SIZE; } } mb(); @@ -287,10 +286,11 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset, p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); for (i = 0; i < pages; i++, p++) { - adev->gart.pages_addr[p] = dma_addr[i]; +#ifdef CONFIG_AMDGPU_GART_DEBUGFS adev->gart.pages[p] = pagelist[i]; +#endif if (adev->gart.ptr) { - page_base = adev->gart.pages_addr[p]; + page_base = dma_addr[i]; for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) { amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, t, page_base, flags); page_base += AMDGPU_GPU_PAGE_SIZE; @@ -312,11 +312,11 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset, */ int amdgpu_gart_init(struct amdgpu_device *adev) { - int r, i; + int r; - if (adev->gart.pages) { + if (adev->dummy_page.page) return 0; - } + /* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */ if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) { DRM_ERROR("Page size is smaller than GPU page size!\n"); @@ -330,22 +330,16 @@ int amdgpu_gart_init(struct amdgpu_device *adev) adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE; DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", adev->gart.num_cpu_pages, adev->gart.num_gpu_pages); + +#ifdef CONFIG_AMDGPU_GART_DEBUGFS /* Allocate pages table */ adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages); if (adev->gart.pages == NULL) { amdgpu_gart_fini(adev); return -ENOMEM; } - adev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) * - adev->gart.num_cpu_pages); - if (adev->gart.pages_addr == NULL) { - amdgpu_gart_fini(adev); - return -ENOMEM; - } - /* set GART entry to point to the dummy page by default */ - for (i = 0; i < 
adev->gart.num_cpu_pages; i++) { - adev->gart.pages_addr[i] = adev->dummy_page.addr; - } +#endif + return 0; } @@ -358,15 +352,14 @@ int amdgpu_gart_init(struct amdgpu_device *adev) */ void amdgpu_gart_fini(struct amdgpu_device *adev) { - if (adev->gart.pages && adev->gart.pages_addr && adev->gart.ready) { + if (adev->gart.ready) { /* unbind pages */ amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages); } adev->gart.ready = false; +#ifdef CONFIG_AMDGPU_GART_DEBUGFS vfree(adev->gart.pages); - vfree(adev->gart.pages_addr); adev->gart.pages = NULL; - adev->gart.pages_addr = NULL; - +#endif amdgpu_dummy_page_fini(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h index c3f4e85594ff..503d54098128 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h @@ -43,7 +43,7 @@ struct amdgpu_ring; struct amdgpu_bo; struct amdgpu_gds_asic_info { - uint32_t total_size; + uint32_t total_size; uint32_t gfx_partition_size; uint32_t cs_partition_size; }; @@ -52,8 +52,8 @@ struct amdgpu_gds { struct amdgpu_gds_asic_info mem; struct amdgpu_gds_asic_info gws; struct amdgpu_gds_asic_info oa; - /* At present, GDS, GWS and OA resources for gfx (graphics) - * is always pre-allocated and available for graphics operation. + /* At present, GDS, GWS and OA resources for gfx (graphics) + * is always pre-allocated and available for graphics operation. * Such resource is shared between all gfx clients. * TODO: move this operation to user space * */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index fa6a27bff298..88fbed2389c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -93,7 +93,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev) struct drm_device *ddev = adev->ddev; struct drm_file *file; - mutex_lock(&ddev->struct_mutex); + mutex_lock(&ddev->filelist_mutex); list_for_each_entry(file, &ddev->filelist, lhead) { struct drm_gem_object *gobj; @@ -103,13 +103,13 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev) spin_lock(&file->table_lock); idr_for_each_entry(&file->object_idr, gobj, handle) { WARN_ONCE(1, "And also active allocations!\n"); - drm_gem_object_unreference(gobj); + drm_gem_object_unreference_unlocked(gobj); } idr_destroy(&file->object_idr); spin_unlock(&file->table_lock); } - mutex_unlock(&ddev->struct_mutex); + mutex_unlock(&ddev->filelist_mutex); } /* @@ -338,7 +338,7 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp, struct drm_gem_object *gobj; struct amdgpu_bo *robj; - gobj = drm_gem_object_lookup(dev, filp, handle); + gobj = drm_gem_object_lookup(filp, handle); if (gobj == NULL) { return -ENOENT; } @@ -402,7 +402,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, int r = 0; long ret; - gobj = drm_gem_object_lookup(dev, filp, handle); + gobj = drm_gem_object_lookup(filp, handle); if (gobj == NULL) { return -ENOENT; } @@ -436,7 +436,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, int r = -1; DRM_DEBUG("%d \n", args->handle); - gobj = drm_gem_object_lookup(dev, filp, args->handle); + gobj = drm_gem_object_lookup(filp, args->handle); if (gobj == NULL) return -ENOENT; robj = gem_to_amdgpu_bo(gobj); @@ -503,7 +503,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, if (r) goto error_print; - amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates); + amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates); list_for_each_entry(entry, &list, head) { 
domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type); /* if anything is swapped out don't swap it in here, @@ -584,7 +584,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - gobj = drm_gem_object_lookup(dev, filp, args->handle); + gobj = drm_gem_object_lookup(filp, args->handle); if (gobj == NULL) return -ENOENT; rbo = gem_to_amdgpu_bo(gobj); @@ -646,7 +646,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, struct amdgpu_bo *robj; int r; - gobj = drm_gem_object_lookup(dev, filp, args->handle); + gobj = drm_gem_object_lookup(filp, args->handle); if (gobj == NULL) { return -ENOENT; } @@ -769,7 +769,7 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data) struct drm_file *file; int r; - r = mutex_lock_interruptible(&dev->struct_mutex); + r = mutex_lock_interruptible(&dev->filelist_mutex); if (r) return r; @@ -793,11 +793,11 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data) spin_unlock(&file->table_lock); } - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->filelist_mutex); return 0; } -static struct drm_info_list amdgpu_debugfs_gem_list[] = { +static const struct drm_info_list amdgpu_debugfs_gem_list[] = { {"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL}, }; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 9f95da4f0536..a074edd95c70 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -70,3 +70,47 @@ void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg) } } } + +/** + * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter + * + * @mask: array in which the per-shader array disable masks will be stored + * @max_se: number of SEs + * @max_sh: number of SHs + * + * The bitmask of CUs to be disabled in the shader array determined by se and + * sh is stored in mask[se * max_sh + sh]. 
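In other words, each (se, sh) shader array owns one 32-bit word of the mask, and bit cu within that word marks a disabled CU. A minimal sketch of how a consumer could test a single CU against such a mask (the helper name is illustrative, not part of the patch):

	/* Illustrative helper: true if CU (se, sh, cu) is marked disabled in a
	 * mask[] previously filled by amdgpu_gfx_parse_disable_cu(). */
	static bool cu_is_disabled(const unsigned *mask, unsigned max_sh,
				   unsigned se, unsigned sh, unsigned cu)
	{
		return (mask[se * max_sh + sh] >> cu) & 1;
	}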
+ */ +void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh) +{ + unsigned se, sh, cu; + const char *p; + + memset(mask, 0, sizeof(*mask) * max_se * max_sh); + + if (!amdgpu_disable_cu || !*amdgpu_disable_cu) + return; + + p = amdgpu_disable_cu; + for (;;) { + char *next; + int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu); + if (ret < 3) { + DRM_ERROR("amdgpu: could not parse disable_cu\n"); + return; + } + + if (se < max_se && sh < max_sh && cu < 16) { + DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu); + mask[se * max_sh + sh] |= 1u << cu; + } else { + DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n", + se, sh, cu); + } + + next = strchr(p, ','); + if (!next) + break; + p = next + 1; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index dc06cbda7be6..51321e154c09 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -27,4 +27,6 @@ int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg); void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg); +void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 8443cea6821a..a31d7ef3032c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -33,6 +33,8 @@ #include "amdgpu.h" #include "atom.h" +#define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000) + /* * IB * IBs (Indirect Buffers) and areas of GPU accessible memory where @@ -74,9 +76,6 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); } - ib->vm = vm; - ib->vm_id = 0; - return 0; } @@ -89,7 +88,8 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, * * Free an IB (all asics).
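Together with the amdgpu_ib_get() hunk above, the IB lifecycle after this patch looks roughly as follows; a hedged sketch of a job-less submission in the style of a ring test (error handling and fence waiting trimmed, the 256-byte allocation size is arbitrary):

	/* Sketch: allocate an IB, submit it without a job, then free it
	 * against the returned hardware fence. */
	struct amdgpu_ib ib;
	struct fence *f = NULL;
	int r;

	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r)
		return r;
	/* ... write packets into ib.ptr[] and set ib.length_dw ... */
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
	amdgpu_ib_free(adev, &ib, f);
	fence_put(f);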
*/ -void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f) +void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, + struct fence *f) { amdgpu_sa_bo_free(adev, &ib->sa_bo, f); } @@ -117,28 +117,36 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fen */ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, struct amdgpu_ib *ibs, struct fence *last_vm_update, - struct fence **f) + struct amdgpu_job *job, struct fence **f) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib *ib = &ibs[0]; - struct amdgpu_ctx *ctx, *old_ctx; + bool skip_preamble, need_ctx_switch; + unsigned patch_offset = ~0; struct amdgpu_vm *vm; - struct fence *hwf; + uint64_t ctx; + unsigned i; int r = 0; if (num_ibs == 0) return -EINVAL; - ctx = ibs->ctx; - vm = ibs->vm; + /* ring tests don't use a job */ + if (job) { + vm = job->vm; + ctx = job->ctx; + } else { + vm = NULL; + ctx = 0; + } if (!ring->ready) { dev_err(adev->dev, "couldn't schedule ib\n"); return -EINVAL; } - if (vm && !ibs->vm_id) { + if (vm && !job->vm_id) { dev_err(adev->dev, "VM IB without ID\n"); return -EINVAL; } @@ -149,58 +157,59 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, return r; } + if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec) + patch_offset = amdgpu_ring_init_cond_exec(ring); + if (vm) { - /* do context switch */ - amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr, - ib->gds_base, ib->gds_size, - ib->gws_base, ib->gws_size, - ib->oa_base, ib->oa_size); - - if (ring->funcs->emit_hdp_flush) - amdgpu_ring_emit_hdp_flush(ring); + r = amdgpu_vm_flush(ring, job); + if (r) { + amdgpu_ring_undo(ring); + return r; + } } - old_ctx = ring->current_ctx; + if (ring->funcs->emit_hdp_flush) + amdgpu_ring_emit_hdp_flush(ring); + + /* always set cond_exec_polling to CONTINUE */ + *ring->cond_exe_cpu_addr = 1; + + skip_preamble = ring->current_ctx == ctx; + need_ctx_switch = ring->current_ctx != ctx; for (i = 0; i < num_ibs; ++i) { ib = &ibs[i]; - if (ib->ctx != ctx || ib->vm != vm) { - ring->current_ctx = old_ctx; - if (ib->vm_id) - amdgpu_vm_reset_id(adev, ib->vm_id); - amdgpu_ring_undo(ring); - return -EINVAL; - } - amdgpu_ring_emit_ib(ring, ib); - ring->current_ctx = ctx; - } + /* drop preamble IBs if we don't have a context switch */ + if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble) + continue; - if (vm) { - if (ring->funcs->emit_hdp_invalidate) - amdgpu_ring_emit_hdp_invalidate(ring); + amdgpu_ring_emit_ib(ring, ib, job ? 
job->vm_id : 0, + need_ctx_switch); + need_ctx_switch = false; } - r = amdgpu_fence_emit(ring, &hwf); + if (ring->funcs->emit_hdp_invalidate) + amdgpu_ring_emit_hdp_invalidate(ring); + + r = amdgpu_fence_emit(ring, f); if (r) { dev_err(adev->dev, "failed to emit fence (%d)\n", r); - ring->current_ctx = old_ctx; - if (ib->vm_id) - amdgpu_vm_reset_id(adev, ib->vm_id); + if (job && job->vm_id) + amdgpu_vm_reset_id(adev, job->vm_id); amdgpu_ring_undo(ring); return r; } /* wrap the last IB with fence */ - if (ib->user) { - uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo); - addr += ib->user->offset; - amdgpu_ring_emit_fence(ring, addr, ib->sequence, + if (job && job->uf_addr) { + amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence, AMDGPU_FENCE_FLAG_64BIT); } - if (f) - *f = fence_get(hwf); + if (patch_offset != ~0 && ring->funcs->patch_cond_exec) + amdgpu_ring_patch_cond_exec(ring, patch_offset); + ring->current_ctx = ctx; amdgpu_ring_commit(ring); return 0; } @@ -279,7 +288,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) if (!ring || !ring->ready) continue; - r = amdgpu_ring_test_ib(ring); + r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT); if (r) { ring->ready = false; @@ -315,7 +324,7 @@ static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data) } -static struct drm_info_list amdgpu_debugfs_sa_list[] = { +static const struct drm_info_list amdgpu_debugfs_sa_list[] = { {"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL}, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index f594cfaa97e5..278708f5a744 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -219,6 +219,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev) if (r) { return r; } + /* enable msi */ adev->irq.msi_enabled = false; @@ -382,6 +383,18 @@ int amdgpu_irq_update(struct amdgpu_device *adev, return r; } +void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev) +{ + int i, j; + for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; i++) { + struct amdgpu_irq_src *src = adev->irq.sources[i]; + if (!src) + continue; + for (j = 0; j < src->num_types; j++) + amdgpu_irq_update(adev, src, j); + } +} + /** * amdgpu_irq_get - enable interrupt * @@ -496,7 +509,7 @@ static int amdgpu_irqdomain_map(struct irq_domain *d, return 0; } -static struct irq_domain_ops amdgpu_hw_irqdomain_ops = { +static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = { .map = amdgpu_irqdomain_map, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h index e124b59f39c1..7ef09352e534 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h @@ -94,6 +94,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type); bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type); +void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev); int amdgpu_irq_add_domain(struct amdgpu_device *adev); void amdgpu_irq_remove_domain(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 9c9b19e2f353..6674d40eb3ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -28,8 +28,19 @@ #include "amdgpu.h" #include "amdgpu_trace.h" +static void amdgpu_job_timedout(struct amd_sched_job *s_job) +{ + struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); + + 
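+	/* The job's fence never signaled within the scheduler's timeout:
+	 * report how far the ring got, then attempt recovery via a full
+	 * GPU reset. */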
DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n", + job->base.sched->name, + atomic_read(&job->ring->fence_drv.last_seq), + job->ring->fence_drv.sync_seq); + amdgpu_gpu_reset(job->adev); +} + int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, - struct amdgpu_job **job) + struct amdgpu_job **job, struct amdgpu_vm *vm) { size_t size = sizeof(struct amdgpu_job); @@ -43,6 +54,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, return -ENOMEM; (*job)->adev = adev; + (*job)->vm = vm; (*job)->ibs = (void *)&(*job)[1]; (*job)->num_ibs = num_ibs; @@ -56,7 +68,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, { int r; - r = amdgpu_job_alloc(adev, 1, job); + r = amdgpu_job_alloc(adev, 1, job, NULL); if (r) return r; @@ -67,18 +79,32 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, return r; } -void amdgpu_job_free(struct amdgpu_job *job) +void amdgpu_job_free_resources(struct amdgpu_job *job) { - unsigned i; struct fence *f; + unsigned i; + /* use sched fence if available */ - f = (job->base.s_fence)? &job->base.s_fence->base : job->fence; + f = job->base.s_fence ? &job->base.s_fence->finished : job->fence; for (i = 0; i < job->num_ibs; ++i) - amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f); + amdgpu_ib_free(job->adev, &job->ibs[i], f); +} + +void amdgpu_job_free_cb(struct amd_sched_job *s_job) +{ + struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); + fence_put(job->fence); + amdgpu_sync_free(&job->sync); + kfree(job); +} - amdgpu_bo_unref(&job->uf.bo); +void amdgpu_job_free(struct amdgpu_job *job) +{ + amdgpu_job_free_resources(job); + + fence_put(job->fence); amdgpu_sync_free(&job->sync); kfree(job); } @@ -87,16 +113,20 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, struct amd_sched_entity *entity, void *owner, struct fence **f) { + int r; job->ring = ring; - job->base.sched = &ring->sched; - job->base.s_entity = entity; - job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner); - if (!job->base.s_fence) - return -ENOMEM; - *f = fence_get(&job->base.s_fence->base); + if (!f) + return -EINVAL; + + r = amd_sched_job_init(&job->base, &ring->sched, entity, owner); + if (r) + return r; job->owner = owner; + job->ctx = entity->fence_context; + *f = fence_get(&job->base.s_fence->finished); + amdgpu_job_free_resources(job); amd_sched_entity_push_job(&job->base); return 0; @@ -105,27 +135,19 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job) { struct amdgpu_job *job = to_amdgpu_job(sched_job); - struct amdgpu_vm *vm = job->ibs->vm; + struct amdgpu_vm *vm = job->vm; struct fence *fence = amdgpu_sync_get_fence(&job->sync); - if (fence == NULL && vm && !job->ibs->vm_id) { + if (fence == NULL && vm && !job->vm_id) { struct amdgpu_ring *ring = job->ring; - unsigned i, vm_id; - uint64_t vm_pd_addr; int r; r = amdgpu_vm_grab_id(vm, ring, &job->sync, - &job->base.s_fence->base, - &vm_id, &vm_pd_addr); + &job->base.s_fence->finished, + job); if (r) DRM_ERROR("Error getting VM ID (%d)\n", r); - else { - for (i = 0; i < job->num_ibs; ++i) { - job->ibs[i].vm_id = vm_id; - job->ibs[i].vm_pd_addr = vm_pd_addr; - } - } fence = amdgpu_sync_get_fence(&job->sync); } @@ -145,27 +167,24 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job) } job = to_amdgpu_job(sched_job); - r = amdgpu_sync_wait(&job->sync); - if (r) { - 
DRM_ERROR("failed to sync wait (%d)\n", r); - return NULL; - } + BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL)); trace_amdgpu_sched_run_job(job); r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, - job->sync.last_vm_update, &fence); - if (r) { + job->sync.last_vm_update, job, &fence); + if (r) DRM_ERROR("Error scheduling IBs (%d)\n", r); - goto err; - } -err: - job->fence = fence; - amdgpu_job_free(job); + /* if gpu reset, hw fence will be replaced here */ + fence_put(job->fence); + job->fence = fence_get(fence); + amdgpu_job_free_resources(job); return fence; } -struct amd_sched_backend_ops amdgpu_sched_ops = { +const struct amd_sched_backend_ops amdgpu_sched_ops = { .dependency = amdgpu_job_dependency, .run_job = amdgpu_job_run, + .timedout_job = amdgpu_job_timedout, + .free_job = amdgpu_job_free_cb }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 7805a8706af7..d942654a1de0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -60,7 +60,10 @@ int amdgpu_driver_unload_kms(struct drm_device *dev) if (adev->rmmio == NULL) goto done_free; - pm_runtime_get_sync(dev->dev); + if (amdgpu_device_is_px(dev)) { + pm_runtime_get_sync(dev->dev); + pm_runtime_forbid(dev->dev); + } amdgpu_amdkfd_device_fini(adev); @@ -135,13 +138,75 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) } out: - if (r) + if (r) { + /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */ + if (adev->rmmio && amdgpu_device_is_px(dev)) + pm_runtime_put_noidle(dev->dev); amdgpu_driver_unload_kms(dev); - + } return r; } +static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, + struct drm_amdgpu_query_fw *query_fw, + struct amdgpu_device *adev) +{ + switch (query_fw->fw_type) { + case AMDGPU_INFO_FW_VCE: + fw_info->ver = adev->vce.fw_version; + fw_info->feature = adev->vce.fb_version; + break; + case AMDGPU_INFO_FW_UVD: + fw_info->ver = adev->uvd.fw_version; + fw_info->feature = 0; + break; + case AMDGPU_INFO_FW_GMC: + fw_info->ver = adev->mc.fw_version; + fw_info->feature = 0; + break; + case AMDGPU_INFO_FW_GFX_ME: + fw_info->ver = adev->gfx.me_fw_version; + fw_info->feature = adev->gfx.me_feature_version; + break; + case AMDGPU_INFO_FW_GFX_PFP: + fw_info->ver = adev->gfx.pfp_fw_version; + fw_info->feature = adev->gfx.pfp_feature_version; + break; + case AMDGPU_INFO_FW_GFX_CE: + fw_info->ver = adev->gfx.ce_fw_version; + fw_info->feature = adev->gfx.ce_feature_version; + break; + case AMDGPU_INFO_FW_GFX_RLC: + fw_info->ver = adev->gfx.rlc_fw_version; + fw_info->feature = adev->gfx.rlc_feature_version; + break; + case AMDGPU_INFO_FW_GFX_MEC: + if (query_fw->index == 0) { + fw_info->ver = adev->gfx.mec_fw_version; + fw_info->feature = adev->gfx.mec_feature_version; + } else if (query_fw->index == 1) { + fw_info->ver = adev->gfx.mec2_fw_version; + fw_info->feature = adev->gfx.mec2_feature_version; + } else + return -EINVAL; + break; + case AMDGPU_INFO_FW_SMC: + fw_info->ver = adev->pm.fw_version; + fw_info->feature = 0; + break; + case AMDGPU_INFO_FW_SDMA: + if (query_fw->index >= adev->sdma.num_instances) + return -EINVAL; + fw_info->ver = adev->sdma.instance[query_fw->index].fw_version; + fw_info->feature = adev->sdma.instance[query_fw->index].feature_version; + break; + default: + return -EINVAL; + } + return 0; +} + /* * Userspace get information ioctl */ @@ -288,67 +353,20 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file return 
copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0; } case AMDGPU_INFO_TIMESTAMP: - ui64 = amdgpu_asic_get_gpu_clock_counter(adev); + ui64 = amdgpu_gfx_get_gpu_clock_counter(adev); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_FW_VERSION: { struct drm_amdgpu_info_firmware fw_info; + int ret; /* We only support one instance of each IP block right now. */ if (info->query_fw.ip_instance != 0) return -EINVAL; - switch (info->query_fw.fw_type) { - case AMDGPU_INFO_FW_VCE: - fw_info.ver = adev->vce.fw_version; - fw_info.feature = adev->vce.fb_version; - break; - case AMDGPU_INFO_FW_UVD: - fw_info.ver = 0; - fw_info.feature = 0; - break; - case AMDGPU_INFO_FW_GMC: - fw_info.ver = adev->mc.fw_version; - fw_info.feature = 0; - break; - case AMDGPU_INFO_FW_GFX_ME: - fw_info.ver = adev->gfx.me_fw_version; - fw_info.feature = adev->gfx.me_feature_version; - break; - case AMDGPU_INFO_FW_GFX_PFP: - fw_info.ver = adev->gfx.pfp_fw_version; - fw_info.feature = adev->gfx.pfp_feature_version; - break; - case AMDGPU_INFO_FW_GFX_CE: - fw_info.ver = adev->gfx.ce_fw_version; - fw_info.feature = adev->gfx.ce_feature_version; - break; - case AMDGPU_INFO_FW_GFX_RLC: - fw_info.ver = adev->gfx.rlc_fw_version; - fw_info.feature = adev->gfx.rlc_feature_version; - break; - case AMDGPU_INFO_FW_GFX_MEC: - if (info->query_fw.index == 0) { - fw_info.ver = adev->gfx.mec_fw_version; - fw_info.feature = adev->gfx.mec_feature_version; - } else if (info->query_fw.index == 1) { - fw_info.ver = adev->gfx.mec2_fw_version; - fw_info.feature = adev->gfx.mec2_feature_version; - } else - return -EINVAL; - break; - case AMDGPU_INFO_FW_SMC: - fw_info.ver = adev->pm.fw_version; - fw_info.feature = 0; - break; - case AMDGPU_INFO_FW_SDMA: - if (info->query_fw.index >= adev->sdma.num_instances) - return -EINVAL; - fw_info.ver = adev->sdma.instance[info->query_fw.index].fw_version; - fw_info.feature = adev->sdma.instance[info->query_fw.index].feature_version; - break; - default: - return -EINVAL; - } + ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev); + if (ret) + return ret; + return copy_to_user(out, &fw_info, min((size_t)size, sizeof(fw_info))) ? 
-EFAULT : 0; } @@ -382,8 +400,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file struct drm_amdgpu_info_vram_gtt vram_gtt; vram_gtt.vram_size = adev->mc.real_vram_size; + vram_gtt.vram_size -= adev->vram_pin_size; vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size; - vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size; + vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size); vram_gtt.gtt_size = adev->mc.gtt_size; vram_gtt.gtt_size -= adev->gart_pin_size; return copy_to_user(out, &vram_gtt, @@ -426,7 +445,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file } case AMDGPU_INFO_DEV_INFO: { struct drm_amdgpu_info_device dev_info = {}; - struct amdgpu_cu_info cu_info; dev_info.device_id = dev->pdev->device; dev_info.chip_rev = adev->rev_id; @@ -447,7 +465,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.max_memory_clock = adev->pm.default_mclk * 10; } dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask; - dev_info.num_rb_pipes = adev->gfx.config.num_rbs; + dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se * + adev->gfx.config.max_shader_engines; dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts; dev_info._pad = 0; dev_info.ids_flags = 0; @@ -460,11 +479,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file AMDGPU_GPU_PAGE_SIZE; dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE; - amdgpu_asic_get_cu_info(adev, &cu_info); - dev_info.cu_active_number = cu_info.number; - dev_info.cu_ao_mask = cu_info.ao_cu_mask; + dev_info.cu_active_number = adev->gfx.cu_info.number; + dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask; dev_info.ce_ram_size = adev->gfx.ce_ram_size; - memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap)); + memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0], + sizeof(adev->gfx.cu_info.bitmap)); dev_info.vram_type = adev->mc.vram_type; dev_info.vram_bit_width = adev->mc.vram_width; dev_info.vce_harvest_config = adev->vce.harvest_config; @@ -565,6 +584,9 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr); + amdgpu_uvd_free_handles(adev, file_priv); + amdgpu_vce_free_handles(adev, file_priv); + amdgpu_vm_fini(adev, &fpriv->vm); idr_for_each_entry(&fpriv->bo_list_handles, list, handle) @@ -589,10 +611,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, void amdgpu_driver_preclose_kms(struct drm_device *dev, struct drm_file *file_priv) { - struct amdgpu_device *adev = dev->dev_private; - - amdgpu_uvd_free_handles(adev, file_priv); - amdgpu_vce_free_handles(adev, file_priv); } /* @@ -754,4 +772,131 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = { DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), }; -int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms); +const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms); + +/* + * Debugfs info + */ +#if defined(CONFIG_DEBUG_FS) + +static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct amdgpu_device *adev = dev->dev_private; + struct drm_amdgpu_info_firmware fw_info; + struct drm_amdgpu_query_fw query_fw; + int ret, i; + + /* VCE */ + query_fw.fw_type = 
AMDGPU_INFO_FW_VCE; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* UVD */ + query_fw.fw_type = AMDGPU_INFO_FW_UVD; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* GMC */ + query_fw.fw_type = AMDGPU_INFO_FW_GMC; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* ME */ + query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* PFP */ + query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* CE */ + query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* RLC */ + query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* MEC */ + query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC; + query_fw.index = 0; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* MEC2 */ + if (adev->asic_type == CHIP_KAVERI || + (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) { + query_fw.index = 1; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + } + + /* SMC */ + query_fw.fw_type = AMDGPU_INFO_FW_SMC; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* SDMA */ + query_fw.fw_type = AMDGPU_INFO_FW_SDMA; + for (i = 0; i < adev->sdma.num_instances; i++) { + query_fw.index = i; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n", + i, fw_info.feature, fw_info.ver); + } + + return 0; +} + +static const struct drm_info_list amdgpu_firmware_info_list[] = { + {"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL}, +}; +#endif + +int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list, + ARRAY_SIZE(amdgpu_firmware_info_list)); +#else + return 0; +#endif +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 9f4a45cd2aab..32fa7b7913f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -232,7 +232,10 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) int r; 
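	/* Lock ordering: mn_lock first, then mmap_sem; the new killable
	 * path below must drop mn_lock again before returning -EINTR. */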
mutex_lock(&adev->mn_lock); - down_write(&mm->mmap_sem); + if (down_write_killable(&mm->mmap_sem)) { + mutex_unlock(&adev->mn_lock); + return ERR_PTR(-EINTR); + } hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm) if (rmn->mm == mm) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 8d432e6901af..6b1d7d306564 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -53,7 +53,7 @@ struct amdgpu_hpd; #define AMDGPU_MAX_HPD_PINS 6 #define AMDGPU_MAX_CRTCS 6 -#define AMDGPU_MAX_AFMT_BLOCKS 7 +#define AMDGPU_MAX_AFMT_BLOCKS 9 enum amdgpu_rmx_type { RMX_OFF, @@ -283,7 +283,7 @@ struct amdgpu_display_funcs { u32 (*hpd_get_gpio_reg)(struct amdgpu_device *adev); /* pageflipping */ void (*page_flip)(struct amdgpu_device *adev, - int crtc_id, u64 crtc_base); + int crtc_id, u64 crtc_base, bool async); int (*page_flip_get_scanoutpos)(struct amdgpu_device *adev, int crtc, u32 *vbl, u32 *position); /* display topology setup */ @@ -309,8 +309,8 @@ struct amdgpu_mode_info { struct atom_context *atom_context; struct card_info *atom_card_info; bool mode_config_initialized; - struct amdgpu_crtc *crtcs[6]; - struct amdgpu_afmt *afmt[7]; + struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS]; + struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS]; /* DVI-I properties */ struct drm_property *coherent_mode_property; /* DAC enable load detect */ @@ -530,7 +530,7 @@ struct amdgpu_framebuffer { ((em) == ATOM_ENCODER_MODE_DP_MST)) /* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */ -#define USE_REAL_VBLANKSTART (1 << 30) +#define USE_REAL_VBLANKSTART (1 << 30) #define GET_DISTANCE_TO_VBLANKSTART (1 << 31) void amdgpu_link_encoder_connector(struct drm_device *dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 151a2d42c639..6f0873c75a25 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -424,9 +424,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, bo->pin_count = 1; if (gpu_addr != NULL) *gpu_addr = amdgpu_bo_gpu_offset(bo); - if (domain == AMDGPU_GEM_DOMAIN_VRAM) + if (domain == AMDGPU_GEM_DOMAIN_VRAM) { bo->adev->vram_pin_size += amdgpu_bo_size(bo); - else + if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) + bo->adev->invisible_pin_size += amdgpu_bo_size(bo); + } else bo->adev->gart_pin_size += amdgpu_bo_size(bo); } else { dev_err(bo->adev->dev, "%p pin failed\n", bo); @@ -456,9 +458,11 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo) } r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); if (likely(r == 0)) { - if (bo->tbo.mem.mem_type == TTM_PL_VRAM) + if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { bo->adev->vram_pin_size -= amdgpu_bo_size(bo); - else + if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) + bo->adev->invisible_pin_size -= amdgpu_bo_size(bo); + } else bo->adev->gart_pin_size -= amdgpu_bo_size(bo); } else { dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo); @@ -476,6 +480,17 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev) return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM); } +static const char *amdgpu_vram_names[] = { + "UNKNOWN", + "GDDR1", + "DDR2", + "GDDR3", + "GDDR4", + "GDDR5", + "HBM", + "DDR3" +}; + int amdgpu_bo_init(struct amdgpu_device *adev) { /* Add an MTRR for the VRAM */ @@ -484,8 +499,8 @@ int amdgpu_bo_init(struct amdgpu_device *adev) DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n", adev->mc.mc_vram_size >> 20, 
(unsigned long long)adev->mc.aper_size >> 20); - DRM_INFO("RAM width %dbits DDR\n", - adev->mc.vram_width); + DRM_INFO("RAM width %dbits %s\n", + adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]); return amdgpu_ttm_init(adev); } @@ -526,6 +541,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata, if (!metadata_size) { if (bo->metadata_size) { kfree(bo->metadata); + bo->metadata = NULL; bo->metadata_size = 0; } return 0; @@ -573,6 +589,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct amdgpu_bo *rbo; + struct ttm_mem_reg *old_mem = &bo->mem; if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) return; @@ -586,6 +603,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, /* move_notify is called before move happens */ amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem); + + trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type); } int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) @@ -608,6 +627,10 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) if ((offset + size) <= adev->mc.visible_vram_size) return 0; + /* Can't move a pinned BO to visible VRAM */ + if (abo->pin_count > 0) + return -EINVAL; + /* hurrah the memory is not visible ! */ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM); lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index acc08018c6cc..bdb01d932548 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -71,7 +71,7 @@ static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr) { int r; - r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0); + r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) dev_err(bo->adev->dev, "%p reserve failed\n", bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index ff9597ce268c..ff63b88b0ffa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -270,30 +270,28 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; enum amd_pm_state_type state = 0; - long idx; + unsigned long idx; int ret; if (strlen(buf) == 1) adev->pp_force_state_enabled = false; - else { - ret = kstrtol(buf, 0, &idx); + else if (adev->pp_enabled) { + struct pp_states_info data; - if (ret) { + ret = kstrtoul(buf, 0, &idx); + if (ret || idx >= ARRAY_SIZE(data.states)) { count = -EINVAL; goto fail; } - if (adev->pp_enabled) { - struct pp_states_info data; - amdgpu_dpm_get_pp_num_states(adev, &data); - state = data.states[idx]; - /* only set user selected power states */ - if (state != POWER_STATE_TYPE_INTERNAL_BOOT && - state != POWER_STATE_TYPE_DEFAULT) { - amdgpu_dpm_dispatch_task(adev, - AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL); - adev->pp_force_state_enabled = true; - } + amdgpu_dpm_get_pp_num_states(adev, &data); + state = data.states[idx]; + /* only set user selected power states */ + if (state != POWER_STATE_TYPE_INTERNAL_BOOT && + state != POWER_STATE_TYPE_DEFAULT) { + amdgpu_dpm_dispatch_task(adev, + AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL); + adev->pp_force_state_enabled = true; } } fail: @@ -349,6 +347,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, if (adev->pp_enabled) size = 
amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); + else if (adev->pm.funcs->print_clock_levels) + size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf); return size; } @@ -362,16 +362,27 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; int ret; long level; + uint32_t i, mask = 0; + char sub_str[2]; - ret = kstrtol(buf, 0, &level); + for (i = 0; i < strlen(buf); i++) { + if (*(buf + i) == '\n') + continue; + sub_str[0] = *(buf + i); + sub_str[1] = '\0'; + ret = kstrtol(sub_str, 0, &level); - if (ret) { - count = -EINVAL; - goto fail; + if (ret) { + count = -EINVAL; + goto fail; + } + mask |= 1 << level; } if (adev->pp_enabled) - amdgpu_dpm_force_clock_level(adev, PP_SCLK, level); + amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); + else if (adev->pm.funcs->force_clock_level) + adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask); fail: return count; } @@ -386,6 +397,8 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, if (adev->pp_enabled) size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); + else if (adev->pm.funcs->print_clock_levels) + size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf); return size; } @@ -399,16 +412,27 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; int ret; long level; + uint32_t i, mask = 0; + char sub_str[2]; - ret = kstrtol(buf, 0, &level); + for (i = 0; i < strlen(buf); i++) { + if (*(buf + i) == '\n') + continue; + sub_str[0] = *(buf + i); + sub_str[1] = '\0'; + ret = kstrtol(sub_str, 0, &level); - if (ret) { - count = -EINVAL; - goto fail; + if (ret) { + count = -EINVAL; + goto fail; + } + mask |= 1 << level; } if (adev->pp_enabled) - amdgpu_dpm_force_clock_level(adev, PP_MCLK, level); + amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); + else if (adev->pm.funcs->force_clock_level) + adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask); fail: return count; } @@ -423,6 +447,8 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, if (adev->pp_enabled) size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); + else if (adev->pm.funcs->print_clock_levels) + size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf); return size; } @@ -436,16 +462,119 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; int ret; long level; + uint32_t i, mask = 0; + char sub_str[2]; + + for (i = 0; i < strlen(buf); i++) { + if (*(buf + i) == '\n') + continue; + sub_str[0] = *(buf + i); + sub_str[1] = '\0'; + ret = kstrtol(sub_str, 0, &level); + + if (ret) { + count = -EINVAL; + goto fail; + } + mask |= 1 << level; + } + + if (adev->pp_enabled) + amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); + else if (adev->pm.funcs->force_clock_level) + adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask); +fail: + return count; +} + +static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + uint32_t value = 0; - ret = kstrtol(buf, 0, &level); + if (adev->pp_enabled) + value = amdgpu_dpm_get_sclk_od(adev); + else if (adev->pm.funcs->get_sclk_od) + value = adev->pm.funcs->get_sclk_od(adev); + + return snprintf(buf, PAGE_SIZE, "%d\n", value); +} + +static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = 
dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + int ret; + long int value; + + ret = kstrtol(buf, 0, &value); if (ret) { count = -EINVAL; goto fail; } + if (adev->pp_enabled) { + amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); + amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL); + } else if (adev->pm.funcs->set_sclk_od) { + adev->pm.funcs->set_sclk_od(adev, (uint32_t)value); + adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; + amdgpu_pm_compute_clocks(adev); + } + +fail: + return count; +} + +static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + uint32_t value = 0; + if (adev->pp_enabled) - amdgpu_dpm_force_clock_level(adev, PP_PCIE, level); + value = amdgpu_dpm_get_mclk_od(adev); + else if (adev->pm.funcs->get_mclk_od) + value = adev->pm.funcs->get_mclk_od(adev); + + return snprintf(buf, PAGE_SIZE, "%d\n", value); +} + +static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + int ret; + long int value; + + ret = kstrtol(buf, 0, &value); + + if (ret) { + count = -EINVAL; + goto fail; + } + + if (adev->pp_enabled) { + amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); + amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL); + } else if (adev->pm.funcs->set_mclk_od) { + adev->pm.funcs->set_mclk_od(adev, (uint32_t)value); + adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; + amdgpu_pm_compute_clocks(adev); + } + fail: return count; } @@ -471,6 +600,12 @@ static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR, static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR, amdgpu_get_pp_dpm_pcie, amdgpu_set_pp_dpm_pcie); +static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR, + amdgpu_get_pp_sclk_od, + amdgpu_set_pp_sclk_od); +static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR, + amdgpu_get_pp_mclk_od, + amdgpu_set_pp_mclk_od); static ssize_t amdgpu_hwmon_show_temp(struct device *dev, struct device_attribute *attr, @@ -1089,22 +1224,34 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) DRM_ERROR("failed to create device file pp_table\n"); return ret; } - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_sclk\n"); - return ret; - } - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_mclk\n"); - return ret; - } - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_pcie\n"); - return ret; - } } + + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); + if (ret) { + DRM_ERROR("failed to create device file pp_dpm_sclk\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); + if (ret) { + DRM_ERROR("failed to create device file pp_dpm_mclk\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie); + if (ret) { + DRM_ERROR("failed to create device file pp_dpm_pcie\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od); + if (ret) { + DRM_ERROR("failed to create device file pp_sclk_od\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od); + if (ret) { + DRM_ERROR("failed to create device file 
pp_mclk_od\n"); + return ret; + } + ret = amdgpu_debugfs_pm_init(adev); if (ret) { DRM_ERROR("Failed to register debugfs file for dpm!\n"); @@ -1127,10 +1274,12 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) device_remove_file(adev->dev, &dev_attr_pp_cur_state); device_remove_file(adev->dev, &dev_attr_pp_force_state); device_remove_file(adev->dev, &dev_attr_pp_table); - device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); - device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); - device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie); } + device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); + device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); + device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie); + device_remove_file(adev->dev, &dev_attr_pp_sclk_od); + device_remove_file(adev->dev, &dev_attr_pp_mclk_od); } void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) @@ -1212,7 +1361,7 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) return 0; } -static struct drm_info_list amdgpu_pm_info_list[] = { +static const struct drm_info_list amdgpu_pm_info_list[] = { {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL}, }; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 3cb6d6c413c7..c5738a22b690 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -52,6 +52,7 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev) pp_init->chip_family = adev->family; pp_init->chip_id = adev->asic_type; pp_init->device = amdgpu_cgs_create_device(adev); + pp_init->powercontainment_enabled = amdgpu_powercontainment; ret = amd_powerplay_init(pp_init, amd_pp); kfree(pp_init); @@ -99,6 +100,10 @@ static int amdgpu_pp_early_init(void *handle) #ifdef CONFIG_DRM_AMD_POWERPLAY switch (adev->asic_type) { + case CHIP_POLARIS11: + case CHIP_POLARIS10: + adev->pp_enabled = true; + break; case CHIP_TONGA: case CHIP_FIJI: adev->pp_enabled = (amdgpu_powerplay == 0) ? 
false : true; @@ -143,7 +148,7 @@ static int amdgpu_pp_late_init(void *handle) adev->powerplay.pp_handle); #ifdef CONFIG_DRM_AMD_POWERPLAY - if (adev->pp_enabled) { + if (adev->pp_enabled && adev->pm.dpm_enabled) { amdgpu_pm_sysfs_init(adev); amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL); } @@ -161,12 +166,8 @@ static int amdgpu_pp_sw_init(void *handle) adev->powerplay.pp_handle); #ifdef CONFIG_DRM_AMD_POWERPLAY - if (adev->pp_enabled) { - if (amdgpu_dpm == 0) - adev->pm.dpm_enabled = false; - else - adev->pm.dpm_enabled = true; - } + if (adev->pp_enabled) + adev->pm.dpm_enabled = true; #endif return ret; @@ -183,13 +184,6 @@ static int amdgpu_pp_sw_fini(void *handle) if (ret) return ret; -#ifdef CONFIG_DRM_AMD_POWERPLAY - if (adev->pp_enabled) { - amdgpu_pm_sysfs_fini(adev); - amd_powerplay_fini(adev->powerplay.pp_handle); - } -#endif - return ret; } @@ -223,6 +217,22 @@ static int amdgpu_pp_hw_fini(void *handle) return ret; } +static void amdgpu_pp_late_fini(void *handle) +{ +#ifdef CONFIG_DRM_AMD_POWERPLAY + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->pp_enabled) { + amdgpu_pm_sysfs_fini(adev); + amd_powerplay_fini(adev->powerplay.pp_handle); + } + + if (adev->powerplay.ip_funcs->late_fini) + adev->powerplay.ip_funcs->late_fini( + adev->powerplay.pp_handle); +#endif +} + static int amdgpu_pp_suspend(void *handle) { int ret = 0; @@ -303,28 +313,20 @@ static int amdgpu_pp_soft_reset(void *handle) return ret; } -static void amdgpu_pp_print_status(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev->powerplay.ip_funcs->print_status) - adev->powerplay.ip_funcs->print_status( - adev->powerplay.pp_handle); -} - const struct amd_ip_funcs amdgpu_pp_ip_funcs = { + .name = "amdgpu_powerplay", .early_init = amdgpu_pp_early_init, .late_init = amdgpu_pp_late_init, .sw_init = amdgpu_pp_sw_init, .sw_fini = amdgpu_pp_sw_fini, .hw_init = amdgpu_pp_hw_init, .hw_fini = amdgpu_pp_hw_fini, + .late_fini = amdgpu_pp_late_fini, .suspend = amdgpu_pp_suspend, .resume = amdgpu_pp_resume, .is_idle = amdgpu_pp_is_idle, .wait_for_idle = amdgpu_pp_wait_for_idle, .soft_reset = amdgpu_pp_soft_reset, - .print_status = amdgpu_pp_print_status, .set_clockgating_state = amdgpu_pp_set_clockgating_state, .set_powergating_state = amdgpu_pp_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index be6388f73ba2..7700dc22f243 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -57,9 +57,10 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) ttm_bo_kunmap(&bo->dma_buf_vmap); } -struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev, - struct dma_buf_attachment *attach, - struct sg_table *sg) +struct drm_gem_object * +amdgpu_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sg) { struct reservation_object *resv = attach->dmabuf->resv; struct amdgpu_device *adev = dev->dev_private; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 972eed2ef787..85aeb0a804bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -28,6 +28,7 @@ */ #include <linux/seq_file.h> #include <linux/slab.h> +#include <linux/debugfs.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" @@ -46,7 +47,9 @@ * wptr. 
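 * (Both pointers index a power-of-two sized ring, so wrap-around
 * handling is a simple AND with ptr_mask rather than a modulo.)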
The GPU then starts fetching commands and executes * them until the pointers are equal again. */ -static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring); +static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, + struct amdgpu_ring *ring); +static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring); /** * amdgpu_ring_alloc - allocate space on the ring buffer @@ -72,6 +75,10 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw) ring->count_dw = ndw; ring->wptr_old = ring->wptr; + + if (ring->funcs->begin_use) + ring->funcs->begin_use(ring); + return 0; } @@ -124,6 +131,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) mb(); amdgpu_ring_set_wptr(ring); + + if (ring->funcs->end_use) + ring->funcs->end_use(ring); } /** @@ -136,78 +146,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) void amdgpu_ring_undo(struct amdgpu_ring *ring) { ring->wptr = ring->wptr_old; -} - -/** - * amdgpu_ring_backup - Back up the content of a ring - * - * @ring: the ring we want to back up - * - * Saves all unprocessed commits from a ring, returns the number of dwords saved. - */ -unsigned amdgpu_ring_backup(struct amdgpu_ring *ring, - uint32_t **data) -{ - unsigned size, ptr, i; - - *data = NULL; - - if (ring->ring_obj == NULL) - return 0; - - /* it doesn't make sense to save anything if all fences are signaled */ - if (!amdgpu_fence_count_emitted(ring)) - return 0; - - ptr = le32_to_cpu(*ring->next_rptr_cpu_addr); - - size = ring->wptr + (ring->ring_size / 4); - size -= ptr; - size &= ring->ptr_mask; - if (size == 0) - return 0; - /* and then save the content of the ring */ - *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); - if (!*data) - return 0; - for (i = 0; i < size; ++i) { - (*data)[i] = ring->ring[ptr++]; - ptr &= ring->ptr_mask; - } - - return size; -} - -/** - * amdgpu_ring_restore - append saved commands to the ring again - * - * @ring: ring to append commands to - * @size: number of dwords we want to write - * @data: saved commands - * - * Allocates space on the ring and restore the previously saved commands. - */ -int amdgpu_ring_restore(struct amdgpu_ring *ring, - unsigned size, uint32_t *data) -{ - int i, r; - - if (!size || !data) - return 0; - - /* restore the saved ring content */ - r = amdgpu_ring_alloc(ring, size); - if (r) - return r; - - for (i = 0; i < size; ++i) { - amdgpu_ring_write(ring, data[i]); - } - - amdgpu_ring_commit(ring); - kfree(data); - return 0; + if (ring->funcs->end_use) + ring->funcs->end_use(ring); } /** @@ -215,18 +156,17 @@ int amdgpu_ring_restore(struct amdgpu_ring *ring, * * @adev: amdgpu_device pointer * @ring: amdgpu_ring structure holding ring information - * @ring_size: size of the ring + * @max_ndw: maximum number of dw for ring alloc * @nop: nop packet for this ring * * Initialize the driver information for the selected ring (all asics). * Returns 0 on success, error on failure. 
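With this change the ring is sized from the per-submission dword budget rather than a caller-supplied byte size. A worked sketch of the arithmetic (the max_dw value is illustrative; amdgpu_sched_hw_submission defaulted to 2 at the time):

	/* e.g. max_dw = 1024 and amdgpu_sched_hw_submission = 2:
	 * 1024 dw * 4 bytes * 2 submissions = 8192 bytes, already a power
	 * of two, so roundup_pow_of_two() leaves it unchanged and
	 * ptr_mask = (8192 / 4) - 1 = 2047. */
	ring->ring_size = roundup_pow_of_two(max_dw * 4 *
					     amdgpu_sched_hw_submission);
	ring->ptr_mask = (ring->ring_size / 4) - 1;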
*/ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, - unsigned ring_size, u32 nop, u32 align_mask, + unsigned max_dw, u32 nop, u32 align_mask, struct amdgpu_irq_src *irq_src, unsigned irq_type, enum amdgpu_ring_type ring_type) { - u32 rb_bufsz; int r; if (ring->adev == NULL) { @@ -260,24 +200,22 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, return r; } - r = amdgpu_wb_get(adev, &ring->next_rptr_offs); + r = amdgpu_wb_get(adev, &ring->cond_exe_offs); if (r) { - dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r); + dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r); return r; } - ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4); - ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs]; - spin_lock_init(&ring->fence_lock); + ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4); + ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs]; + r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type); if (r) { dev_err(adev->dev, "failed initializing fences (%d).\n", r); return r; } - /* Align ring size */ - rb_bufsz = order_base_2(ring_size / 8); - ring_size = (1 << (rb_bufsz + 1)) * 4; - ring->ring_size = ring_size; + ring->ring_size = roundup_pow_of_two(max_dw * 4 * + amdgpu_sched_hw_submission); ring->align_mask = align_mask; ring->nop = nop; ring->type = ring_type; @@ -303,6 +241,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, } r = amdgpu_bo_kmap(ring->ring_obj, (void **)&ring->ring); + + memset((void *)ring->ring, 0, ring->ring_size); + amdgpu_bo_unreserve(ring->ring_obj); if (r) { dev_err(adev->dev, "(%d) ring map failed\n", r); @@ -310,8 +251,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, } } ring->ptr_mask = (ring->ring_size / 4) - 1; - ring->max_dw = DIV_ROUND_UP(ring->ring_size / 4, - amdgpu_sched_hw_submission); + ring->max_dw = max_dw; if (amdgpu_debugfs_ring_init(adev, ring)) { DRM_ERROR("Failed to register debugfs file for rings !\n"); @@ -337,10 +277,10 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) ring->ring = NULL; ring->ring_obj = NULL; + amdgpu_wb_free(ring->adev, ring->cond_exe_offs); amdgpu_wb_free(ring->adev, ring->fence_offs); amdgpu_wb_free(ring->adev, ring->rptr_offs); amdgpu_wb_free(ring->adev, ring->wptr_offs); - amdgpu_wb_free(ring->adev, ring->next_rptr_offs); if (ring_obj) { r = amdgpu_bo_reserve(ring_obj, false); @@ -351,6 +291,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) } amdgpu_bo_unref(&ring_obj); } + amdgpu_debugfs_ring_fini(ring); } /* @@ -358,96 +299,90 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) */ #if defined(CONFIG_DEBUG_FS) -static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data) +/* Layout of file is 12 bytes consisting of + * - rptr + * - wptr + * - driver's copy of wptr + * + * followed by n-words of ring data + */ +static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) { - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; - int roffset = *(int*)node->info_ent->data; - struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset); - - uint32_t rptr, wptr, rptr_next; - unsigned i; - - wptr = amdgpu_ring_get_wptr(ring); - seq_printf(m, "wptr: 0x%08x [%5d]\n", wptr, wptr); - - rptr = amdgpu_ring_get_rptr(ring); - rptr_next = 
le32_to_cpu(*ring->next_rptr_cpu_addr); - - seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr); - - seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", - ring->wptr, ring->wptr); - - if (!ring->ready) - return 0; - - /* print 8 dw before current rptr as often it's the last executed - * packet that is the root issue - */ - i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; - while (i != rptr) { - seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]); - if (i == rptr) - seq_puts(m, " *"); - if (i == rptr_next) - seq_puts(m, " #"); - seq_puts(m, "\n"); - i = (i + 1) & ring->ptr_mask; + struct amdgpu_ring *ring = (struct amdgpu_ring*)f->f_inode->i_private; + int r, i; + uint32_t value, result, early[3]; + + if (*pos & 3 || size & 3) + return -EINVAL; + + result = 0; + + if (*pos < 12) { + early[0] = amdgpu_ring_get_rptr(ring); + early[1] = amdgpu_ring_get_wptr(ring); + early[2] = ring->wptr; + for (i = *pos / 4; i < 3 && size; i++) { + r = put_user(early[i], (uint32_t *)buf); + if (r) + return r; + buf += 4; + result += 4; + size -= 4; + *pos += 4; + } } - while (i != wptr) { - seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]); - if (i == rptr) - seq_puts(m, " *"); - if (i == rptr_next) - seq_puts(m, " #"); - seq_puts(m, "\n"); - i = (i + 1) & ring->ptr_mask; + + while (size) { + if (*pos >= (ring->ring_size + 12)) + return result; + + value = ring->ring[(*pos - 12)/4]; + r = put_user(value, (uint32_t*)buf); + if (r) + return r; + buf += 4; + result += 4; + size -= 4; + *pos += 4; } - return 0; + + return result; } -/* TODO: clean this up !*/ -static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]); -static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]); -static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]); -static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma.instance[0].ring); -static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma.instance[1].ring); -static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring); -static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]); -static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]); - -static struct drm_info_list amdgpu_debugfs_ring_info_list[] = { - {"amdgpu_ring_gfx", amdgpu_debugfs_ring_info, 0, &amdgpu_gfx_index}, - {"amdgpu_ring_cp1", amdgpu_debugfs_ring_info, 0, &cayman_cp1_index}, - {"amdgpu_ring_cp2", amdgpu_debugfs_ring_info, 0, &cayman_cp2_index}, - {"amdgpu_ring_dma1", amdgpu_debugfs_ring_info, 0, &amdgpu_dma1_index}, - {"amdgpu_ring_dma2", amdgpu_debugfs_ring_info, 0, &amdgpu_dma2_index}, - {"amdgpu_ring_uvd", amdgpu_debugfs_ring_info, 0, &r600_uvd_index}, - {"amdgpu_ring_vce1", amdgpu_debugfs_ring_info, 0, &si_vce1_index}, - {"amdgpu_ring_vce2", amdgpu_debugfs_ring_info, 0, &si_vce2_index}, +static const struct file_operations amdgpu_debugfs_ring_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_ring_read, + .llseek = default_llseek }; #endif -static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring) +static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, + struct amdgpu_ring *ring) { #if defined(CONFIG_DEBUG_FS) - unsigned i; - for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) { - struct drm_info_list *info = &amdgpu_debugfs_ring_info_list[i]; - int roffset = *(int*)amdgpu_debugfs_ring_info_list[i].data; - struct amdgpu_ring *other = (void *)(((uint8_t*)adev) + roffset); - unsigned r; + struct drm_minor *minor = 
adev->ddev->primary; + struct dentry *ent, *root = minor->debugfs_root; + char name[32]; - if (other != ring) - continue; + sprintf(name, "amdgpu_ring_%s", ring->name); - r = amdgpu_debugfs_add_files(adev, info, 1); - if (r) - return r; - } + ent = debugfs_create_file(name, + S_IFREG | S_IRUGO, root, + ring, &amdgpu_debugfs_ring_fops); + if (IS_ERR(ent)) + return PTR_ERR(ent); + + i_size_write(ent->d_inode, ring->ring_size + 12); + ring->ent = ent; #endif return 0; } + +static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring) +{ +#if defined(CONFIG_DEBUG_FS) + debugfs_remove(ring->ent); +#endif +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 8bf84efafb04..d8af37a845f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -115,6 +115,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev, return r; } r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr); + memset(sa_manager->cpu_ptr, 0, sa_manager->size); amdgpu_bo_unreserve(sa_manager->bo); return r; } @@ -427,7 +428,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, soffset, eoffset, eoffset - soffset); if (i->fence) - seq_printf(m, " protected by 0x%08x on context %d", + seq_printf(m, " protected by 0x%08x on context %llu", i->fence->seqno, i->fence->context); seq_printf(m, "\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index c48b4fce5e57..5c8d3022fb87 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -109,6 +109,29 @@ static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence) } /** + * amdgpu_sync_add_later - add the fence to the hash + * + * @sync: sync object to add the fence to + * @f: fence to add + * + * Tries to add the fence to an existing hash entry. Returns true when an entry + * was found, false otherwise. + */ +static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f) +{ + struct amdgpu_sync_entry *e; + + hash_for_each_possible(sync->fences, e, node, f->context) { + if (unlikely(e->fence->context != f->context)) + continue; + + amdgpu_sync_keep_later(&e->fence, f); + return true; + } + return false; +} + +/** * amdgpu_sync_fence - remember to sync to this fence * * @sync: sync object to add fence to @@ -127,13 +150,8 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM) amdgpu_sync_keep_later(&sync->last_vm_update, f); - hash_for_each_possible(sync->fences, e, node, f->context) { - if (unlikely(e->fence->context != f->context)) - continue; - - amdgpu_sync_keep_later(&e->fence, f); + if (amdgpu_sync_add_later(sync, f)) return 0; - } e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL); if (!e) @@ -204,45 +222,78 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, return r; } -struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) +/** + * amdgpu_sync_peek_fence - get the next fence not signaled yet + * + * @sync: the sync object + * @ring: optional ring to use for test + * + * Returns the next fence not signaled yet without removing it from the sync + * object. 
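[Editor's note: since the new per-ring debugfs file is raw binary rather than seq_file text, it can be consumed with a plain read(). A hypothetical userspace reader follows; the path assumes debugfs is mounted at /sys/kernel/debug, DRM minor 0, and a ring named "gfx", none of which is guaranteed by the patch itself. Per the layout comment above, the first three u32s are rptr, wptr and the driver's copy of wptr, followed by the raw ring contents.]

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* example path; the real file name depends on ring->name */
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_ring_gfx", O_RDONLY);
	uint32_t hdr[3];

	if (fd < 0)
		return 1;
	/* reads must be 4-byte sized and aligned, see the -EINVAL check above */
	if (read(fd, hdr, sizeof(hdr)) != sizeof(hdr)) {
		close(fd);
		return 1;
	}
	printf("rptr=0x%08x wptr=0x%08x driver wptr=0x%08x\n",
	       hdr[0], hdr[1], hdr[2]);
	close(fd);
	return 0;
}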
+ */ +struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, + struct amdgpu_ring *ring) { struct amdgpu_sync_entry *e; struct hlist_node *tmp; - struct fence *f; int i; hash_for_each_safe(sync->fences, i, tmp, e, node) { + struct fence *f = e->fence; + struct amd_sched_fence *s_fence = to_amd_sched_fence(f); - f = e->fence; + if (ring && s_fence) { + /* For fences from the same ring it is sufficient + * when they are scheduled. + */ + if (s_fence->sched == &ring->sched) { + if (fence_is_signaled(&s_fence->scheduled)) + continue; - hash_del(&e->node); - kmem_cache_free(amdgpu_sync_slab, e); + return &s_fence->scheduled; + } + } - if (!fence_is_signaled(f)) - return f; + if (fence_is_signaled(f)) { + hash_del(&e->node); + fence_put(f); + kmem_cache_free(amdgpu_sync_slab, e); + continue; + } - fence_put(f); + return f; } + return NULL; } -int amdgpu_sync_wait(struct amdgpu_sync *sync) +/** + * amdgpu_sync_get_fence - get the next fence from the sync object + * + * @sync: sync object to use + * + * Get and removes the next fence from the sync object not signaled yet. + */ +struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) { struct amdgpu_sync_entry *e; struct hlist_node *tmp; - int i, r; + struct fence *f; + int i; hash_for_each_safe(sync->fences, i, tmp, e, node) { - r = fence_wait(e->fence, false); - if (r) - return r; + + f = e->fence; hash_del(&e->node); - fence_put(e->fence); kmem_cache_free(amdgpu_sync_slab, e); - } - return 0; + if (!fence_is_signaled(f)) + return f; + + fence_put(f); + } + return NULL; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 26a5f4acf584..0d8d65eb46cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -11,19 +11,68 @@ #define TRACE_SYSTEM amdgpu #define TRACE_INCLUDE_FILE amdgpu_trace +TRACE_EVENT(amdgpu_mm_rreg, + TP_PROTO(unsigned did, uint32_t reg, uint32_t value), + TP_ARGS(did, reg, value), + TP_STRUCT__entry( + __field(unsigned, did) + __field(uint32_t, reg) + __field(uint32_t, value) + ), + TP_fast_assign( + __entry->did = did; + __entry->reg = reg; + __entry->value = value; + ), + TP_printk("0x%04lx, 0x%04lx, 0x%08lx", + (unsigned long)__entry->did, + (unsigned long)__entry->reg, + (unsigned long)__entry->value) +); + +TRACE_EVENT(amdgpu_mm_wreg, + TP_PROTO(unsigned did, uint32_t reg, uint32_t value), + TP_ARGS(did, reg, value), + TP_STRUCT__entry( + __field(unsigned, did) + __field(uint32_t, reg) + __field(uint32_t, value) + ), + TP_fast_assign( + __entry->did = did; + __entry->reg = reg; + __entry->value = value; + ), + TP_printk("0x%04lx, 0x%04lx, 0x%08lx", + (unsigned long)__entry->did, + (unsigned long)__entry->reg, + (unsigned long)__entry->value) +); + TRACE_EVENT(amdgpu_bo_create, TP_PROTO(struct amdgpu_bo *bo), TP_ARGS(bo), TP_STRUCT__entry( __field(struct amdgpu_bo *, bo) __field(u32, pages) + __field(u32, type) + __field(u32, prefer) + __field(u32, allow) + __field(u32, visible) ), TP_fast_assign( __entry->bo = bo; __entry->pages = bo->tbo.num_pages; + __entry->type = bo->tbo.mem.mem_type; + __entry->prefer = bo->prefered_domains; + __entry->allow = bo->allowed_domains; + __entry->visible = bo->flags; ), - TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) + + TP_printk("bo=%p,pages=%u,type=%d,prefered=%d,allowed=%d,visible=%d", + __entry->bo, __entry->pages, __entry->type, + __entry->prefer, __entry->allow, __entry->visible) ); TRACE_EVENT(amdgpu_cs, @@ -64,7 +113,7 @@ TRACE_EVENT(amdgpu_cs_ioctl, 
__entry->adev = job->adev; __entry->sched_job = &job->base; __entry->ib = job->ibs; - __entry->fence = &job->base.s_fence->base; + __entry->fence = &job->base.s_fence->finished; __entry->ring_name = job->ring->name; __entry->num_ibs = job->num_ibs; ), @@ -89,7 +138,7 @@ TRACE_EVENT(amdgpu_sched_run_job, __entry->adev = job->adev; __entry->sched_job = &job->base; __entry->ib = job->ibs; - __entry->fence = &job->base.s_fence->base; + __entry->fence = &job->base.s_fence->finished; __entry->ring_name = job->ring->name; __entry->num_ibs = job->num_ibs; ), @@ -100,24 +149,26 @@ TRACE_EVENT(amdgpu_sched_run_job, TRACE_EVENT(amdgpu_vm_grab_id, - TP_PROTO(struct amdgpu_vm *vm, int ring, unsigned vmid, - uint64_t pd_addr), - TP_ARGS(vm, ring, vmid, pd_addr), + TP_PROTO(struct amdgpu_vm *vm, int ring, struct amdgpu_job *job), + TP_ARGS(vm, ring, job), TP_STRUCT__entry( __field(struct amdgpu_vm *, vm) __field(u32, ring) __field(u32, vmid) __field(u64, pd_addr) + __field(u32, needs_flush) ), TP_fast_assign( __entry->vm = vm; __entry->ring = ring; - __entry->vmid = vmid; - __entry->pd_addr = pd_addr; + __entry->vmid = job->vm_id; + __entry->pd_addr = job->vm_pd_addr; + __entry->needs_flush = job->vm_needs_flush; ), - TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx", __entry->vm, - __entry->ring, __entry->vmid, __entry->pd_addr) + TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx needs_flush=%u", + __entry->vm, __entry->ring, __entry->vmid, + __entry->pd_addr, __entry->needs_flush) ); TRACE_EVENT(amdgpu_vm_bo_map, @@ -244,13 +295,55 @@ TRACE_EVENT(amdgpu_bo_list_set, TP_STRUCT__entry( __field(struct amdgpu_bo_list *, list) __field(struct amdgpu_bo *, bo) + __field(u64, bo_size) ), TP_fast_assign( __entry->list = list; __entry->bo = bo; + __entry->bo_size = amdgpu_bo_size(bo); ), - TP_printk("list=%p, bo=%p", __entry->list, __entry->bo) + TP_printk("list=%p, bo=%p, bo_size = %Ld", + __entry->list, + __entry->bo, + __entry->bo_size) +); + +TRACE_EVENT(amdgpu_cs_bo_status, + TP_PROTO(uint64_t total_bo, uint64_t total_size), + TP_ARGS(total_bo, total_size), + TP_STRUCT__entry( + __field(u64, total_bo) + __field(u64, total_size) + ), + + TP_fast_assign( + __entry->total_bo = total_bo; + __entry->total_size = total_size; + ), + TP_printk("total bo size = %Ld, total bo count = %Ld", + __entry->total_bo, __entry->total_size) +); + +TRACE_EVENT(amdgpu_ttm_bo_move, + TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement), + TP_ARGS(bo, new_placement, old_placement), + TP_STRUCT__entry( + __field(struct amdgpu_bo *, bo) + __field(u64, bo_size) + __field(u32, new_placement) + __field(u32, old_placement) + ), + + TP_fast_assign( + __entry->bo = bo; + __entry->bo_size = amdgpu_bo_size(bo); + __entry->new_placement = new_placement; + __entry->old_placement = old_placement; + ), + TP_printk("bo=%p from:%d to %d with size = %Ld", + __entry->bo, __entry->old_placement, + __entry->new_placement, __entry->bo_size) ); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index ab34190859a8..b7742e62972a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -223,6 +223,8 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) { struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo); + if (amdgpu_ttm_tt_get_usermm(bo->ttm)) + return -EPERM; return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp); } @@ -284,9 +286,10 @@ static int amdgpu_move_blit(struct 
ttm_buffer_object *bo, r = amdgpu_copy_buffer(ring, old_start, new_start, new_mem->num_pages * PAGE_SIZE, /* bytes */ bo->resv, &fence); - /* FIXME: handle copy error */ - r = ttm_bo_move_accel_cleanup(bo, fence, - evict, no_wait_gpu, new_mem); + if (r) + return r; + + r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); fence_put(fence); return r; } @@ -384,10 +387,21 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct amdgpu_device *adev; + struct amdgpu_bo *abo; struct ttm_mem_reg *old_mem = &bo->mem; int r; + /* Can't move a pinned BO */ + abo = container_of(bo, struct amdgpu_bo, tbo); + if (WARN_ON_ONCE(abo->pin_count > 0)) + return -EINVAL; + adev = amdgpu_get_adev(bo->bdev); + + /* remember the eviction */ + if (evict) + atomic64_inc(&adev->num_evictions); + if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { amdgpu_move_null(bo, new_mem); return 0; @@ -421,7 +435,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, if (r) { memcpy: - r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); + r = ttm_bo_move_memcpy(bo, evict, interruptible, + no_wait_gpu, new_mem); if (r) { return r; } @@ -616,7 +631,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) set_page_dirty(page); mark_page_accessed(page); - page_cache_release(page); + put_page(page); } sg_free_table(ttm->sg); @@ -903,6 +918,52 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, return flags; } +static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo) +{ + struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev); + unsigned i, j; + + for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) { + struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i]; + + for (j = 0; j < TTM_NUM_MEM_TYPES; ++j) + if (&tbo->lru == lru->lru[j]) + lru->lru[j] = tbo->lru.prev; + + if (&tbo->swap == lru->swap_lru) + lru->swap_lru = tbo->swap.prev; + } +} + +static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo) +{ + struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev); + unsigned log2_size = min(ilog2(tbo->num_pages), + AMDGPU_TTM_LRU_SIZE - 1); + + return &adev->mman.log2_size[log2_size]; +} + +static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo) +{ + struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo); + struct list_head *res = lru->lru[tbo->mem.mem_type]; + + lru->lru[tbo->mem.mem_type] = &tbo->lru; + + return res; +} + +static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo) +{ + struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo); + struct list_head *res = lru->swap_lru; + + lru->swap_lru = &tbo->swap; + + return res; +} + static struct ttm_bo_driver amdgpu_bo_driver = { .ttm_tt_create = &amdgpu_ttm_tt_create, .ttm_tt_populate = &amdgpu_ttm_tt_populate, @@ -916,10 +977,14 @@ static struct ttm_bo_driver amdgpu_bo_driver = { .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify, .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, .io_mem_free = &amdgpu_ttm_io_mem_free, + .lru_removal = &amdgpu_ttm_lru_removal, + .lru_tail = &amdgpu_ttm_lru_tail, + .swap_lru_tail = &amdgpu_ttm_swap_lru_tail, }; int amdgpu_ttm_init(struct amdgpu_device *adev) { + unsigned i, j; int r; r = amdgpu_ttm_global_init(adev); @@ -937,6 +1002,15 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_ERROR("failed initializing buffer object driver(%d).\n", r); return r; } + + for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) { + struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i]; + + for (j = 0; j < 
TTM_NUM_MEM_TYPES; ++j) + lru->lru[j] = &adev->mman.bdev.man[j].lru; + lru->swap_lru = &adev->mman.bdev.glob->swap_lru; + } + adev->mman.initialized = true; r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM, adev->mc.real_vram_size >> PAGE_SHIFT); @@ -1159,7 +1233,7 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data) static int ttm_pl_vram = TTM_PL_VRAM; static int ttm_pl_tt = TTM_PL_TT; -static struct drm_info_list amdgpu_ttm_debugfs_list[] = { +static const struct drm_info_list amdgpu_ttm_debugfs_list[] = { {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram}, {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt}, {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL}, @@ -1210,6 +1284,8 @@ static const struct file_operations amdgpu_ttm_vram_fops = { .llseek = default_llseek }; +#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS + static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -1257,6 +1333,8 @@ static const struct file_operations amdgpu_ttm_gtt_fops = { #endif +#endif + static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) { #if defined(CONFIG_DEBUG_FS) @@ -1272,6 +1350,7 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) i_size_write(ent->d_inode, adev->mc.mc_vram_size); adev->mman.vram = ent; +#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root, adev, &amdgpu_ttm_gtt_fops); if (IS_ERR(ent)) @@ -1279,6 +1358,7 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) i_size_write(ent->d_inode, adev->mc.gtt_size); adev->mman.gtt = ent; +#endif count = ARRAY_SIZE(amdgpu_ttm_debugfs_list); #ifdef CONFIG_SWIOTLB @@ -1300,7 +1380,10 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev) debugfs_remove(adev->mman.vram); adev->mman.vram = NULL; +#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS debugfs_remove(adev->mman.gtt); adev->mman.gtt = NULL; #endif + +#endif } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index c1a581044417..b11f4e8868d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -40,20 +40,31 @@ #include "uvd/uvd_4_2_d.h" /* 1 second timeout */ -#define UVD_IDLE_TIMEOUT_MS 1000 +#define UVD_IDLE_TIMEOUT msecs_to_jiffies(1000) + +/* Firmware versions for VI */ +#define FW_1_65_10 ((1 << 24) | (65 << 16) | (10 << 8)) +#define FW_1_87_11 ((1 << 24) | (87 << 16) | (11 << 8)) +#define FW_1_87_12 ((1 << 24) | (87 << 16) | (12 << 8)) +#define FW_1_37_15 ((1 << 24) | (37 << 16) | (15 << 8)) + +/* Polaris10/11 firmware version */ +#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8)) /* Firmware Names */ #ifdef CONFIG_DRM_AMDGPU_CIK #define FIRMWARE_BONAIRE "radeon/bonaire_uvd.bin" -#define FIRMWARE_KABINI "radeon/kabini_uvd.bin" -#define FIRMWARE_KAVERI "radeon/kaveri_uvd.bin" -#define FIRMWARE_HAWAII "radeon/hawaii_uvd.bin" +#define FIRMWARE_KABINI "radeon/kabini_uvd.bin" +#define FIRMWARE_KAVERI "radeon/kaveri_uvd.bin" +#define FIRMWARE_HAWAII "radeon/hawaii_uvd.bin" #define FIRMWARE_MULLINS "radeon/mullins_uvd.bin" #endif #define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin" #define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin" #define FIRMWARE_FIJI "amdgpu/fiji_uvd.bin" #define FIRMWARE_STONEY "amdgpu/stoney_uvd.bin" +#define FIRMWARE_POLARIS10 "amdgpu/polaris10_uvd.bin" +#define FIRMWARE_POLARIS11 "amdgpu/polaris11_uvd.bin" /** * amdgpu_uvd_cs_ctx - Command submission parser context @@ -85,8 +96,9 @@ MODULE_FIRMWARE(FIRMWARE_TONGA); 
MODULE_FIRMWARE(FIRMWARE_CARRIZO); MODULE_FIRMWARE(FIRMWARE_FIJI); MODULE_FIRMWARE(FIRMWARE_STONEY); +MODULE_FIRMWARE(FIRMWARE_POLARIS10); +MODULE_FIRMWARE(FIRMWARE_POLARIS11); -static void amdgpu_uvd_note_usage(struct amdgpu_device *adev); static void amdgpu_uvd_idle_work_handler(struct work_struct *work); int amdgpu_uvd_sw_init(struct amdgpu_device *adev) @@ -131,6 +143,12 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) case CHIP_STONEY: fw_name = FIRMWARE_STONEY; break; + case CHIP_POLARIS10: + fw_name = FIRMWARE_POLARIS10; + break; + case CHIP_POLARIS11: + fw_name = FIRMWARE_POLARIS11; + break; default: return -EINVAL; } @@ -151,6 +169,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) return r; } + /* Set the default UVD handles that the firmware can handle */ + adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES; + hdr = (const struct common_firmware_header *)adev->uvd.fw->data; family_id = le32_to_cpu(hdr->ucode_version) & 0xff; version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; @@ -158,8 +179,28 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n", version_major, version_minor, family_id); + /* + * Limit the number of UVD handles depending on microcode major + * and minor versions. The firmware version which has 40 UVD + * instances support is 1.80. So all subsequent versions should + * also have the same support. + */ + if ((version_major > 0x01) || + ((version_major == 0x01) && (version_minor >= 0x50))) + adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES; + + adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) | + (family_id << 8)); + + if ((adev->asic_type == CHIP_POLARIS10 || + adev->asic_type == CHIP_POLARIS11) && + (adev->uvd.fw_version < FW_1_66_16)) + DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n", + version_major, version_minor); + bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8) - + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE; + + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE + + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles; r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, @@ -202,7 +243,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) return r; } - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { + for (i = 0; i < adev->uvd.max_handles; ++i) { atomic_set(&adev->uvd.handles[i], 0); adev->uvd.filp[i] = NULL; } @@ -211,6 +252,23 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0)) adev->uvd.address_64_bit = true; + switch (adev->asic_type) { + case CHIP_TONGA: + adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10; + break; + case CHIP_CARRIZO: + adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11; + break; + case CHIP_FIJI: + adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12; + break; + case CHIP_STONEY: + adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15; + break; + default: + adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10; + } + return 0; } @@ -218,19 +276,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) { int r; - if (adev->uvd.vcpu_bo == NULL) - return 0; + kfree(adev->uvd.saved_bo); amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); - r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); - if (!r) { - amdgpu_bo_kunmap(adev->uvd.vcpu_bo); - amdgpu_bo_unpin(adev->uvd.vcpu_bo); - amdgpu_bo_unreserve(adev->uvd.vcpu_bo); - } + 
if (adev->uvd.vcpu_bo) { + r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); + if (!r) { + amdgpu_bo_kunmap(adev->uvd.vcpu_bo); + amdgpu_bo_unpin(adev->uvd.vcpu_bo); + amdgpu_bo_unreserve(adev->uvd.vcpu_bo); + } - amdgpu_bo_unref(&adev->uvd.vcpu_bo); + amdgpu_bo_unref(&adev->uvd.vcpu_bo); + } amdgpu_ring_fini(&adev->uvd.ring); @@ -241,32 +300,30 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) int amdgpu_uvd_suspend(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->uvd.ring; - int i, r; + unsigned size; + void *ptr; + int i; if (adev->uvd.vcpu_bo == NULL) return 0; - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { - uint32_t handle = atomic_read(&adev->uvd.handles[i]); - if (handle != 0) { - struct fence *fence; + for (i = 0; i < adev->uvd.max_handles; ++i) + if (atomic_read(&adev->uvd.handles[i])) + break; - amdgpu_uvd_note_usage(adev); + if (i == AMDGPU_MAX_UVD_HANDLES) + return 0; - r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence); - if (r) { - DRM_ERROR("Error destroying UVD (%d)!\n", r); - continue; - } + cancel_delayed_work_sync(&adev->uvd.idle_work); - fence_wait(fence, false); - fence_put(fence); + size = amdgpu_bo_size(adev->uvd.vcpu_bo); + ptr = adev->uvd.cpu_addr; - adev->uvd.filp[i] = NULL; - atomic_set(&adev->uvd.handles[i], 0); - } - } + adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); + if (!adev->uvd.saved_bo) + return -ENOMEM; + + memcpy(adev->uvd.saved_bo, ptr, size); return 0; } @@ -275,23 +332,29 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) { unsigned size; void *ptr; - const struct common_firmware_header *hdr; - unsigned offset; if (adev->uvd.vcpu_bo == NULL) return -EINVAL; - hdr = (const struct common_firmware_header *)adev->uvd.fw->data; - offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset, - (adev->uvd.fw->size) - offset); - size = amdgpu_bo_size(adev->uvd.vcpu_bo); - size -= le32_to_cpu(hdr->ucode_size_bytes); ptr = adev->uvd.cpu_addr; - ptr += le32_to_cpu(hdr->ucode_size_bytes); - memset(ptr, 0, size); + if (adev->uvd.saved_bo != NULL) { + memcpy(ptr, adev->uvd.saved_bo, size); + kfree(adev->uvd.saved_bo); + adev->uvd.saved_bo = NULL; + } else { + const struct common_firmware_header *hdr; + unsigned offset; + + hdr = (const struct common_firmware_header *)adev->uvd.fw->data; + offset = le32_to_cpu(hdr->ucode_array_offset_bytes); + memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset, + (adev->uvd.fw->size) - offset); + size -= le32_to_cpu(hdr->ucode_size_bytes); + ptr += le32_to_cpu(hdr->ucode_size_bytes); + memset(ptr, 0, size); + } return 0; } @@ -301,13 +364,11 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) struct amdgpu_ring *ring = &adev->uvd.ring; int i, r; - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { + for (i = 0; i < adev->uvd.max_handles; ++i) { uint32_t handle = atomic_read(&adev->uvd.handles[i]); if (handle != 0 && adev->uvd.filp[i] == filp) { struct fence *fence; - amdgpu_uvd_note_usage(adev); - r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence); if (r) { @@ -383,7 +444,8 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx) * * Peek into the decode message and calculate the necessary buffer sizes. 
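[Editor's note: the decode-message parsing below sizes the DPB from the stream's H264 level, and the arithmetic is compact enough to check standalone. This runnable sketch mirrors the per-level macroblock budgets from amdgpu_uvd_cs_msg_decode for the plain H264 case only; the helper name and the sample resolution are invented for illustration.]

#include <stdio.h>

static unsigned h264_dpb_buffers(unsigned level, unsigned fs_in_mb)
{
	unsigned budget;

	switch (level) {
	case 30: budget = 8100;   break;
	case 31: budget = 18000;  break;
	case 32: budget = 20480;  break;
	case 41: budget = 32768;  break;
	case 42: budget = 34816;  break;
	case 50: budget = 110400; break;
	case 51:
	default: budget = 184320; break;
	}
	/* per-level macroblock budget divided by frame size, plus one,
	 * clamped to 17 buffers, exactly as in the driver code */
	budget = budget / fs_in_mb + 1;
	return budget > 17 ? 17 : budget;
}

int main(void)
{
	/* 1920x1088 frame: 120 x 68 macroblocks */
	printf("level 41: %u DPB buffers\n", h264_dpb_buffers(41, 120 * 68));
	return 0;
}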
*/ -static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) +static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg, + unsigned buf_sizes[]) { unsigned stream_type = msg[4]; unsigned width = msg[6]; @@ -397,7 +459,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) unsigned fs_in_mb = width_in_mb * height_in_mb; unsigned image_size, tmp, min_dpb_size, num_dpb_buffer; - unsigned min_ctx_size = 0; + unsigned min_ctx_size = ~0; image_size = width * height; image_size += image_size / 2; @@ -405,7 +467,6 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) switch (stream_type) { case 0: /* H264 */ - case 7: /* H264 Perf */ switch(level) { case 30: num_dpb_buffer = 8100 / fs_in_mb; @@ -483,6 +544,54 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64); break; + case 7: /* H264 Perf */ + switch(level) { + case 30: + num_dpb_buffer = 8100 / fs_in_mb; + break; + case 31: + num_dpb_buffer = 18000 / fs_in_mb; + break; + case 32: + num_dpb_buffer = 20480 / fs_in_mb; + break; + case 41: + num_dpb_buffer = 32768 / fs_in_mb; + break; + case 42: + num_dpb_buffer = 34816 / fs_in_mb; + break; + case 50: + num_dpb_buffer = 110400 / fs_in_mb; + break; + case 51: + num_dpb_buffer = 184320 / fs_in_mb; + break; + default: + num_dpb_buffer = 184320 / fs_in_mb; + break; + } + num_dpb_buffer++; + if (num_dpb_buffer > 17) + num_dpb_buffer = 17; + + /* reference picture buffer */ + min_dpb_size = image_size * num_dpb_buffer; + + if (!adev->uvd.use_ctx_buf){ + /* macroblock context buffer */ + min_dpb_size += + width_in_mb * height_in_mb * num_dpb_buffer * 192; + + /* IT surface buffer */ + min_dpb_size += width_in_mb * height_in_mb * 32; + } else { + /* macroblock context buffer */ + min_ctx_size = + width_in_mb * height_in_mb * num_dpb_buffer * 192; + } + break; + case 16: /* H265 */ image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2; image_size = ALIGN(image_size, 256); @@ -561,7 +670,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, amdgpu_bo_kunmap(bo); /* try to alloc a new handle */ - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { + for (i = 0; i < adev->uvd.max_handles; ++i) { if (atomic_read(&adev->uvd.handles[i]) == handle) { DRM_ERROR("Handle 0x%x already in use!\n", handle); return -EINVAL; @@ -574,17 +683,17 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, } DRM_ERROR("No more free UVD handles!\n"); - return -EINVAL; + return -ENOSPC; case 1: /* it's a decode msg, calc buffer sizes */ - r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes); + r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes); amdgpu_bo_kunmap(bo); if (r) return r; /* validate the handle */ - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { + for (i = 0; i < adev->uvd.max_handles; ++i) { if (atomic_read(&adev->uvd.handles[i]) == handle) { if (adev->uvd.filp[i] != ctx->parser->filp) { DRM_ERROR("UVD handle collision detected!\n"); @@ -599,7 +708,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, case 2: /* it's a destroy msg, free the handle */ - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) + for (i = 0; i < adev->uvd.max_handles; ++i) atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); amdgpu_bo_kunmap(bo); return 0; @@ -825,8 +934,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) return -EINVAL; } - amdgpu_uvd_note_usage(ctx.parser->adev); - return 0; } @@ -879,8 +986,8 @@ static int 
amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ib->length_dw = 16; if (direct) { - r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); - job->fence = f; + r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); + job->fence = fence_get(f); if (r) goto err_free; @@ -1011,7 +1118,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) fences = amdgpu_fence_count_emitted(&adev->uvd.ring); - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) + for (i = 0; i < adev->uvd.max_handles; ++i) if (atomic_read(&adev->uvd.handles[i])) ++handles; @@ -1022,16 +1129,14 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) amdgpu_asic_set_uvd_clocks(adev, 0, 0); } } else { - schedule_delayed_work(&adev->uvd.idle_work, - msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS)); + schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); } } -static void amdgpu_uvd_note_usage(struct amdgpu_device *adev) +void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) { + struct amdgpu_device *adev = ring->adev; bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); - set_clocks &= schedule_delayed_work(&adev->uvd.idle_work, - msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS)); if (set_clocks) { if (adev->pm.dpm_enabled) { @@ -1041,3 +1146,48 @@ static void amdgpu_uvd_note_usage(struct amdgpu_device *adev) } } } + +void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) +{ + schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT); +} + +/** + * amdgpu_uvd_ring_test_ib - test ib execution + * + * @ring: amdgpu_ring pointer + * + * Test if we can successfully execute an IB + */ +int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) +{ + struct fence *fence; + long r; + + r = amdgpu_uvd_get_create_msg(ring, 1, NULL); + if (r) { + DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); + goto error; + } + + r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); + if (r) { + DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); + goto error; + } + + r = fence_wait_timeout(fence, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out.\n"); + r = -ETIMEDOUT; + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); + } else { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; + } + +error: + fence_put(fence); + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index 9a3b449081a7..c850009602d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -35,5 +35,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx); +void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring); +void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring); +int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 4bec0c108cea..05865ce35351 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -36,20 +36,22 @@ #include "cikd.h" /* 1 second timeout */ -#define VCE_IDLE_TIMEOUT_MS 1000 +#define VCE_IDLE_TIMEOUT msecs_to_jiffies(1000) /* Firmware Names */ #ifdef CONFIG_DRM_AMDGPU_CIK #define FIRMWARE_BONAIRE "radeon/bonaire_vce.bin" -#define FIRMWARE_KABINI "radeon/kabini_vce.bin" -#define 
FIRMWARE_KAVERI "radeon/kaveri_vce.bin" -#define FIRMWARE_HAWAII "radeon/hawaii_vce.bin" +#define FIRMWARE_KABINI "radeon/kabini_vce.bin" +#define FIRMWARE_KAVERI "radeon/kaveri_vce.bin" +#define FIRMWARE_HAWAII "radeon/hawaii_vce.bin" #define FIRMWARE_MULLINS "radeon/mullins_vce.bin" #endif #define FIRMWARE_TONGA "amdgpu/tonga_vce.bin" #define FIRMWARE_CARRIZO "amdgpu/carrizo_vce.bin" #define FIRMWARE_FIJI "amdgpu/fiji_vce.bin" #define FIRMWARE_STONEY "amdgpu/stoney_vce.bin" +#define FIRMWARE_POLARIS10 "amdgpu/polaris10_vce.bin" +#define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin" #ifdef CONFIG_DRM_AMDGPU_CIK MODULE_FIRMWARE(FIRMWARE_BONAIRE); @@ -62,6 +64,8 @@ MODULE_FIRMWARE(FIRMWARE_TONGA); MODULE_FIRMWARE(FIRMWARE_CARRIZO); MODULE_FIRMWARE(FIRMWARE_FIJI); MODULE_FIRMWARE(FIRMWARE_STONEY); +MODULE_FIRMWARE(FIRMWARE_POLARIS10); +MODULE_FIRMWARE(FIRMWARE_POLARIS11); static void amdgpu_vce_idle_work_handler(struct work_struct *work); @@ -81,8 +85,6 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) unsigned ucode_version, version_major, version_minor, binary_id; int i, r; - INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler); - switch (adev->asic_type) { #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_BONAIRE: @@ -113,6 +115,12 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) case CHIP_STONEY: fw_name = FIRMWARE_STONEY; break; + case CHIP_POLARIS10: + fw_name = FIRMWARE_POLARIS10; + break; + case CHIP_POLARIS11: + fw_name = FIRMWARE_POLARIS11; + break; default: return -EINVAL; @@ -187,6 +195,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) adev->vce.filp[i] = NULL; } + INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler); + mutex_init(&adev->vce.idle_mutex); + return 0; } @@ -210,6 +221,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) amdgpu_ring_fini(&adev->vce.ring[1]); release_firmware(adev->vce.fw); + mutex_destroy(&adev->vce.idle_mutex); return 0; } @@ -234,6 +246,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev) if (i == AMDGPU_MAX_VCE_HANDLES) return 0; + cancel_delayed_work_sync(&adev->vce.idle_work); /* TODO: suspending running encoding sessions isn't supported */ return -EINVAL; } @@ -299,37 +312,44 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work) amdgpu_asic_set_vce_clocks(adev, 0, 0); } } else { - schedule_delayed_work(&adev->vce.idle_work, - msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)); + schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT); } } /** - * amdgpu_vce_note_usage - power up VCE + * amdgpu_vce_ring_begin_use - power up VCE * - * @adev: amdgpu_device pointer + * @ring: amdgpu ring * * Make sure VCE is powerd up when we want to use it */ -static void amdgpu_vce_note_usage(struct amdgpu_device *adev) +void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring) { - bool streams_changed = false; - bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work); - set_clocks &= schedule_delayed_work(&adev->vce.idle_work, - msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)); - - if (adev->pm.dpm_enabled) { - /* XXX figure out if the streams changed */ - streams_changed = false; - } + struct amdgpu_device *adev = ring->adev; + bool set_clocks; - if (set_clocks || streams_changed) { + mutex_lock(&adev->vce.idle_mutex); + set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work); + if (set_clocks) { if (adev->pm.dpm_enabled) { amdgpu_dpm_enable_vce(adev, true); } else { amdgpu_asic_set_vce_clocks(adev, 53300, 40000); } } + 
mutex_unlock(&adev->vce.idle_mutex); +} + +/** + * amdgpu_vce_ring_end_use - power VCE down + * + * @ring: amdgpu ring + * + * Schedule work to power VCE down again + */ +void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring) +{ + schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT); } /** @@ -346,11 +366,10 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) int i, r; for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { uint32_t handle = atomic_read(&adev->vce.handles[i]); + if (!handle || adev->vce.filp[i] != filp) continue; - amdgpu_vce_note_usage(adev); - r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL); if (r) DRM_ERROR("Error destroying VCE handle (%d)!\n", r); @@ -425,8 +444,8 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, for (i = ib->length_dw; i < ib_size_dw; ++i) ib->ptr[i] = 0x0; - r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); - job->fence = f; + r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); + job->fence = fence_get(f); if (r) goto err; @@ -458,7 +477,6 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, struct amdgpu_job *job; struct amdgpu_ib *ib; struct fence *f = NULL; - uint64_t dummy; int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); @@ -466,7 +484,6 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, return r; ib = &job->ibs[0]; - dummy = ib->gpu_addr + 1024; /* stitch together a VCE destroy msg */ ib->length_dw = 0; @@ -474,11 +491,14 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */ ib->ptr[ib->length_dw++] = handle; - ib->ptr[ib->length_dw++] = 0x00000014; /* len */ - ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */ - ib->ptr[ib->length_dw++] = upper_32_bits(dummy); - ib->ptr[ib->length_dw++] = dummy; - ib->ptr[ib->length_dw++] = 0x00000001; + ib->ptr[ib->length_dw++] = 0x00000020; /* len */ + ib->ptr[ib->length_dw++] = 0x00000002; /* task info */ + ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */ + ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */ + ib->ptr[ib->length_dw++] = 0x00000000; + ib->ptr[ib->length_dw++] = 0x00000000; + ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */ + ib->ptr[ib->length_dw++] = 0x00000000; ib->ptr[ib->length_dw++] = 0x00000008; /* len */ ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */ @@ -487,8 +507,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[i] = 0x0; if (direct) { - r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); - job->fence = f; + r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); + job->fence = fence_get(f); if (r) goto err; @@ -569,12 +589,10 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, * if we don't have another free session index.
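[Editor's note: the begin_use/end_use pair added above is a common on-demand power pattern: cancel the pending shutdown on first use, re-arm it when the ring goes quiet. A schematic of the idiom follows; it is not buildable outside a kernel tree, and struct engine plus hw_power_up() are stand-ins for the VCE state and the DPM calls, not driver symbols.]

#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct engine {
	struct mutex idle_mutex;
	struct delayed_work idle_work;
};

void hw_power_up(struct engine *e);   /* stand-in for amdgpu_dpm_enable_vce() etc. */

static void ring_begin_use(struct engine *e)
{
	mutex_lock(&e->idle_mutex);
	/* cancel_delayed_work_sync() returns false when no shutdown was
	 * pending, i.e. the idle work may already have powered us off */
	if (!cancel_delayed_work_sync(&e->idle_work))
		hw_power_up(e);
	mutex_unlock(&e->idle_mutex);
}

static void ring_end_use(struct engine *e)
{
	/* power down only if the engine stays idle for a full second */
	schedule_delayed_work(&e->idle_work, msecs_to_jiffies(1000));
}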
*/ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p, - uint32_t handle, bool *allocated) + uint32_t handle, uint32_t *allocated) { unsigned i; - *allocated = false; - /* validate the handle */ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { if (atomic_read(&p->adev->vce.handles[i]) == handle) { @@ -591,7 +609,7 @@ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p, if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) { p->adev->vce.filp[i] = p->filp; p->adev->vce.img_size[i] = 0; - *allocated = true; + *allocated |= 1 << i; return i; } } @@ -611,15 +629,13 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) struct amdgpu_ib *ib = &p->job->ibs[ib_idx]; unsigned fb_idx = 0, bs_idx = 0; int session_idx = -1; - bool destroyed = false; - bool created = false; - bool allocated = false; + uint32_t destroyed = 0; + uint32_t created = 0; + uint32_t allocated = 0; uint32_t tmp, handle = 0; uint32_t *size = &tmp; int i, r = 0, idx = 0; - amdgpu_vce_note_usage(p->adev); - while (idx < ib->length_dw) { uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx); uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1); @@ -630,30 +646,30 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) goto out; } - if (destroyed) { - DRM_ERROR("No other command allowed after destroy!\n"); - r = -EINVAL; - goto out; - } - switch (cmd) { - case 0x00000001: // session + case 0x00000001: /* session */ handle = amdgpu_get_ib_value(p, ib_idx, idx + 2); session_idx = amdgpu_vce_validate_handle(p, handle, &allocated); - if (session_idx < 0) - return session_idx; + if (session_idx < 0) { + r = session_idx; + goto out; + } size = &p->adev->vce.img_size[session_idx]; break; - case 0x00000002: // task info + case 0x00000002: /* task info */ fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6); bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7); break; - case 0x01000001: // create - created = true; - if (!allocated) { + case 0x01000001: /* create */ + created |= 1 << session_idx; + if (destroyed & (1 << session_idx)) { + destroyed &= ~(1 << session_idx); + allocated |= 1 << session_idx; + + } else if (!(allocated & (1 << session_idx))) { DRM_ERROR("Handle already in use!\n"); r = -EINVAL; goto out; @@ -664,16 +680,16 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) 8 * 3 / 2; break; - case 0x04000001: // config extension - case 0x04000002: // pic control - case 0x04000005: // rate control - case 0x04000007: // motion estimation - case 0x04000008: // rdo - case 0x04000009: // vui - case 0x05000002: // auxiliary buffer + case 0x04000001: /* config extension */ + case 0x04000002: /* pic control */ + case 0x04000005: /* rate control */ + case 0x04000007: /* motion estimation */ + case 0x04000008: /* rdo */ + case 0x04000009: /* vui */ + case 0x05000002: /* auxiliary buffer */ break; - case 0x03000001: // encode + case 0x03000001: /* encode */ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9, *size, 0); if (r) @@ -685,18 +701,18 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) goto out; break; - case 0x02000001: // destroy - destroyed = true; + case 0x02000001: /* destroy */ + destroyed |= 1 << session_idx; break; - case 0x05000001: // context buffer + case 0x05000001: /* context buffer */ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, *size * 2, 0); if (r) goto out; break; - case 0x05000004: // video bitstream buffer + case 0x05000004: /* video bitstream buffer */ tmp = amdgpu_get_ib_value(p, ib_idx, 
idx + 4); r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, tmp, bs_idx); @@ -704,7 +720,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) goto out; break; - case 0x05000005: // feedback buffer + case 0x05000005: /* feedback buffer */ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, 4096, fb_idx); if (r) @@ -726,21 +742,24 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) idx += len / 4; } - if (allocated && !created) { + if (allocated & ~created) { DRM_ERROR("New session without create command!\n"); r = -ENOENT; } out: - if ((!r && destroyed) || (r && allocated)) { - /* - * IB contains a destroy msg or we have allocated an - * handle and got an error, anyway free the handle - */ - for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) - atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0); + if (!r) { + /* No error, free all destroyed handle slots */ + tmp = destroyed; + } else { + /* Error during parsing, free all allocated handle slots */ + tmp = allocated; } + for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) + if (tmp & (1 << i)) + atomic_set(&p->adev->vce.handles[i], 0); + return r; } @@ -751,7 +770,8 @@ out: * @ib: the IB to execute * */ -void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) +void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { amdgpu_ring_write(ring, VCE_CMD_IB); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); @@ -825,10 +845,10 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) * @ring: the engine to test on * */ -int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring) +int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct fence *fence = NULL; - int r; + long r; /* skip vce ring1 ib test for now, since it's not reliable */ if (ring == &ring->adev->vce.ring[1]) @@ -836,21 +856,25 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring) r = amdgpu_vce_get_create_msg(ring, 1, NULL); if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); + DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); goto error; } r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence); if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); goto error; } - r = fence_wait(fence, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(fence, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out.\n"); + r = -ETIMEDOUT; + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); } else { DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } error: fence_put(fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index ef99d2370182..63f83d0d985c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -34,10 +34,13 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, bool direct, struct fence **fence); void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); -void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); +void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch); void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags); int 
amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring); -int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring); +int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout); +void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring); +void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b6c011b83641..8e642fc48df4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -25,6 +25,7 @@ * Alex Deucher * Jerome Glisse */ +#include <linux/fence-array.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" @@ -53,6 +54,18 @@ /* Special value that no flush is necessary */ #define AMDGPU_VM_NO_FLUSH (~0ll) +/* Local structure. Encapsulate some VM table update parameters to reduce + * the number of function parameters + */ +struct amdgpu_vm_update_params { + /* address where to copy page table entries from */ + uint64_t src; + /* DMA addresses to use for mapping */ + dma_addr_t *pages_addr; + /* indirect buffer to fill with commands */ + struct amdgpu_ib *ib; +}; + /** * amdgpu_vm_num_pde - return the number of page directory entries * @@ -102,16 +115,26 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, /** * amdgpu_vm_get_bos - add the vm BOs to a duplicates list * + * @adev: amdgpu device pointer * @vm: vm providing the BOs * @duplicates: head of duplicates list * * Add the page directory to the BO duplicates list * for command submission. */ -void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates) +void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct list_head *duplicates) { + uint64_t num_evictions; unsigned i; + /* We only need to validate the page tables + * if they aren't already valid. + */ + num_evictions = atomic64_read(&adev->num_evictions); + if (num_evictions == vm->last_eviction_counter) + return; + /* add the vm page table to the list */ for (i = 0; i <= vm->max_pde_used; ++i) { struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry; @@ -150,6 +173,13 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, spin_unlock(&glob->lru_lock); } +static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev, + struct amdgpu_vm_id *id) +{ + return id->current_gpu_reset_count != + atomic_read(&adev->gpu_reset_counter) ? 
true : false; +} + /** * amdgpu_vm_grab_id - allocate the next free VMID * @@ -162,82 +192,175 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, */ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_sync *sync, struct fence *fence, - unsigned *vm_id, uint64_t *vm_pd_addr) + struct amdgpu_job *job) { - uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); struct amdgpu_device *adev = ring->adev; - struct amdgpu_vm_id *id = &vm->ids[ring->idx]; + uint64_t fence_context = adev->fence_context + ring->idx; struct fence *updates = sync->last_vm_update; - int r; + struct amdgpu_vm_id *id, *idle; + struct fence **fences; + unsigned i; + int r = 0; + + fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids, + GFP_KERNEL); + if (!fences) + return -ENOMEM; mutex_lock(&adev->vm_manager.lock); - /* check if the id is still valid */ - if (id->mgr_id) { - struct fence *flushed = id->flushed_updates; - bool is_later; - long owner; + /* Check if we have an idle VMID */ + i = 0; + list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) { + fences[i] = amdgpu_sync_peek_fence(&idle->active, ring); + if (!fences[i]) + break; + ++i; + } - if (!flushed) - is_later = true; - else if (!updates) - is_later = false; - else - is_later = fence_is_later(updates, flushed); + /* If we can't find an idle VMID to use, wait until one becomes available */ + if (&idle->list == &adev->vm_manager.ids_lru) { + u64 fence_context = adev->vm_manager.fence_context + ring->idx; + unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; + struct fence_array *array; + unsigned j; + + for (j = 0; j < i; ++j) + fence_get(fences[j]); + + array = fence_array_create(i, fences, fence_context, + seqno, true); + if (!array) { + for (j = 0; j < i; ++j) + fence_put(fences[j]); + kfree(fences); + r = -ENOMEM; + goto error; + } - owner = atomic_long_read(&id->mgr_id->owner); - if (!is_later && owner == (long)id && - pd_addr == id->pd_gpu_addr) { - r = amdgpu_sync_fence(ring->adev, sync, - id->mgr_id->active); - if (r) { - mutex_unlock(&adev->vm_manager.lock); - return r; - } + r = amdgpu_sync_fence(ring->adev, sync, &array->base); + fence_put(&array->base); + if (r) + goto error; + + mutex_unlock(&adev->vm_manager.lock); + return 0; - fence_put(id->mgr_id->active); - id->mgr_id->active = fence_get(fence); + } + kfree(fences); - list_move_tail(&id->mgr_id->list, - &adev->vm_manager.ids_lru); + job->vm_needs_flush = true; + /* Check if we can use a VMID already assigned to this VM */ + i = ring->idx; + do { + struct fence *flushed; - *vm_id = id->mgr_id - adev->vm_manager.ids; - *vm_pd_addr = AMDGPU_VM_NO_FLUSH; - trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, - *vm_pd_addr); + id = vm->ids[i++]; + if (i == AMDGPU_MAX_RINGS) + i = 0; - mutex_unlock(&adev->vm_manager.lock); - return 0; - } - } + /* Check all the prerequisites to using this VMID */ + if (!id) + continue; + if (amdgpu_vm_is_gpu_reset(adev, id)) + continue; - id->mgr_id = list_first_entry(&adev->vm_manager.ids_lru, - struct amdgpu_vm_manager_id, - list); + if (atomic64_read(&id->owner) != vm->client_id) + continue; - r = amdgpu_sync_fence(ring->adev, sync, id->mgr_id->active); - if (!r) { - fence_put(id->mgr_id->active); - id->mgr_id->active = fence_get(fence); + if (job->vm_pd_addr != id->pd_gpu_addr) + continue; - fence_put(id->flushed_updates); - id->flushed_updates = fence_get(updates); + if (!id->last_flush) + continue; - id->pd_gpu_addr = pd_addr; + if (id->last_flush->context != fence_context && + 
!fence_is_signaled(id->last_flush)) + continue; - list_move_tail(&id->mgr_id->list, &adev->vm_manager.ids_lru); - atomic_long_set(&id->mgr_id->owner, (long)id); + flushed = id->flushed_updates; + if (updates && + (!flushed || fence_is_later(updates, flushed))) + continue; - *vm_id = id->mgr_id - adev->vm_manager.ids; - *vm_pd_addr = pd_addr; - trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr); - } + /* Good we can use this VMID. Remember this submission as + * user of the VMID. + */ + r = amdgpu_sync_fence(ring->adev, &id->active, fence); + if (r) + goto error; + + id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); + list_move_tail(&id->list, &adev->vm_manager.ids_lru); + vm->ids[ring->idx] = id; + + job->vm_id = id - adev->vm_manager.ids; + job->vm_needs_flush = false; + trace_amdgpu_vm_grab_id(vm, ring->idx, job); + + mutex_unlock(&adev->vm_manager.lock); + return 0; + + } while (i != ring->idx); + + /* Still no ID to use? Then use the idle one found earlier */ + id = idle; + + /* Remember this submission as user of the VMID */ + r = amdgpu_sync_fence(ring->adev, &id->active, fence); + if (r) + goto error; + + fence_put(id->first); + id->first = fence_get(fence); + + fence_put(id->last_flush); + id->last_flush = NULL; + + fence_put(id->flushed_updates); + id->flushed_updates = fence_get(updates); + + id->pd_gpu_addr = job->vm_pd_addr; + id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); + list_move_tail(&id->list, &adev->vm_manager.ids_lru); + atomic64_set(&id->owner, vm->client_id); + vm->ids[ring->idx] = id; + + job->vm_id = id - adev->vm_manager.ids; + trace_amdgpu_vm_grab_id(vm, ring->idx, job); +error: mutex_unlock(&adev->vm_manager.lock); return r; } +static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + const struct amdgpu_ip_block_version *ip_block; + + if (ring->type != AMDGPU_RING_TYPE_COMPUTE) + /* only compute rings */ + return false; + + ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX); + if (!ip_block) + return false; + + if (ip_block->major <= 7) { + /* gfx7 has no workaround */ + return true; + } else if (ip_block->major == 8) { + if (adev->gfx.mec_fw_version >= 673) + /* gfx8 is fixed in MEC firmware 673 */ + return false; + else + return true; + } + return false; +} + /** * amdgpu_vm_flush - hardware flush the vm * @@ -247,43 +370,55 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, * * Emit a VM flush when it is necessary. 
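[Editor's note: the do/while loop above packs several eligibility tests into one pass over vm->ids[]. Condensed into a single predicate, the reuse rule is sketched below; struct vmid is an invented stand-in for struct amdgpu_vm_id, and the GPU-reset-counter test from amdgpu_vm_is_gpu_reset() is elided for brevity. A VMID can be reused without a flush only when it still belongs to this client, maps the same page directory, its last flush either came from this ring's fence context or has signaled, and no page-directory update is newer than the last one flushed.]

struct vmid {
	atomic64_t owner;              /* client that last used the VMID */
	u64 pd_gpu_addr;               /* page directory it currently maps */
	struct fence *last_flush;      /* fence after the last VM flush */
	struct fence *flushed_updates; /* last PD update covered by a flush */
};

static bool vmid_reusable(struct vmid *id, u64 client, u64 pd_addr,
			  u64 ring_fence_context, struct fence *updates)
{
	if (atomic64_read(&id->owner) != client)
		return false;
	if (id->pd_gpu_addr != pd_addr)
		return false;
	if (!id->last_flush)
		return false;
	/* flushes emitted on the same ring are ordered and need not
	 * have signaled yet */
	if (id->last_flush->context != ring_fence_context &&
	    !fence_is_signaled(id->last_flush))
		return false;
	if (updates && (!id->flushed_updates ||
			fence_is_later(updates, id->flushed_updates)))
		return false;
	return true;
}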
*/ -void amdgpu_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr, - uint32_t gds_base, uint32_t gds_size, - uint32_t gws_base, uint32_t gws_size, - uint32_t oa_base, uint32_t oa_size) +int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) { struct amdgpu_device *adev = ring->adev; - struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id]; + struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id]; bool gds_switch_needed = ring->funcs->emit_gds_switch && ( - mgr_id->gds_base != gds_base || - mgr_id->gds_size != gds_size || - mgr_id->gws_base != gws_base || - mgr_id->gws_size != gws_size || - mgr_id->oa_base != oa_base || - mgr_id->oa_size != oa_size); + id->gds_base != job->gds_base || + id->gds_size != job->gds_size || + id->gws_base != job->gws_base || + id->gws_size != job->gws_size || + id->oa_base != job->oa_base || + id->oa_size != job->oa_size); + int r; if (ring->funcs->emit_pipeline_sync && ( - pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed)) + job->vm_needs_flush || gds_switch_needed || + amdgpu_vm_ring_has_compute_vm_bug(ring))) amdgpu_ring_emit_pipeline_sync(ring); - if (pd_addr != AMDGPU_VM_NO_FLUSH) { - trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id); - amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr); + if (ring->funcs->emit_vm_flush && (job->vm_needs_flush || + amdgpu_vm_is_gpu_reset(adev, id))) { + struct fence *fence; + + trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id); + amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr); + + r = amdgpu_fence_emit(ring, &fence); + if (r) + return r; + + mutex_lock(&adev->vm_manager.lock); + fence_put(id->last_flush); + id->last_flush = fence; + mutex_unlock(&adev->vm_manager.lock); } if (gds_switch_needed) { - mgr_id->gds_base = gds_base; - mgr_id->gds_size = gds_size; - mgr_id->gws_base = gws_base; - mgr_id->gws_size = gws_size; - mgr_id->oa_base = oa_base; - mgr_id->oa_size = oa_size; - amdgpu_ring_emit_gds_switch(ring, vm_id, - gds_base, gds_size, - gws_base, gws_size, - oa_base, oa_size); + id->gds_base = job->gds_base; + id->gds_size = job->gds_size; + id->gws_base = job->gws_base; + id->gws_size = job->gws_size; + id->oa_base = job->oa_base; + id->oa_size = job->oa_size; + amdgpu_ring_emit_gds_switch(ring, job->vm_id, + job->gds_base, job->gds_size, + job->gws_base, job->gws_size, + job->oa_base, job->oa_size); } + + return 0; } /** @@ -296,14 +431,14 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring, */ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id) { - struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id]; - - mgr_id->gds_base = 0; - mgr_id->gds_size = 0; - mgr_id->gws_base = 0; - mgr_id->gws_size = 0; - mgr_id->oa_base = 0; - mgr_id->oa_size = 0; + struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id]; + + id->gds_base = 0; + id->gds_size = 0; + id->gws_base = 0; + id->gws_size = 0; + id->oa_base = 0; + id->oa_size = 0; } /** @@ -335,9 +470,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, * amdgpu_vm_update_pages - helper to call the right asic function * * @adev: amdgpu_device pointer - * @gtt: GART instance to use for mapping - * @gtt_flags: GTT hw access flags - * @ib: indirect buffer to fill with commands + * @vm_update_params: see amdgpu_vm_update_params definition * @pe: addr of the page entry * @addr: dst addr to write into pe * @count: number of page entries to update @@ -348,30 +481,29 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, * to setup the page table using the DMA. 
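 *
 * Editor's aside (illustration only, not part of the patch; params is
 * shorthand for the new struct amdgpu_vm_update_params): the dispatch this
 * helper performs after the rework is roughly:
 *
 *	if (params->src)              amdgpu_vm_copy_pte()     copy PTEs from GART
 *	else if (params->pages_addr)  amdgpu_vm_write_pte()    write DMA addresses
 *	else if (count < 3)           amdgpu_vm_write_pte()    small direct writes
 *	else                          amdgpu_vm_set_pte_pde()  large linear ranges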
*/ static void amdgpu_vm_update_pages(struct amdgpu_device *adev, - struct amdgpu_gart *gtt, - uint32_t gtt_flags, - struct amdgpu_ib *ib, + struct amdgpu_vm_update_params + *vm_update_params, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { trace_amdgpu_vm_set_page(pe, addr, count, incr, flags); - if ((gtt == &adev->gart) && (flags == gtt_flags)) { - uint64_t src = gtt->table_addr + (addr >> 12) * 8; - amdgpu_vm_copy_pte(adev, ib, pe, src, count); + if (vm_update_params->src) { + amdgpu_vm_copy_pte(adev, vm_update_params->ib, + pe, (vm_update_params->src + (addr >> 12) * 8), count); - } else if (gtt) { - dma_addr_t *pages_addr = gtt->pages_addr; - amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr, - count, incr, flags); + } else if (vm_update_params->pages_addr) { + amdgpu_vm_write_pte(adev, vm_update_params->ib, + vm_update_params->pages_addr, + pe, addr, count, incr, flags); } else if (count < 3) { - amdgpu_vm_write_pte(adev, ib, NULL, pe, addr, + amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr, count, incr, flags); } else { - amdgpu_vm_set_pte_pde(adev, ib, pe, addr, + amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr, count, incr, flags); } } @@ -391,10 +523,12 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, struct amdgpu_ring *ring; struct fence *fence = NULL; struct amdgpu_job *job; + struct amdgpu_vm_update_params vm_update_params; unsigned entries; uint64_t addr; int r; + memset(&vm_update_params, 0, sizeof(vm_update_params)); ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); r = reservation_object_reserve_shared(bo->tbo.resv); @@ -412,7 +546,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, if (r) goto error; - amdgpu_vm_update_pages(adev, NULL, 0, &job->ibs[0], addr, 0, entries, + vm_update_params.ib = &job->ibs[0]; + amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries, 0, 0); amdgpu_ring_pad_ib(ring, &job->ibs[0]); @@ -485,11 +620,12 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, uint64_t last_pde = ~0, last_pt = ~0; unsigned count = 0, pt_idx, ndw; struct amdgpu_job *job; - struct amdgpu_ib *ib; + struct amdgpu_vm_update_params vm_update_params; struct fence *fence = NULL; int r; + memset(&vm_update_params, 0, sizeof(vm_update_params)); ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); /* padding, etc. 
*/ @@ -502,7 +638,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, if (r) return r; - ib = &job->ibs[0]; + vm_update_params.ib = &job->ibs[0]; /* walk over the address space and update the page directory */ for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { @@ -522,7 +658,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, ((last_pt + incr * count) != pt)) { if (count) { - amdgpu_vm_update_pages(adev, NULL, 0, ib, + amdgpu_vm_update_pages(adev, &vm_update_params, last_pde, last_pt, count, incr, AMDGPU_PTE_VALID); @@ -537,14 +673,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, } if (count) - amdgpu_vm_update_pages(adev, NULL, 0, ib, last_pde, last_pt, - count, incr, AMDGPU_PTE_VALID); + amdgpu_vm_update_pages(adev, &vm_update_params, + last_pde, last_pt, + count, incr, AMDGPU_PTE_VALID); - if (ib->length_dw != 0) { - amdgpu_ring_pad_ib(ring, ib); + if (vm_update_params.ib->length_dw != 0) { + amdgpu_ring_pad_ib(ring, vm_update_params.ib); amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM); - WARN_ON(ib->length_dw > ndw); + WARN_ON(vm_update_params.ib->length_dw > ndw); r = amdgpu_job_submit(job, ring, &vm->entity, AMDGPU_FENCE_OWNER_VM, &fence); if (r) @@ -570,18 +707,15 @@ error_free: * amdgpu_vm_frag_ptes - add fragment information to PTEs * * @adev: amdgpu_device pointer - * @gtt: GART instance to use for mapping - * @gtt_flags: GTT hw mapping flags - * @ib: IB for the update + * @vm_update_params: see amdgpu_vm_update_params definition * @pe_start: first PTE to handle * @pe_end: last PTE to handle * @addr: addr those PTEs should point to * @flags: hw mapping flags */ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, - struct amdgpu_gart *gtt, - uint32_t gtt_flags, - struct amdgpu_ib *ib, + struct amdgpu_vm_update_params + *vm_update_params, uint64_t pe_start, uint64_t pe_end, uint64_t addr, uint32_t flags) { @@ -618,10 +752,11 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, return; /* system pages are not contiguous */ - if (gtt || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) { + if (vm_update_params->src || vm_update_params->pages_addr || + !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) { count = (pe_end - pe_start) / 8; - amdgpu_vm_update_pages(adev, gtt, gtt_flags, ib, pe_start, + amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr, count, AMDGPU_GPU_PAGE_SIZE, flags); return; @@ -630,21 +765,21 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, /* handle the 4K area at the beginning */ if (pe_start != frag_start) { count = (frag_start - pe_start) / 8; - amdgpu_vm_update_pages(adev, NULL, 0, ib, pe_start, addr, + amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr, count, AMDGPU_GPU_PAGE_SIZE, flags); addr += AMDGPU_GPU_PAGE_SIZE * count; } /* handle the area in the middle */ count = (frag_end - frag_start) / 8; - amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_start, addr, count, + amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count, AMDGPU_GPU_PAGE_SIZE, flags | frag_flags); /* handle the 4K area at the end */ if (frag_end != pe_end) { addr += AMDGPU_GPU_PAGE_SIZE * count; count = (pe_end - frag_end) / 8; - amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_end, addr, + amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr, count, AMDGPU_GPU_PAGE_SIZE, flags); } } @@ -653,72 +788,95 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, * amdgpu_vm_update_ptes - make sure that page tables
are valid * * @adev: amdgpu_device pointer - * @gtt: GART instance to use for mapping - * @gtt_flags: GTT hw mapping flags + * @vm_update_params: see amdgpu_vm_update_params definition * @vm: requested vm * @start: start of GPU address range * @end: end of GPU address range - * @dst: destination address to map to + * @dst: destination address to map to; advanced inside the function as the range is walked * @flags: mapping flags * * Update the page tables in the range @start - @end. */ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, - struct amdgpu_gart *gtt, - uint32_t gtt_flags, + struct amdgpu_vm_update_params + *vm_update_params, struct amdgpu_vm *vm, - struct amdgpu_ib *ib, uint64_t start, uint64_t end, uint64_t dst, uint32_t flags) { const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; - uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0; - uint64_t addr; + uint64_t cur_pe_start, cur_pe_end, cur_dst; + uint64_t addr; /* next GPU address to be updated */ + uint64_t pt_idx; + struct amdgpu_bo *pt; + unsigned nptes; /* next number of ptes to be updated */ + uint64_t next_pe_start; + + /* initialize the variables */ + addr = start; + pt_idx = addr >> amdgpu_vm_block_size; + pt = vm->page_tables[pt_idx].entry.robj; + + if ((addr & ~mask) == (end & ~mask)) + nptes = end - addr; + else + nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); + + cur_pe_start = amdgpu_bo_gpu_offset(pt); + cur_pe_start += (addr & mask) * 8; + cur_pe_end = cur_pe_start + 8 * nptes; + cur_dst = dst; + + /* for next ptb */ + addr += nptes; + dst += nptes * AMDGPU_GPU_PAGE_SIZE; /* walk over the address space and update the page tables */ - for (addr = start; addr < end; ) { - uint64_t pt_idx = addr >> amdgpu_vm_block_size; - struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj; - unsigned nptes; - uint64_t pe_start; + while (addr < end) { + pt_idx = addr >> amdgpu_vm_block_size; + pt = vm->page_tables[pt_idx].entry.robj; if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; else nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); - pe_start = amdgpu_bo_gpu_offset(pt); - pe_start += (addr & mask) * 8; - - if (last_pe_end != pe_start) { + next_pe_start = amdgpu_bo_gpu_offset(pt); + next_pe_start += (addr & mask) * 8; - amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib, - last_pe_start, last_pe_end, - last_dst, flags); - - last_pe_start = pe_start; - last_pe_end = pe_start + 8 * nptes; - last_dst = dst; + if (cur_pe_end == next_pe_start) { + /* The next ptb is consecutive to the current ptb. + * Don't call amdgpu_vm_frag_ptes now; the two + * ptbs will be updated together later.
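+ *
+ * Editor's aside (illustration only, not part of the patch): the walk
+ * keeps a single pending GPU range [cur_pe_start, cur_pe_end) plus its
+ * cur_dst, and in rough C behaves like:
+ *
+ *	if (cur_pe_end == next_pe_start)
+ *		cur_pe_end += 8 * nptes;	merge into the pending range
+ *	else
+ *		emit the pending range via amdgpu_vm_frag_ptes() and
+ *		restart it at next_pe_start / dst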
+ */ + cur_pe_end += 8 * nptes; } else { - last_pe_end += 8 * nptes; + amdgpu_vm_frag_ptes(adev, vm_update_params, + cur_pe_start, cur_pe_end, + cur_dst, flags); + + cur_pe_start = next_pe_start; + cur_pe_end = next_pe_start + 8 * nptes; + cur_dst = dst; } + /* for next ptb */ addr += nptes; dst += nptes * AMDGPU_GPU_PAGE_SIZE; } - amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib, - last_pe_start, last_pe_end, - last_dst, flags); + amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start, + cur_pe_end, cur_dst, flags); } /** * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table * * @adev: amdgpu_device pointer - * @gtt: GART instance to use for mapping - * @gtt_flags: flags as they are used for GTT + * @exclusive: fence we need to sync to + * @src: address where to copy page table entries from + * @pages_addr: DMA addresses to use for mapping * @vm: requested vm * @start: start of mapped range * @last: last mapped entry @@ -730,8 +888,9 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, * Returns 0 for success, -EINVAL for failure. */ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, - struct amdgpu_gart *gtt, - uint32_t gtt_flags, + struct fence *exclusive, + uint64_t src, + dma_addr_t *pages_addr, struct amdgpu_vm *vm, uint64_t start, uint64_t last, uint32_t flags, uint64_t addr, @@ -741,11 +900,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, void *owner = AMDGPU_FENCE_OWNER_VM; unsigned nptes, ncmds, ndw; struct amdgpu_job *job; - struct amdgpu_ib *ib; + struct amdgpu_vm_update_params vm_update_params; struct fence *f = NULL; int r; ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); + memset(&vm_update_params, 0, sizeof(vm_update_params)); + vm_update_params.src = src; + vm_update_params.pages_addr = pages_addr; /* sync to everything on unmapping */ if (!(flags & AMDGPU_PTE_VALID)) @@ -762,11 +924,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, /* padding, etc.
*/ ndw = 64; - if ((gtt == &adev->gart) && (flags == gtt_flags)) { + if (vm_update_params.src) { /* only copy commands needed */ ndw += ncmds * 7; - } else if (gtt) { + } else if (vm_update_params.pages_addr) { /* header for write data commands */ ndw += ncmds * 4; @@ -785,7 +947,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) return r; - ib = &job->ibs[0]; + vm_update_params.ib = &job->ibs[0]; + + r = amdgpu_sync_fence(adev, &job->sync, exclusive); + if (r) + goto error_free; r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv, owner); @@ -796,11 +962,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; - amdgpu_vm_update_ptes(adev, gtt, gtt_flags, vm, ib, start, last + 1, - addr, flags); + amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start, + last + 1, addr, flags); - amdgpu_ring_pad_ib(ring, ib); - WARN_ON(ib->length_dw > ndw); + amdgpu_ring_pad_ib(ring, vm_update_params.ib); + WARN_ON(vm_update_params.ib->length_dw > ndw); r = amdgpu_job_submit(job, ring, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f); if (r) @@ -823,11 +989,13 @@ error_free: * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks * * @adev: amdgpu_device pointer - * @gtt: GART instance to use for mapping + * @exclusive: fence we need to sync to + * @gtt_flags: flags as they are used for GTT + * @pages_addr: DMA addresses to use for mapping * @vm: requested vm * @mapping: mapped range and flags to use for the update * @addr: addr to set the area to - * @gtt_flags: flags as they are used for GTT + * @flags: HW flags for the mapping * @fence: optional resulting fence * * Split the mapping into smaller chunks so that each update fits @@ -835,16 +1003,17 @@ error_free: * Returns 0 for success, -EINVAL for failure. 
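 *
 * Editor's aside (illustration only, not part of the patch): max_size in
 * the function body is 64MB worth of GPU pages, so a large mapping is
 * submitted as a loop of bounded updates, roughly:
 *
 *	start = mapping->it.start;
 *	while (start <= mapping->it.last) {
 *		last = min(mapping->it.last, start + max_size - 1);
 *		amdgpu_vm_bo_update_mapping(..., start, last, flags, addr, ...);
 *		addr += (last - start + 1) * AMDGPU_GPU_PAGE_SIZE;
 *		start = last + 1;
 *	}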
*/ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, - struct amdgpu_gart *gtt, + struct fence *exclusive, uint32_t gtt_flags, + dma_addr_t *pages_addr, struct amdgpu_vm *vm, struct amdgpu_bo_va_mapping *mapping, - uint64_t addr, struct fence **fence) + uint32_t flags, uint64_t addr, + struct fence **fence) { const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE; - uint64_t start = mapping->it.start; - uint32_t flags = gtt_flags; + uint64_t src = 0, start = mapping->it.start; int r; /* normally, bo_va->flags only contains READABLE and WRITEABLE bits go here @@ -857,10 +1026,16 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, trace_amdgpu_vm_bo_update(mapping); + if (pages_addr) { + if (flags == gtt_flags) + src = adev->gart.table_addr + (addr >> 12) * 8; + addr = 0; + } addr += mapping->offset; - if (!gtt || ((gtt == &adev->gart) && (flags == gtt_flags))) - return amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm, + if (!pages_addr || src) + return amdgpu_vm_bo_update_mapping(adev, exclusive, + src, pages_addr, vm, start, mapping->it.last, flags, addr, fence); @@ -868,7 +1043,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, uint64_t last; last = min((uint64_t)mapping->it.last, start + max_size - 1); - r = amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm, + r = amdgpu_vm_bo_update_mapping(adev, exclusive, + src, pages_addr, vm, start, last, flags, addr, fence); if (r) @@ -899,16 +1075,21 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, { struct amdgpu_vm *vm = bo_va->vm; struct amdgpu_bo_va_mapping *mapping; - struct amdgpu_gart *gtt = NULL; - uint32_t flags; + dma_addr_t *pages_addr = NULL; + uint32_t gtt_flags, flags; + struct fence *exclusive; uint64_t addr; int r; if (mem) { + struct ttm_dma_tt *ttm; + addr = (u64)mem->start << PAGE_SHIFT; switch (mem->mem_type) { case TTM_PL_TT: - gtt = &bo_va->bo->adev->gart; + ttm = container_of(bo_va->bo->tbo.ttm, struct + ttm_dma_tt, ttm); + pages_addr = ttm->dma_address; break; case TTM_PL_VRAM: @@ -918,11 +1099,15 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, default: break; } + + exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv); } else { addr = 0; + exclusive = NULL; } flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); + gtt_flags = (adev == bo_va->bo->adev) ?
flags : 0; spin_lock(&vm->status_lock); if (!list_empty(&bo_va->vm_status)) @@ -930,7 +1115,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, spin_unlock(&vm->status_lock); list_for_each_entry(mapping, &bo_va->invalids, list) { - r = amdgpu_vm_bo_split_mapping(adev, gtt, flags, vm, mapping, addr, + r = amdgpu_vm_bo_split_mapping(adev, exclusive, + gtt_flags, pages_addr, vm, + mapping, flags, addr, &bo_va->last_pt_update); if (r) return r; @@ -976,8 +1163,8 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping, list); list_del(&mapping->list); - r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, vm, mapping, - 0, NULL); + r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping, + 0, 0, NULL); kfree(mapping); if (r) return r; @@ -1320,11 +1507,10 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) struct amd_sched_rq *rq; int i, r; - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - vm->ids[i].mgr_id = NULL; - vm->ids[i].flushed_updates = NULL; - } + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) + vm->ids[i] = NULL; vm->va = RB_ROOT; + vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter); spin_lock_init(&vm->status_lock); INIT_LIST_HEAD(&vm->invalidated); INIT_LIST_HEAD(&vm->cleared); @@ -1368,6 +1554,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) amdgpu_bo_unreserve(vm->page_directory); if (r) goto error_free_page_directory; + vm->last_eviction_counter = atomic64_read(&adev->num_evictions); return 0; @@ -1416,15 +1603,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) amdgpu_bo_unref(&vm->page_directory); fence_put(vm->page_directory_fence); - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - struct amdgpu_vm_id *id = &vm->ids[i]; - - if (id->mgr_id) - atomic_long_cmpxchg(&id->mgr_id->owner, - (long)id, 0); - fence_put(id->flushed_updates); - } } /** @@ -1443,11 +1621,17 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) /* skip over VMID 0, since it is the system VM */ for (i = 1; i < adev->vm_manager.num_ids; ++i) { amdgpu_vm_reset_id(adev, i); + amdgpu_sync_create(&adev->vm_manager.ids[i].active); list_add_tail(&adev->vm_manager.ids[i].list, &adev->vm_manager.ids_lru); } + adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS); + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) + adev->vm_manager.seqno[i] = 0; + atomic_set(&adev->vm_manager.vm_pte_next_ring, 0); + atomic64_set(&adev->vm_manager.client_counter, 0); } /** @@ -1461,6 +1645,11 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev) { unsigned i; - for (i = 0; i < AMDGPU_NUM_VM; ++i) - fence_put(adev->vm_manager.ids[i].active); + for (i = 0; i < AMDGPU_NUM_VM; ++i) { + struct amdgpu_vm_id *id = &adev->vm_manager.ids[i]; + + fence_put(adev->vm_manager.ids[i].first); + amdgpu_sync_free(&adev->vm_manager.ids[i].active); + fence_put(id->flushed_updates); + } } diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h index fece8f45dc7a..49daf6d723e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.h +++ b/drivers/gpu/drm/amd/amdgpu/atom.h @@ -92,7 +92,7 @@ #define ATOM_WS_AND_MASK 0x45 #define ATOM_WS_FB_WINDOW 0x46 #define ATOM_WS_ATTRIBUTES 0x47 -#define ATOM_WS_REGPTR 0x48 +#define ATOM_WS_REGPTR 0x48 #define ATOM_IIO_NOP 0 #define ATOM_IIO_START 1 diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c index 49aa35016653..49a39b1a0a96 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c +++ 
b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c @@ -461,13 +461,14 @@ union set_pixel_clock { PIXEL_CLOCK_PARAMETERS_V3 v3; PIXEL_CLOCK_PARAMETERS_V5 v5; PIXEL_CLOCK_PARAMETERS_V6 v6; + PIXEL_CLOCK_PARAMETERS_V7 v7; }; /* on DCE5, make sure the voltage is high enough to support the * required disp clk. */ void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev, - u32 dispclk) + u32 dispclk) { u8 frev, crev; int index; @@ -510,6 +511,49 @@ void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev, amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); } +union set_dce_clock { + SET_DCE_CLOCK_PS_ALLOCATION_V1_1 v1_1; + SET_DCE_CLOCK_PS_ALLOCATION_V2_1 v2_1; +}; + +u32 amdgpu_atombios_crtc_set_dce_clock(struct amdgpu_device *adev, + u32 freq, u8 clk_type, u8 clk_src) +{ + u8 frev, crev; + int index; + union set_dce_clock args; + u32 ret_freq = 0; + + memset(&args, 0, sizeof(args)); + + index = GetIndexIntoMasterTable(COMMAND, SetDCEClock); + if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, + &crev)) + return 0; + + switch (frev) { + case 2: + switch (crev) { + case 1: + args.v2_1.asParam.ulDCEClkFreq = cpu_to_le32(freq); /* 10kHz units */ + args.v2_1.asParam.ucDCEClkType = clk_type; + args.v2_1.asParam.ucDCEClkSrc = clk_src; + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + ret_freq = le32_to_cpu(args.v2_1.asParam.ulDCEClkFreq) * 10; + break; + default: + DRM_ERROR("Unknown table version %d %d\n", frev, crev); + return 0; + } + break; + default: + DRM_ERROR("Unknown table version %d %d\n", frev, crev); + return 0; + } + + return ret_freq; +} + static bool is_pixel_clock_source_from_pll(u32 encoder_mode, int pll_id) { if (ENCODER_MODE_IS_DP(encoder_mode)) { @@ -523,18 +567,18 @@ static bool is_pixel_clock_source_from_pll(u32 encoder_mode, int pll_id) } void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc, - u32 crtc_id, - int pll_id, - u32 encoder_mode, - u32 encoder_id, - u32 clock, - u32 ref_div, - u32 fb_div, - u32 frac_fb_div, - u32 post_div, - int bpc, - bool ss_enabled, - struct amdgpu_atom_ss *ss) + u32 crtc_id, + int pll_id, + u32 encoder_mode, + u32 encoder_id, + u32 clock, + u32 ref_div, + u32 fb_div, + u32 frac_fb_div, + u32 post_div, + int bpc, + bool ss_enabled, + struct amdgpu_atom_ss *ss) { struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; @@ -652,6 +696,34 @@ void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc, args.v6.ucEncoderMode = encoder_mode; args.v6.ucPpll = pll_id; break; + case 7: + args.v7.ulPixelClock = cpu_to_le32(clock * 10); /* 100 hz units */ + args.v7.ucMiscInfo = 0; + if ((encoder_mode == ATOM_ENCODER_MODE_DVI) && + (clock > 165000)) + args.v7.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN; + args.v7.ucCRTC = crtc_id; + if (encoder_mode == ATOM_ENCODER_MODE_HDMI) { + switch (bpc) { + case 8: + default: + args.v7.ucDeepColorRatio = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_DIS; + break; + case 10: + args.v7.ucDeepColorRatio = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_5_4; + break; + case 12: + args.v7.ucDeepColorRatio = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_3_2; + break; + case 16: + args.v7.ucDeepColorRatio = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_2_1; + break; + } + } + args.v7.ucTransmitterID = encoder_id; + args.v7.ucEncoderMode = encoder_mode; + args.v7.ucPpll = pll_id; + break; default: DRM_ERROR("Unknown table version %d %d\n", frev, crev); return; diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.h 
b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.h index c67083335b13..0eeda8e3bf5c 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.h +++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.h @@ -37,6 +37,8 @@ void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc, struct drm_display_mode *mode); void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev, u32 dispclk); +u32 amdgpu_atombios_crtc_set_dce_clock(struct amdgpu_device *adev, + u32 freq, u8 clk_type, u8 clk_src); void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc, u32 crtc_id, int pll_id, diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c index bf731e9f643e..7f85c2c1d681 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c @@ -276,8 +276,8 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector } } } else { - for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { - for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) { + for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) { + for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { max_pix_clock = (lane_num * link_rates[i] * 8) / bpp; if (max_pix_clock >= pix_clock) { *dp_lanes = lane_num; diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index 1e0bba29e167..c32eca26155c 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -98,6 +98,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: if (dig->backlight_level == 0) amdgpu_atombios_encoder_setup_dig_transmitter(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); @@ -298,6 +299,10 @@ bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder, && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; + /* vertical FP must be at least 1 */ + if (mode->crtc_vsync_start == mode->crtc_vdisplay) + adjusted_mode->crtc_vsync_start++; + /* get the native mode for scaling */ if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) amdgpu_panel_mode_fixup(encoder, adjusted_mode); @@ -563,6 +568,7 @@ union dig_encoder_control { DIG_ENCODER_CONTROL_PARAMETERS_V2 v2; DIG_ENCODER_CONTROL_PARAMETERS_V3 v3; DIG_ENCODER_CONTROL_PARAMETERS_V4 v4; + DIG_ENCODER_CONTROL_PARAMETERS_V5 v5; }; void @@ -690,6 +696,47 @@ amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder, else args.v4.ucHPD_ID = hpd_id + 1; break; + case 5: + switch (action) { + case ATOM_ENCODER_CMD_SETUP_PANEL_MODE: + args.v5.asDPPanelModeParam.ucAction = action; + args.v5.asDPPanelModeParam.ucPanelMode = panel_mode; + args.v5.asDPPanelModeParam.ucDigId = dig->dig_encoder; + break; + case ATOM_ENCODER_CMD_STREAM_SETUP: + args.v5.asStreamParam.ucAction = action; + args.v5.asStreamParam.ucDigId = dig->dig_encoder; + args.v5.asStreamParam.ucDigMode = + amdgpu_atombios_encoder_get_encoder_mode(encoder); + if (ENCODER_MODE_IS_DP(args.v5.asStreamParam.ucDigMode)) + args.v5.asStreamParam.ucLaneNum = dp_lane_count; + else if (amdgpu_dig_monitor_is_duallink(encoder, + amdgpu_encoder->pixel_clock)) + args.v5.asStreamParam.ucLaneNum = 8; + else + args.v5.asStreamParam.ucLaneNum 
= 4; + args.v5.asStreamParam.ulPixelClock = + cpu_to_le32(amdgpu_encoder->pixel_clock / 10); + args.v5.asStreamParam.ucBitPerColor = + amdgpu_atombios_encoder_get_bpc(encoder); + args.v5.asStreamParam.ucLinkRateIn270Mhz = dp_clock / 27000; + break; + case ATOM_ENCODER_CMD_DP_LINK_TRAINING_START: + case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1: + case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2: + case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3: + case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN4: + case ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE: + case ATOM_ENCODER_CMD_DP_VIDEO_OFF: + case ATOM_ENCODER_CMD_DP_VIDEO_ON: + args.v5.asCmdParam.ucAction = action; + args.v5.asCmdParam.ucDigId = dig->dig_encoder; + break; + default: + DRM_ERROR("Unsupported action 0x%x\n", action); + break; + } + break; default: DRM_ERROR("Unknown table version %d, %d\n", frev, crev); break; @@ -710,11 +757,12 @@ union dig_transmitter_control { DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3; DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4; DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 v5; + DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_6 v6; }; void amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int action, - uint8_t lane_num, uint8_t lane_set) + uint8_t lane_num, uint8_t lane_set) { struct drm_device *dev = encoder->dev; struct amdgpu_device *adev = dev->dev_private; @@ -1066,6 +1114,54 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a args.v5.ucDigEncoderSel = 1 << dig_encoder; args.v5.ucDPLaneSet = lane_set; break; + case 6: + args.v6.ucAction = action; + if (is_dp) + args.v6.ulSymClock = cpu_to_le32(dp_clock / 10); + else + args.v6.ulSymClock = cpu_to_le32(amdgpu_encoder->pixel_clock / 10); + + switch (amdgpu_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + if (dig->linkb) + args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYB; + else + args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYA; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + if (dig->linkb) + args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYD; + else + args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYC; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + if (dig->linkb) + args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYF; + else + args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYE; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: + args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYG; + break; + } + if (is_dp) + args.v6.ucLaneNum = dp_lane_count; + else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) + args.v6.ucLaneNum = 8; + else + args.v6.ucLaneNum = 4; + args.v6.ucConnObjId = connector_object_id; + if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) + args.v6.ucDPLaneSet = lane_set; + else + args.v6.ucDigMode = amdgpu_atombios_encoder_get_encoder_mode(encoder); + + if (hpd_id == AMDGPU_HPD_NONE) + args.v6.ucHPDSel = 0; + else + args.v6.ucHPDSel = hpd_id + 1; + args.v6.ucDigEncoderSel = 1 << dig_encoder; + break; default: DRM_ERROR("Unknown table version %d, %d\n", frev, crev); break; diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c index 13cdb01e9b45..bc56c8a181e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c @@ -156,3 +156,18 @@ u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap) return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } +void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device* adev, u8 slave_addr, u8 line_number, u8 offset, u8 data) +{ + PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args; + int index = 
GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction); + + args.ucRegIndex = offset; + args.lpI2CDataOut = data; + args.ucFlag = 1; + args.ucI2CSpeed = TARGET_HW_I2C_CLOCK; + args.ucTransBytes = 1; + args.ucSlaveAddr = slave_addr; + args.ucLineNumber = line_number; + + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); +} diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.h b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.h index d6128d9de56e..251aaf41f65d 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.h +++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.h @@ -27,5 +27,7 @@ int amdgpu_atombios_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num); u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap); +void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device* adev, + u8 slave_addr, u8 line_number, u8 offset, u8 data); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 1f9109d3348b..e2f0e5d58d5c 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -50,7 +50,9 @@ #include "gmc/gmc_7_1_sh_mask.h" MODULE_FIRMWARE("radeon/bonaire_smc.bin"); +MODULE_FIRMWARE("radeon/bonaire_k_smc.bin"); MODULE_FIRMWARE("radeon/hawaii_smc.bin"); +MODULE_FIRMWARE("radeon/hawaii_k_smc.bin"); #define MC_CG_ARB_FREQ_F0 0x0a #define MC_CG_ARB_FREQ_F1 0x0b @@ -84,12 +86,14 @@ static const struct ci_pt_defaults defaults_bonaire_xt = { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } }; +#if 0 static const struct ci_pt_defaults defaults_bonaire_pro = { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062, { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F }, { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB } }; +#endif static const struct ci_pt_defaults defaults_saturn_xt = { @@ -98,12 +102,14 @@ static const struct ci_pt_defaults defaults_saturn_xt = { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 } }; +#if 0 static const struct ci_pt_defaults defaults_saturn_pro = { 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000, { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A }, { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 } }; +#endif static const struct ci_pt_config_reg didt_config_ci[] = { @@ -736,19 +742,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable) if (pi->caps_sq_ramping || pi->caps_db_ramping || pi->caps_td_ramping || pi->caps_tcp_ramping) { - gfx_v7_0_enter_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->enter_safe_mode(adev); if (enable) { ret = ci_program_pt_config_registers(adev, didt_config_ci); if (ret) { - gfx_v7_0_exit_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->exit_safe_mode(adev); return ret; } } ci_do_enable_didt(adev, enable); - gfx_v7_0_exit_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->exit_safe_mode(adev); } return 0; @@ -2549,19 +2555,17 @@ static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev, return 0; } -static u8 ci_get_sleep_divider_id_from_clock(struct amdgpu_device *adev, - u32 sclk, u32 min_sclk_in_sr) +static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr) { u32 i; u32 tmp; - u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ? 
- min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK; + u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK); if (sclk < min) return 0; for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { - tmp = sclk / (1 << i); + tmp = sclk >> i; if (tmp >= min || i == 0) break; } @@ -3032,7 +3036,7 @@ static int ci_populate_single_memory_level(struct amdgpu_device *adev, if (pi->mclk_stutter_mode_threshold && (memory_clock <= pi->mclk_stutter_mode_threshold) && - (pi->uvd_enabled == false) && + (!pi->uvd_enabled) && (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) && (adev->pm.dpm.new_active_crtc_count <= 2)) memory_level->StutterEnable = true; @@ -3358,8 +3362,7 @@ static int ci_populate_single_graphic_level(struct amdgpu_device *adev, graphic_level->PowerThrottle = 0; if (pi->caps_sclk_ds) - graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(adev, - engine_clock, + graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock, CISLAND_MINIMUM_ENGINE_CLOCK); graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; @@ -3639,6 +3642,10 @@ static int ci_setup_default_dpm_tables(struct amdgpu_device *adev) ci_setup_default_pcie_tables(adev); + /* save a copy of the default DPM table */ + memcpy(&(pi->golden_dpm_table), &(pi->dpm_table), + sizeof(struct ci_dpm_table)); + return 0; } @@ -5757,10 +5764,18 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_BONAIRE: - chip_name = "bonaire"; + if ((adev->pdev->revision == 0x80) || + (adev->pdev->revision == 0x81) || + (adev->pdev->device == 0x665f)) + chip_name = "bonaire_k"; + else + chip_name = "bonaire"; break; case CHIP_HAWAII: - chip_name = "hawaii"; + if (adev->pdev->revision == 0x80) + chip_name = "hawaii_k"; + else + chip_name = "hawaii"; break; case CHIP_KAVERI: case CHIP_KABINI: @@ -6224,6 +6239,9 @@ static int ci_dpm_sw_fini(void *handle) ci_dpm_fini(adev); mutex_unlock(&adev->pm.mutex); + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + return 0; } @@ -6309,215 +6327,6 @@ static int ci_dpm_wait_for_idle(void *handle) return 0; } -static void ci_dpm_print_status(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - dev_info(adev->dev, "CIK DPM registers\n"); - dev_info(adev->dev, " BIOS_SCRATCH_4=0x%08X\n", - RREG32(mmBIOS_SCRATCH_4)); - dev_info(adev->dev, " MC_ARB_DRAM_TIMING=0x%08X\n", - RREG32(mmMC_ARB_DRAM_TIMING)); - dev_info(adev->dev, " MC_ARB_DRAM_TIMING2=0x%08X\n", - RREG32(mmMC_ARB_DRAM_TIMING2)); - dev_info(adev->dev, " MC_ARB_BURST_TIME=0x%08X\n", - RREG32(mmMC_ARB_BURST_TIME)); - dev_info(adev->dev, " MC_ARB_DRAM_TIMING_1=0x%08X\n", - RREG32(mmMC_ARB_DRAM_TIMING_1)); - dev_info(adev->dev, " MC_ARB_DRAM_TIMING2_1=0x%08X\n", - RREG32(mmMC_ARB_DRAM_TIMING2_1)); - dev_info(adev->dev, " MC_CG_CONFIG=0x%08X\n", - RREG32(mmMC_CG_CONFIG)); - dev_info(adev->dev, " MC_ARB_CG=0x%08X\n", - RREG32(mmMC_ARB_CG)); - dev_info(adev->dev, " DIDT_SQ_CTRL0=0x%08X\n", - RREG32_DIDT(ixDIDT_SQ_CTRL0)); - dev_info(adev->dev, " DIDT_DB_CTRL0=0x%08X\n", - RREG32_DIDT(ixDIDT_DB_CTRL0)); - dev_info(adev->dev, " DIDT_TD_CTRL0=0x%08X\n", - RREG32_DIDT(ixDIDT_TD_CTRL0)); - dev_info(adev->dev, " DIDT_TCP_CTRL0=0x%08X\n", - RREG32_DIDT(ixDIDT_TCP_CTRL0)); - dev_info(adev->dev, " CG_THERMAL_INT=0x%08X\n", - RREG32_SMC(ixCG_THERMAL_INT)); - dev_info(adev->dev, " CG_THERMAL_CTRL=0x%08X\n", - RREG32_SMC(ixCG_THERMAL_CTRL)); - dev_info(adev->dev, " GENERAL_PWRMGT=0x%08X\n", - RREG32_SMC(ixGENERAL_PWRMGT)); - 
dev_info(adev->dev, " MC_SEQ_CNTL_3=0x%08X\n", - RREG32(mmMC_SEQ_CNTL_3)); - dev_info(adev->dev, " LCAC_MC0_CNTL=0x%08X\n", - RREG32_SMC(ixLCAC_MC0_CNTL)); - dev_info(adev->dev, " LCAC_MC1_CNTL=0x%08X\n", - RREG32_SMC(ixLCAC_MC1_CNTL)); - dev_info(adev->dev, " LCAC_CPL_CNTL=0x%08X\n", - RREG32_SMC(ixLCAC_CPL_CNTL)); - dev_info(adev->dev, " SCLK_PWRMGT_CNTL=0x%08X\n", - RREG32_SMC(ixSCLK_PWRMGT_CNTL)); - dev_info(adev->dev, " BIF_LNCNT_RESET=0x%08X\n", - RREG32(mmBIF_LNCNT_RESET)); - dev_info(adev->dev, " FIRMWARE_FLAGS=0x%08X\n", - RREG32_SMC(ixFIRMWARE_FLAGS)); - dev_info(adev->dev, " CG_SPLL_FUNC_CNTL=0x%08X\n", - RREG32_SMC(ixCG_SPLL_FUNC_CNTL)); - dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_2=0x%08X\n", - RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2)); - dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_3=0x%08X\n", - RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3)); - dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_4=0x%08X\n", - RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4)); - dev_info(adev->dev, " CG_SPLL_SPREAD_SPECTRUM=0x%08X\n", - RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM)); - dev_info(adev->dev, " CG_SPLL_SPREAD_SPECTRUM_2=0x%08X\n", - RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2)); - dev_info(adev->dev, " DLL_CNTL=0x%08X\n", - RREG32(mmDLL_CNTL)); - dev_info(adev->dev, " MCLK_PWRMGT_CNTL=0x%08X\n", - RREG32(mmMCLK_PWRMGT_CNTL)); - dev_info(adev->dev, " MPLL_AD_FUNC_CNTL=0x%08X\n", - RREG32(mmMPLL_AD_FUNC_CNTL)); - dev_info(adev->dev, " MPLL_DQ_FUNC_CNTL=0x%08X\n", - RREG32(mmMPLL_DQ_FUNC_CNTL)); - dev_info(adev->dev, " MPLL_FUNC_CNTL=0x%08X\n", - RREG32(mmMPLL_FUNC_CNTL)); - dev_info(adev->dev, " MPLL_FUNC_CNTL_1=0x%08X\n", - RREG32(mmMPLL_FUNC_CNTL_1)); - dev_info(adev->dev, " MPLL_FUNC_CNTL_2=0x%08X\n", - RREG32(mmMPLL_FUNC_CNTL_2)); - dev_info(adev->dev, " MPLL_SS1=0x%08X\n", - RREG32(mmMPLL_SS1)); - dev_info(adev->dev, " MPLL_SS2=0x%08X\n", - RREG32(mmMPLL_SS2)); - dev_info(adev->dev, " CG_DISPLAY_GAP_CNTL=0x%08X\n", - RREG32_SMC(ixCG_DISPLAY_GAP_CNTL)); - dev_info(adev->dev, " CG_DISPLAY_GAP_CNTL2=0x%08X\n", - RREG32_SMC(ixCG_DISPLAY_GAP_CNTL2)); - dev_info(adev->dev, " CG_STATIC_SCREEN_PARAMETER=0x%08X\n", - RREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER)); - dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_0=0x%08X\n", - RREG32_SMC(ixCG_FREQ_TRAN_VOTING_0)); - dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_1=0x%08X\n", - RREG32_SMC(ixCG_FREQ_TRAN_VOTING_1)); - dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_2=0x%08X\n", - RREG32_SMC(ixCG_FREQ_TRAN_VOTING_2)); - dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_3=0x%08X\n", - RREG32_SMC(ixCG_FREQ_TRAN_VOTING_3)); - dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_4=0x%08X\n", - RREG32_SMC(ixCG_FREQ_TRAN_VOTING_4)); - dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_5=0x%08X\n", - RREG32_SMC(ixCG_FREQ_TRAN_VOTING_5)); - dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_6=0x%08X\n", - RREG32_SMC(ixCG_FREQ_TRAN_VOTING_6)); - dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_7=0x%08X\n", - RREG32_SMC(ixCG_FREQ_TRAN_VOTING_7)); - dev_info(adev->dev, " RCU_UC_EVENTS=0x%08X\n", - RREG32_SMC(ixRCU_UC_EVENTS)); - dev_info(adev->dev, " DPM_TABLE_475=0x%08X\n", - RREG32_SMC(ixDPM_TABLE_475)); - dev_info(adev->dev, " MC_SEQ_RAS_TIMING_LP=0x%08X\n", - RREG32(mmMC_SEQ_RAS_TIMING_LP)); - dev_info(adev->dev, " MC_SEQ_RAS_TIMING=0x%08X\n", - RREG32(mmMC_SEQ_RAS_TIMING)); - dev_info(adev->dev, " MC_SEQ_CAS_TIMING_LP=0x%08X\n", - RREG32(mmMC_SEQ_CAS_TIMING_LP)); - dev_info(adev->dev, " MC_SEQ_CAS_TIMING=0x%08X\n", - RREG32(mmMC_SEQ_CAS_TIMING)); - dev_info(adev->dev, " MC_SEQ_DLL_STBY_LP=0x%08X\n", - RREG32(mmMC_SEQ_DLL_STBY_LP)); - dev_info(adev->dev, " 
MC_SEQ_DLL_STBY=0x%08X\n", - RREG32(mmMC_SEQ_DLL_STBY)); - dev_info(adev->dev, " MC_SEQ_G5PDX_CMD0_LP=0x%08X\n", - RREG32(mmMC_SEQ_G5PDX_CMD0_LP)); - dev_info(adev->dev, " MC_SEQ_G5PDX_CMD0=0x%08X\n", - RREG32(mmMC_SEQ_G5PDX_CMD0)); - dev_info(adev->dev, " MC_SEQ_G5PDX_CMD1_LP=0x%08X\n", - RREG32(mmMC_SEQ_G5PDX_CMD1_LP)); - dev_info(adev->dev, " MC_SEQ_G5PDX_CMD1=0x%08X\n", - RREG32(mmMC_SEQ_G5PDX_CMD1)); - dev_info(adev->dev, " MC_SEQ_G5PDX_CTRL_LP=0x%08X\n", - RREG32(mmMC_SEQ_G5PDX_CTRL_LP)); - dev_info(adev->dev, " MC_SEQ_G5PDX_CTRL=0x%08X\n", - RREG32(mmMC_SEQ_G5PDX_CTRL)); - dev_info(adev->dev, " MC_SEQ_PMG_DVS_CMD_LP=0x%08X\n", - RREG32(mmMC_SEQ_PMG_DVS_CMD_LP)); - dev_info(adev->dev, " MC_SEQ_PMG_DVS_CMD=0x%08X\n", - RREG32(mmMC_SEQ_PMG_DVS_CMD)); - dev_info(adev->dev, " MC_SEQ_PMG_DVS_CTL_LP=0x%08X\n", - RREG32(mmMC_SEQ_PMG_DVS_CTL_LP)); - dev_info(adev->dev, " MC_SEQ_PMG_DVS_CTL=0x%08X\n", - RREG32(mmMC_SEQ_PMG_DVS_CTL)); - dev_info(adev->dev, " MC_SEQ_MISC_TIMING_LP=0x%08X\n", - RREG32(mmMC_SEQ_MISC_TIMING_LP)); - dev_info(adev->dev, " MC_SEQ_MISC_TIMING=0x%08X\n", - RREG32(mmMC_SEQ_MISC_TIMING)); - dev_info(adev->dev, " MC_SEQ_MISC_TIMING2_LP=0x%08X\n", - RREG32(mmMC_SEQ_MISC_TIMING2_LP)); - dev_info(adev->dev, " MC_SEQ_MISC_TIMING2=0x%08X\n", - RREG32(mmMC_SEQ_MISC_TIMING2)); - dev_info(adev->dev, " MC_SEQ_PMG_CMD_EMRS_LP=0x%08X\n", - RREG32(mmMC_SEQ_PMG_CMD_EMRS_LP)); - dev_info(adev->dev, " MC_PMG_CMD_EMRS=0x%08X\n", - RREG32(mmMC_PMG_CMD_EMRS)); - dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS_LP=0x%08X\n", - RREG32(mmMC_SEQ_PMG_CMD_MRS_LP)); - dev_info(adev->dev, " MC_PMG_CMD_MRS=0x%08X\n", - RREG32(mmMC_PMG_CMD_MRS)); - dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS1_LP=0x%08X\n", - RREG32(mmMC_SEQ_PMG_CMD_MRS1_LP)); - dev_info(adev->dev, " MC_PMG_CMD_MRS1=0x%08X\n", - RREG32(mmMC_PMG_CMD_MRS1)); - dev_info(adev->dev, " MC_SEQ_WR_CTL_D0_LP=0x%08X\n", - RREG32(mmMC_SEQ_WR_CTL_D0_LP)); - dev_info(adev->dev, " MC_SEQ_WR_CTL_D0=0x%08X\n", - RREG32(mmMC_SEQ_WR_CTL_D0)); - dev_info(adev->dev, " MC_SEQ_WR_CTL_D1_LP=0x%08X\n", - RREG32(mmMC_SEQ_WR_CTL_D1_LP)); - dev_info(adev->dev, " MC_SEQ_WR_CTL_D1=0x%08X\n", - RREG32(mmMC_SEQ_WR_CTL_D1)); - dev_info(adev->dev, " MC_SEQ_RD_CTL_D0_LP=0x%08X\n", - RREG32(mmMC_SEQ_RD_CTL_D0_LP)); - dev_info(adev->dev, " MC_SEQ_RD_CTL_D0=0x%08X\n", - RREG32(mmMC_SEQ_RD_CTL_D0)); - dev_info(adev->dev, " MC_SEQ_RD_CTL_D1_LP=0x%08X\n", - RREG32(mmMC_SEQ_RD_CTL_D1_LP)); - dev_info(adev->dev, " MC_SEQ_RD_CTL_D1=0x%08X\n", - RREG32(mmMC_SEQ_RD_CTL_D1)); - dev_info(adev->dev, " MC_SEQ_PMG_TIMING_LP=0x%08X\n", - RREG32(mmMC_SEQ_PMG_TIMING_LP)); - dev_info(adev->dev, " MC_SEQ_PMG_TIMING=0x%08X\n", - RREG32(mmMC_SEQ_PMG_TIMING)); - dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS2_LP=0x%08X\n", - RREG32(mmMC_SEQ_PMG_CMD_MRS2_LP)); - dev_info(adev->dev, " MC_PMG_CMD_MRS2=0x%08X\n", - RREG32(mmMC_PMG_CMD_MRS2)); - dev_info(adev->dev, " MC_SEQ_WR_CTL_2_LP=0x%08X\n", - RREG32(mmMC_SEQ_WR_CTL_2_LP)); - dev_info(adev->dev, " MC_SEQ_WR_CTL_2=0x%08X\n", - RREG32(mmMC_SEQ_WR_CTL_2)); - dev_info(adev->dev, " PCIE_LC_SPEED_CNTL=0x%08X\n", - RREG32_PCIE(ixPCIE_LC_SPEED_CNTL)); - dev_info(adev->dev, " PCIE_LC_LINK_WIDTH_CNTL=0x%08X\n", - RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL)); - dev_info(adev->dev, " SMC_IND_INDEX_0=0x%08X\n", - RREG32(mmSMC_IND_INDEX_0)); - dev_info(adev->dev, " SMC_IND_DATA_0=0x%08X\n", - RREG32(mmSMC_IND_DATA_0)); - dev_info(adev->dev, " SMC_IND_ACCESS_CNTL=0x%08X\n", - RREG32(mmSMC_IND_ACCESS_CNTL)); - dev_info(adev->dev, " SMC_RESP_0=0x%08X\n", - RREG32(mmSMC_RESP_0)); 
- dev_info(adev->dev, " SMC_MESSAGE_0=0x%08X\n", - RREG32(mmSMC_MESSAGE_0)); - dev_info(adev->dev, " SMC_SYSCON_RESET_CNTL=0x%08X\n", - RREG32_SMC(ixSMC_SYSCON_RESET_CNTL)); - dev_info(adev->dev, " SMC_SYSCON_CLOCK_CNTL_0=0x%08X\n", - RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0)); - dev_info(adev->dev, " SMC_SYSCON_MISC_CNTL=0x%08X\n", - RREG32_SMC(ixSMC_SYSCON_MISC_CNTL)); - dev_info(adev->dev, " SMC_PC_C=0x%08X\n", - RREG32_SMC(ixSMC_PC_C)); -} - static int ci_dpm_soft_reset(void *handle) { return 0; @@ -6572,7 +6381,7 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev, } static int ci_dpm_process_interrupt(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, + struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { bool queue_thermal = false; @@ -6613,7 +6422,188 @@ static int ci_dpm_set_powergating_state(void *handle, return 0; } +static int ci_dpm_print_clock_levels(struct amdgpu_device *adev, + enum pp_clock_type type, char *buf) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table; + struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table; + struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table; + + int i, now, size = 0; + uint32_t clock, pcie_speed; + + switch (type) { + case PP_SCLK: + amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency); + clock = RREG32(mmSMC_MSG_ARG_0); + + for (i = 0; i < sclk_table->count; i++) { + if (clock > sclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < sclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, sclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_MCLK: + amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency); + clock = RREG32(mmSMC_MSG_ARG_0); + + for (i = 0; i < mclk_table->count; i++) { + if (clock > mclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < mclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, mclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_PCIE: + pcie_speed = ci_get_current_pcie_speed(adev); + for (i = 0; i < pcie_table->count; i++) { + if (pcie_speed != pcie_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < pcie_table->count; i++) + size += sprintf(buf + size, "%d: %s %s\n", i, + (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" : + (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : + (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", + (i == now) ? 
"*" : ""); + break; + default: + break; + } + + return size; +} + +static int ci_dpm_force_clock_level(struct amdgpu_device *adev, + enum pp_clock_type type, uint32_t mask) +{ + struct ci_power_info *pi = ci_get_pi(adev); + + if (adev->pm.dpm.forced_level + != AMDGPU_DPM_FORCED_LEVEL_MANUAL) + return -EINVAL; + + switch (type) { + case PP_SCLK: + if (!pi->sclk_dpm_key_disabled) + amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); + break; + + case PP_MCLK: + if (!pi->mclk_dpm_key_disabled) + amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); + break; + + case PP_PCIE: + { + uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask; + uint32_t level = 0; + + while (tmp >>= 1) + level++; + + if (!pi->pcie_dpm_key_disabled) + amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_PCIeDPM_ForceLevel, + level); + break; + } + default: + break; + } + + return 0; +} + +static int ci_dpm_get_sclk_od(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table); + struct ci_single_dpm_table *golden_sclk_table = + &(pi->golden_dpm_table.sclk_table); + int value; + + value = (sclk_table->dpm_levels[sclk_table->count - 1].value - + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * + 100 / + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return value; +} + +static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps); + struct ci_single_dpm_table *golden_sclk_table = + &(pi->golden_dpm_table.sclk_table); + + if (value > 20) + value = 20; + + ps->performance_levels[ps->performance_level_count - 1].sclk = + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * + value / 100 + + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return 0; +} + +static int ci_dpm_get_mclk_od(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table); + struct ci_single_dpm_table *golden_mclk_table = + &(pi->golden_dpm_table.mclk_table); + int value; + + value = (mclk_table->dpm_levels[mclk_table->count - 1].value - + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * + 100 / + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return value; +} + +static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps); + struct ci_single_dpm_table *golden_mclk_table = + &(pi->golden_dpm_table.mclk_table); + + if (value > 20) + value = 20; + + ps->performance_levels[ps->performance_level_count - 1].mclk = + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * + value / 100 + + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return 0; +} + const struct amd_ip_funcs ci_dpm_ip_funcs = { + .name = "ci_dpm", .early_init = ci_dpm_early_init, .late_init = ci_dpm_late_init, .sw_init = ci_dpm_sw_init, @@ -6625,7 +6615,6 @@ const struct amd_ip_funcs ci_dpm_ip_funcs = { .is_idle = ci_dpm_is_idle, .wait_for_idle = ci_dpm_wait_for_idle, .soft_reset = ci_dpm_soft_reset, - .print_status = 
ci_dpm_print_status, .set_clockgating_state = ci_dpm_set_clockgating_state, .set_powergating_state = ci_dpm_set_powergating_state, }; @@ -6647,6 +6636,12 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = { .get_fan_control_mode = &ci_dpm_get_fan_control_mode, .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent, .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent, + .print_clock_levels = ci_dpm_print_clock_levels, + .force_clock_level = ci_dpm_force_clock_level, + .get_sclk_od = ci_dpm_get_sclk_od, + .set_sclk_od = ci_dpm_set_sclk_od, + .get_mclk_od = ci_dpm_get_mclk_od, + .set_mclk_od = ci_dpm_set_mclk_od, }; static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h index faccc30c93bf..91be2996ae7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h @@ -193,6 +193,7 @@ struct ci_pt_defaults { struct ci_power_info { struct ci_dpm_table dpm_table; + struct ci_dpm_table golden_dpm_table; u32 voltage_control; u32 mvdd_control; u32 vddci_control; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index bddc9ba11495..4efc901f658c 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -879,7 +879,7 @@ static void cik_vga_set_state(struct amdgpu_device *adev, bool state) uint32_t tmp; tmp = RREG32(mmCONFIG_CNTL); - if (state == false) + if (!state) tmp |= CONFIG_CNTL__VGA_DIS_MASK; else tmp &= ~CONFIG_CNTL__VGA_DIS_MASK; @@ -962,7 +962,13 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev, return true; } -static struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { +static u32 cik_get_virtual_caps(struct amdgpu_device *adev) +{ + /* CIK does not support SR-IOV */ + return 0; +} + +static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { {mmGRBM_STATUS, false}, {mmGB_ADDR_CONFIG, false}, {mmMC_ARB_RAMCFG, false}, @@ -1029,12 +1035,12 @@ static uint32_t cik_read_indexed_register(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - gfx_v7_0_select_se_sh(adev, se_num, sh_num); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); return val; } @@ -1152,10 +1158,11 @@ static void kv_restore_regs_for_reset(struct amdgpu_device *adev, WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute); } -static void cik_gpu_pci_config_reset(struct amdgpu_device *adev) +static int cik_gpu_pci_config_reset(struct amdgpu_device *adev) { struct kv_reset_save_regs kv_save = { 0 }; u32 i; + int r = -EINVAL; dev_info(adev->dev, "GPU pci config reset\n"); @@ -1171,14 +1178,20 @@ static void cik_gpu_pci_config_reset(struct amdgpu_device *adev) /* wait for asic to come out of reset */ for (i = 0; i < adev->usec_timeout; i++) { - if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) + if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) { + /* enable BM */ + pci_set_master(adev->pdev); + r = 0; break; + } udelay(1); } /* does asic init need to be run first??? 
*/ if (adev->flags & AMD_IS_APU) kv_restore_regs_for_reset(adev, &kv_save); + + return r; } static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung) @@ -1204,13 +1217,14 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu */ static int cik_asic_reset(struct amdgpu_device *adev) { + int r; cik_set_bios_scratch_engine_hung(adev, true); - cik_gpu_pci_config_reset(adev); + r = cik_gpu_pci_config_reset(adev); cik_set_bios_scratch_engine_hung(adev, false); - return 0; + return r; } static int cik_set_uvd_clock(struct amdgpu_device *adev, u32 clock, @@ -2007,10 +2021,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = .get_xclk = &cik_get_xclk, .set_uvd_clocks = &cik_set_uvd_clocks, .set_vce_clocks = &cik_set_vce_clocks, - .get_cu_info = &gfx_v7_0_get_cu_info, - /* these should be moved to their own ip modules */ - .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, - .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle, + .get_virtual_caps = &cik_get_virtual_caps, }; static int cik_common_early_init(void *handle) @@ -2214,11 +2225,6 @@ static int cik_common_wait_for_idle(void *handle) return 0; } -static void cik_common_print_status(void *handle) -{ - -} - static int cik_common_soft_reset(void *handle) { /* XXX hard reset?? */ @@ -2238,6 +2244,7 @@ static int cik_common_set_powergating_state(void *handle, } const struct amd_ip_funcs cik_common_ip_funcs = { + .name = "cik_common", .early_init = cik_common_early_init, .late_init = NULL, .sw_init = cik_common_sw_init, @@ -2249,7 +2256,6 @@ const struct amd_ip_funcs cik_common_ip_funcs = { .is_idle = cik_common_is_idle, .wait_for_idle = cik_common_wait_for_idle, .soft_reset = cik_common_soft_reset, - .print_status = cik_common_print_status, .set_clockgating_state = cik_common_set_clockgating_state, .set_powergating_state = cik_common_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index 30c9b3beeef9..be3d6f79a864 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -103,7 +103,6 @@ static void cik_ih_disable_interrupts(struct amdgpu_device *adev) */ static int cik_ih_irq_init(struct amdgpu_device *adev) { - int ret = 0; int rb_bufsz; u32 interrupt_cntl, ih_cntl, ih_rb_cntl; u64 wptr_off; @@ -156,7 +155,7 @@ static int cik_ih_irq_init(struct amdgpu_device *adev) /* enable irqs */ cik_ih_enable_interrupts(adev); - return ret; + return 0; } /** @@ -243,7 +242,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev, /* wptr/rptr are in bytes! 
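 *
 * Editor's aside (illustration only, not part of the patch): each IV ring
 * entry is four 32-bit dwords, so with a byte-granular read pointer the
 * decode below is just:
 *
 *	ring_index = adev->irq.ih.rptr >> 2;		byte -> dword index
 *	dw[n] = le32_to_cpu(ih.ring[ring_index + n]);	n = 0..3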
*/ u32 ring_index = adev->irq.ih.rptr >> 2; uint32_t dw[4]; - + dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); @@ -372,35 +371,6 @@ static int cik_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static void cik_ih_print_status(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - dev_info(adev->dev, "CIK IH registers\n"); - dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", - RREG32(mmSRBM_STATUS)); - dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", - RREG32(mmSRBM_STATUS2)); - dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n", - RREG32(mmINTERRUPT_CNTL)); - dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n", - RREG32(mmINTERRUPT_CNTL2)); - dev_info(adev->dev, " IH_CNTL=0x%08X\n", - RREG32(mmIH_CNTL)); - dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n", - RREG32(mmIH_RB_CNTL)); - dev_info(adev->dev, " IH_RB_BASE=0x%08X\n", - RREG32(mmIH_RB_BASE)); - dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n", - RREG32(mmIH_RB_WPTR_ADDR_LO)); - dev_info(adev->dev, " IH_RB_WPTR_ADDR_HI=0x%08X\n", - RREG32(mmIH_RB_WPTR_ADDR_HI)); - dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n", - RREG32(mmIH_RB_RPTR)); - dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n", - RREG32(mmIH_RB_WPTR)); -} - static int cik_ih_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -412,8 +382,6 @@ static int cik_ih_soft_reset(void *handle) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK; if (srbm_soft_reset) { - cik_ih_print_status((void *)adev); - tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); @@ -428,8 +396,6 @@ static int cik_ih_soft_reset(void *handle) /* Wait a little for things to settle down */ udelay(50); - - cik_ih_print_status((void *)adev); } return 0; @@ -448,6 +414,7 @@ static int cik_ih_set_powergating_state(void *handle, } const struct amd_ip_funcs cik_ih_ip_funcs = { + .name = "cik_ih", .early_init = cik_ih_early_init, .late_init = NULL, .sw_init = cik_ih_sw_init, @@ -459,7 +426,6 @@ const struct amd_ip_funcs cik_ih_ip_funcs = { .is_idle = cik_ih_is_idle, .wait_for_idle = cik_ih_wait_for_idle, .soft_reset = cik_ih_soft_reset, - .print_status = cik_ih_print_status, .set_clockgating_state = cik_ih_set_clockgating_state, .set_powergating_state = cik_ih_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index d3ac3298fba8..ee6466912497 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -66,6 +66,16 @@ MODULE_FIRMWARE("radeon/mullins_sdma1.bin"); u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev); + +static void cik_sdma_free_microcode(struct amdgpu_device *adev) +{ + int i; + for (i = 0; i < adev->sdma.num_instances; i++) { + release_firmware(adev->sdma.instance[i].fw); + adev->sdma.instance[i].fw = NULL; + } +} + /* * sDMA - System DMA * Starting with CIK, the GPU has new asynchronous @@ -210,20 +220,10 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) * Schedule an IB in the DMA ring (CIK). 
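Throughout this series the emit_ib callbacks stop reading ib->vm_id and ib->ctx; the scheduler now hands vm_id and ctx_switch in as arguments, and the old next_rptr write-back preamble is dropped entirely. A condensed sketch of the resulting SDMA emit path under those assumptions (the gpu_addr/length_dw writes are recalled from the rest of this function and are not all visible in the hunk below; the function name is illustrative):

	static void example_sdma_emit_ib(struct amdgpu_ring *ring,
					 struct amdgpu_ib *ib,
					 unsigned vm_id, bool ctx_switch)
	{
		u32 extra_bits = vm_id & 0xf;	/* VMID rides in the packet */

		/* the IB packet must end on an 8 DW boundary */
		cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8);
		amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER,
						    0, extra_bits));
		amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* 32-byte aligned */
		amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
		amdgpu_ring_write(ring, ib->length_dw);
	}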
*/ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib) + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { - u32 extra_bits = ib->vm_id & 0xf; - u32 next_rptr = ring->wptr + 5; - - while ((next_rptr & 7) != 4) - next_rptr++; - - next_rptr += 4; - amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); - amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 1); /* number of DWs to follow */ - amdgpu_ring_write(ring, next_rptr); + u32 extra_bits = vm_id & 0xf; /* IB packet must end on an 8 DW boundary */ cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8); @@ -354,7 +354,7 @@ static void cik_sdma_enable(struct amdgpu_device *adev, bool enable) u32 me_cntl; int i; - if (enable == false) { + if (!enable) { cik_sdma_gfx_stop(adev); cik_sdma_rlc_stop(adev); } @@ -418,6 +418,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) /* Initialize the ring buffer's read and write pointers */ WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); /* set the wb address whether it's enabled or not */ WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], @@ -445,7 +447,12 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); ring->ready = true; + } + + cik_sdma_enable(adev, true); + for (i = 0; i < adev->sdma.num_instances; i++) { + ring = &adev->sdma.instance[i].ring; r = amdgpu_ring_test_ring(ring); if (r) { ring->ready = false; @@ -528,8 +535,8 @@ static int cik_sdma_start(struct amdgpu_device *adev) if (r) return r; - /* unhalt the MEs */ - cik_sdma_enable(adev, true); + /* halt the engine before programming */ + cik_sdma_enable(adev, false); /* start the gfx rings and rlc compute queues */ r = cik_sdma_gfx_resume(adev); @@ -610,20 +617,19 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring) * Test a simple IB in the DMA ring (CIK). * Returns 0 on success, error on failure.
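The rewritten IB tests replace the old busy-wait on the write-back slot with a bounded fence_wait_timeout(), whose return value is tri-state: 0 means the timeout expired, negative is a wait error, positive means the fence signaled. A distilled sketch of that handling, with the variable names taken from the hunk below:

	r = fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		/* fence never signaled within 'timeout' jiffies */
		r = -ETIMEDOUT;
	} else if (r < 0) {
		/* the wait itself failed, e.g. it was interrupted */
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		/* signaled: verify the value the IB was asked to write */
		tmp = le32_to_cpu(adev->wb.wb[index]);
		r = (tmp == 0xDEADBEEF) ? 0 : -EINVAL;
	}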
*/ -static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) +static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; - unsigned i; unsigned index; - int r; u32 tmp = 0; u64 gpu_addr; + long r; r = amdgpu_wb_get(adev, &index); if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); return r; } @@ -633,42 +639,40 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err0; } - ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); + ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, + SDMA_WRITE_SUB_OPCODE_LINEAR, 0); ib.ptr[1] = lower_32_bits(gpu_addr); ib.ptr[2] = upper_32_bits(gpu_addr); ib.ptr[3] = 1; ib.ptr[4] = 0xDEADBEEF; ib.length_dw = 5; - r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); if (r) goto err1; - r = fence_wait(f, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out\n"); + r = -ETIMEDOUT; goto err1; - } - for (i = 0; i < adev->usec_timeout; i++) { - tmp = le32_to_cpu(adev->wb.wb[index]); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < adev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ring->idx, i); + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err1; + } + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; } err1: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); err0: @@ -976,7 +980,7 @@ static int cik_sdma_sw_init(void *handle) ring = &adev->sdma.instance[i].ring; ring->ring_obj = NULL; sprintf(ring->name, "sdma%d", i); - r = amdgpu_ring_init(adev, ring, 256 * 1024, + r = amdgpu_ring_init(adev, ring, 1024, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf, &adev->sdma.trap_irq, (i == 0) ? 
@@ -997,6 +1001,7 @@ static int cik_sdma_sw_fini(void *handle) for (i = 0; i < adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); + cik_sdma_free_microcode(adev); return 0; } @@ -1064,57 +1069,6 @@ static int cik_sdma_wait_for_idle(void *handle) return -ETIMEDOUT; } -static void cik_sdma_print_status(void *handle) -{ - int i, j; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - dev_info(adev->dev, "CIK SDMA registers\n"); - dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", - RREG32(mmSRBM_STATUS2)); - for (i = 0; i < adev->sdma.num_instances; i++) { - dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n", - i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_ME_CNTL=0x%08X\n", - i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_CNTL=0x%08X\n", - i, RREG32(mmSDMA0_CNTL + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n", - i, RREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n", - i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_IB_CNTL=0x%08X\n", - i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_RB_CNTL=0x%08X\n", - i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR=0x%08X\n", - i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_RB_WPTR=0x%08X\n", - i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n", - i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n", - i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_RB_BASE=0x%08X\n", - i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n", - i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n", - i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i])); - mutex_lock(&adev->srbm_mutex); - for (j = 0; j < 16; j++) { - cik_srbm_select(adev, 0, 0, 0, j); - dev_info(adev->dev, " VM %d:\n", j); - dev_info(adev->dev, " SDMA0_GFX_VIRTUAL_ADDR=0x%08X\n", - RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i])); - dev_info(adev->dev, " SDMA0_GFX_APE1_CNTL=0x%08X\n", - RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i])); - } - cik_srbm_select(adev, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); - } -} - static int cik_sdma_soft_reset(void *handle) { u32 srbm_soft_reset = 0; @@ -1137,8 +1091,6 @@ static int cik_sdma_soft_reset(void *handle) } if (srbm_soft_reset) { - cik_sdma_print_status((void *)adev); - tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); @@ -1153,8 +1105,6 @@ static int cik_sdma_soft_reset(void *handle) /* Wait a little for things to settle down */ udelay(50); - - cik_sdma_print_status((void *)adev); } return 0; @@ -1278,6 +1228,7 @@ static int cik_sdma_set_powergating_state(void *handle, } const struct amd_ip_funcs cik_sdma_ip_funcs = { + .name = "cik_sdma", .early_init = cik_sdma_early_init, .late_init = NULL, .sw_init = cik_sdma_sw_init, @@ -1289,7 +1240,6 @@ const struct amd_ip_funcs cik_sdma_ip_funcs = { .is_idle = cik_sdma_is_idle, .wait_for_idle = cik_sdma_wait_for_idle, .soft_reset = cik_sdma_soft_reset, - .print_status = cik_sdma_print_status, 
.set_clockgating_state = cik_sdma_set_clockgating_state, .set_powergating_state = cik_sdma_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h index 60d4493206dd..c4f6f00d62bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/cikd.h +++ b/drivers/gpu/drm/amd/amdgpu/cikd.h @@ -190,8 +190,8 @@ # define MACRO_TILE_ASPECT(x) ((x) << 4) # define NUM_BANKS(x) ((x) << 6) -#define MSG_ENTER_RLC_SAFE_MODE 1 -#define MSG_EXIT_RLC_SAFE_MODE 0 +#define MSG_ENTER_RLC_SAFE_MODE 1 +#define MSG_EXIT_RLC_SAFE_MODE 0 /* * PM4 diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index e7ef2261ff4a..2a11413ed54a 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -425,7 +425,7 @@ static int cz_dpm_init(struct amdgpu_device *adev) pi->mgcg_cgtt_local1 = 0x0; pi->clock_slow_down_step = 25000; pi->skip_clock_slow_down = 1; - pi->enable_nb_ps_policy = 0; + pi->enable_nb_ps_policy = false; pi->caps_power_containment = true; pi->caps_cac = true; pi->didt_enabled = false; @@ -1579,7 +1579,6 @@ static int cz_dpm_update_sclk_limit(struct amdgpu_device *adev) static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev) { - int ret = 0; struct cz_power_info *pi = cz_get_pi(adev); if (pi->caps_sclk_ds) { @@ -1588,20 +1587,19 @@ static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev) CZ_MIN_DEEP_SLEEP_SCLK); } - return ret; + return 0; } /* ?? without dal support, is this still needed in setpowerstate list*/ static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev) { - int ret = 0; struct cz_power_info *pi = cz_get_pi(adev); cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetWatermarkFrequency, pi->sclk_dpm.soft_max_clk); - return ret; + return 0; } static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev) @@ -1636,7 +1634,6 @@ static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device *adev, static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev) { - int ret = 0; struct cz_power_info *pi = cz_get_pi(adev); struct cz_ps *ps = &pi->requested_ps; @@ -1647,21 +1644,19 @@ static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev) cz_dpm_nbdpm_lm_pstate_enable(adev, true); } - return ret; + return 0; } /* with dpm enabled */ static int cz_dpm_set_power_state(struct amdgpu_device *adev) { - int ret = 0; - cz_dpm_update_sclk_limit(adev); cz_dpm_set_deep_sleep_sclk_threshold(adev); cz_dpm_set_watermark_threshold(adev); cz_dpm_enable_nbdpm(adev); cz_dpm_update_low_memory_pstate(adev); - return ret; + return 0; } static void cz_dpm_post_set_power_state(struct amdgpu_device *adev) @@ -2224,12 +2219,14 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) } } } else { /*pi->caps_vce_pg*/ + pi->vce_power_gated = gate; cz_update_vce_dpm(adev); cz_enable_vce_dpm(adev, !gate); } } const struct amd_ip_funcs cz_dpm_ip_funcs = { + .name = "cz_dpm", .early_init = cz_dpm_early_init, .late_init = cz_dpm_late_init, .sw_init = cz_dpm_sw_init, @@ -2241,7 +2238,6 @@ const struct amd_ip_funcs cz_dpm_ip_funcs = { .is_idle = NULL, .wait_for_idle = NULL, .soft_reset = NULL, - .print_status = NULL, .set_clockgating_state = cz_dpm_set_clockgating_state, .set_powergating_state = cz_dpm_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index c79638f8e732..3d23a70b6432 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -103,7 +103,6 @@ 
static void cz_ih_disable_interrupts(struct amdgpu_device *adev) */ static int cz_ih_irq_init(struct amdgpu_device *adev) { - int ret = 0; int rb_bufsz; u32 interrupt_cntl, ih_cntl, ih_rb_cntl; u64 wptr_off; @@ -157,7 +156,7 @@ static int cz_ih_irq_init(struct amdgpu_device *adev) /* enable interrupts */ cz_ih_enable_interrupts(adev); - return ret; + return 0; } /** @@ -222,7 +221,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev, /* wptr/rptr are in bytes! */ u32 ring_index = adev->irq.ih.rptr >> 2; uint32_t dw[4]; - + dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); @@ -351,35 +350,6 @@ static int cz_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static void cz_ih_print_status(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - dev_info(adev->dev, "CZ IH registers\n"); - dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", - RREG32(mmSRBM_STATUS)); - dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", - RREG32(mmSRBM_STATUS2)); - dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n", - RREG32(mmINTERRUPT_CNTL)); - dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n", - RREG32(mmINTERRUPT_CNTL2)); - dev_info(adev->dev, " IH_CNTL=0x%08X\n", - RREG32(mmIH_CNTL)); - dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n", - RREG32(mmIH_RB_CNTL)); - dev_info(adev->dev, " IH_RB_BASE=0x%08X\n", - RREG32(mmIH_RB_BASE)); - dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n", - RREG32(mmIH_RB_WPTR_ADDR_LO)); - dev_info(adev->dev, " IH_RB_WPTR_ADDR_HI=0x%08X\n", - RREG32(mmIH_RB_WPTR_ADDR_HI)); - dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n", - RREG32(mmIH_RB_RPTR)); - dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n", - RREG32(mmIH_RB_WPTR)); -} - static int cz_ih_soft_reset(void *handle) { u32 srbm_soft_reset = 0; @@ -391,8 +361,6 @@ static int cz_ih_soft_reset(void *handle) SOFT_RESET_IH, 1); if (srbm_soft_reset) { - cz_ih_print_status((void *)adev); - tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); @@ -407,8 +375,6 @@ static int cz_ih_soft_reset(void *handle) /* Wait a little for things to settle down */ udelay(50); - - cz_ih_print_status((void *)adev); } return 0; @@ -429,6 +395,7 @@ static int cz_ih_set_powergating_state(void *handle, } const struct amd_ip_funcs cz_ih_ip_funcs = { + .name = "cz_ih", .early_init = cz_ih_early_init, .late_init = NULL, .sw_init = cz_ih_sw_init, @@ -440,7 +407,6 @@ const struct amd_ip_funcs cz_ih_ip_funcs = { .is_idle = cz_ih_is_idle, .wait_for_idle = cz_ih_wait_for_idle, .soft_reset = cz_ih_soft_reset, - .print_status = cz_ih_print_status, .set_clockgating_state = cz_ih_set_clockgating_state, .set_powergating_state = cz_ih_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h b/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h index 924d355b4e2c..026342fcf0f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h +++ b/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h @@ -77,7 +77,7 @@ struct cz_smu_private_data { uint8_t driver_buffer_length; uint8_t scratch_buffer_length; uint16_t toc_entry_used_count; - uint16_t toc_entry_initialize_index; + uint16_t toc_entry_initialize_index; uint16_t toc_entry_power_profiling_index; uint16_t toc_entry_aram; uint16_t toc_entry_ih_register_restore_task_index; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 6de2ce535e37..c1b04e9aab57 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ 
b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -284,10 +284,16 @@ static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev) * surface base address. */ static void dce_v10_0_page_flip(struct amdgpu_device *adev, - int crtc_id, u64 crtc_base) + int crtc_id, u64 crtc_base, bool async) { struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; + u32 tmp; + /* flip at hsync for async, default is vsync */ + tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, + GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0); + WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); /* update the primary scanout address */ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, upper_32_bits(crtc_base)); @@ -2211,6 +2217,14 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, dce_v10_0_vga_enable(crtc, false); + /* Make sure surface address is updated at vertical blank rather than + * horizontal blank + */ + tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, + GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0); + WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, upper_32_bits(fb_location)); WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, @@ -2261,13 +2275,6 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, (viewport_w << 16) | viewport_h); - /* pageflip setup */ - /* make sure flip is at vb rather than hb */ - tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, - GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0); - WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); - /* set pageflip to happen only at start of vblank interval (front porch) */ WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); @@ -2587,7 +2594,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, return -EINVAL; } - obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); + obj = drm_gem_object_lookup(file_priv, handle); if (!obj) { DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id); return -ENOENT; @@ -2660,19 +2667,21 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc) } } -static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int end = (start + size > 256) ? 
256 : start + size, i; + int i; /* userspace palettes are always correct as is */ - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { amdgpu_crtc->lut_r[i] = red[i] >> 6; amdgpu_crtc->lut_g[i] = green[i] >> 6; amdgpu_crtc->lut_b[i] = blue[i] >> 6; } dce_v10_0_crtc_load_lut(crtc); + + return 0; } static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc) @@ -2710,13 +2719,13 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); amdgpu_irq_update(adev, &adev->pageflip_irq, type); - drm_vblank_on(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_on(crtc); dce_v10_0_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - drm_vblank_off(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_off(crtc); if (amdgpu_crtc->enabled) { dce_v10_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); @@ -2992,6 +3001,8 @@ static int dce_v10_0_sw_init(void *handle) adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; + adev->ddev->mode_config.async_page_flip = true; + adev->ddev->mode_config.max_width = 16384; adev->ddev->mode_config.max_height = 16384; @@ -3130,14 +3141,6 @@ static int dce_v10_0_wait_for_idle(void *handle) return 0; } -static void dce_v10_0_print_status(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - dev_info(adev->dev, "DCE 10.x registers\n"); - /* XXX todo */ -} - static int dce_v10_0_soft_reset(void *handle) { u32 srbm_soft_reset = 0, tmp; @@ -3147,8 +3150,6 @@ static int dce_v10_0_soft_reset(void *handle) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; if (srbm_soft_reset) { - dce_v10_0_print_status((void *)adev); - tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); @@ -3163,7 +3164,6 @@ static int dce_v10_0_soft_reset(void *handle) /* Wait a little for things to settle down */ udelay(50); - dce_v10_0_print_status((void *)adev); } return 0; } @@ -3370,11 +3370,11 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev, /* wakeup usersapce */ if (works->event) - drm_send_vblank_event(adev->ddev, crtc_id, works->event); + drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); spin_unlock_irqrestore(&adev->ddev->event_lock, flags); - drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_put(&amdgpu_crtc->base); schedule_work(&works->unpin_work); return 0; @@ -3501,6 +3501,7 @@ static int dce_v10_0_set_powergating_state(void *handle, } const struct amd_ip_funcs dce_v10_0_ip_funcs = { + .name = "dce_v10_0", .early_init = dce_v10_0_early_init, .late_init = NULL, .sw_init = dce_v10_0_sw_init, @@ -3512,7 +3513,6 @@ const struct amd_ip_funcs dce_v10_0_ip_funcs = { .is_idle = dce_v10_0_is_idle, .wait_for_idle = dce_v10_0_wait_for_idle, .soft_reset = dce_v10_0_soft_reset, - .print_status = dce_v10_0_print_status, .set_clockgating_state = dce_v10_0_set_clockgating_state, .set_powergating_state = dce_v10_0_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index e9ccc6b787f3..d4bf133908b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -132,6 +132,22 @@ static const u32 stoney_golden_settings_a11[] = mmFBC_MISC, 0x1f311fff, 0x14302000, }; +static const u32 polaris11_golden_settings_a11[] = +{ + mmDCI_CLK_CNTL, 0x00000080, 0x00000000, + mmFBC_DEBUG_COMP, 
0x000000f0, 0x00000070, + mmFBC_DEBUG1, 0xffffffff, 0x00000008, + mmFBC_MISC, 0x9f313fff, 0x14302008, + mmHDMI_CONTROL, 0x313f031f, 0x00000011, +}; + +static const u32 polaris10_golden_settings_a11[] = +{ + mmDCI_CLK_CNTL, 0x00000080, 0x00000000, + mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, + mmFBC_MISC, 0x9f313fff, 0x14302008, + mmHDMI_CONTROL, 0x313f031f, 0x00000011, +}; static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev) { @@ -149,6 +165,16 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev) stoney_golden_settings_a11, (const u32)ARRAY_SIZE(stoney_golden_settings_a11)); break; + case CHIP_POLARIS11: + amdgpu_program_register_sequence(adev, + polaris11_golden_settings_a11, + (const u32)ARRAY_SIZE(polaris11_golden_settings_a11)); + break; + case CHIP_POLARIS10: + amdgpu_program_register_sequence(adev, + polaris10_golden_settings_a11, + (const u32)ARRAY_SIZE(polaris10_golden_settings_a11)); + break; default: break; } @@ -276,10 +302,16 @@ static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev) * surface base address. */ static void dce_v11_0_page_flip(struct amdgpu_device *adev, - int crtc_id, u64 crtc_base) + int crtc_id, u64 crtc_base, bool async) { struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; + u32 tmp; + /* flip immediate for async, default is vsync */ + tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, + GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0); + WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); /* update the scanout addresses */ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, upper_32_bits(crtc_base)); @@ -565,35 +597,14 @@ static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev, crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), CRTC_CONTROL, CRTC_MASTER_EN); if (crtc_enabled) { -#if 0 - u32 frame_count; - int j; - +#if 1 save->crtc_enabled[i] = true; tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { - amdgpu_display_vblank_wait(adev, i); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + /*it is correct only for RGB ; black is 0*/ + WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0); tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - } - /* wait for the next frame */ - frame_count = amdgpu_display_vblank_get_counter(adev, i); - for (j = 0; j < adev->usec_timeout; j++) { - if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) - break; - udelay(1); - } - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) { - tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1); - WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) { - tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1); - WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp); } #else /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ @@ -614,54 +625,20 @@ static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev, static void dce_v11_0_resume_mc_access(struct amdgpu_device *adev, struct amdgpu_mode_mc_save *save) { - u32 tmp, frame_count; - int i, j; + u32 tmp; + int 
i; /* update crtc base addresses */ for (i = 0; i < adev->mode_info.num_crtc; i++) { WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], upper_32_bits(adev->mc.vram_start)); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], (u32)adev->mc.vram_start); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); if (save->crtc_enabled[i]) { - tmp = RREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) { - tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3); - WREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) { - tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0); - WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) { - tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0); - WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp); - } - for (j = 0; j < adev->usec_timeout; j++) { - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0) - break; - udelay(1); - } tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - /* wait for the next frame */ - frame_count = amdgpu_display_vblank_get_counter(adev, i); - for (j = 0; j < adev->usec_timeout; j++) { - if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) - break; - udelay(1); - } } } @@ -1624,6 +1601,7 @@ static const u32 pin_offsets[] = AUD4_REGISTER_OFFSET, AUD5_REGISTER_OFFSET, AUD6_REGISTER_OFFSET, + AUD7_REGISTER_OFFSET, }; static int dce_v11_0_audio_init(struct amdgpu_device *adev) @@ -1635,7 +1613,20 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev) adev->mode_info.audio.enabled = true; - adev->mode_info.audio.num_pins = 7; + switch (adev->asic_type) { + case CHIP_CARRIZO: + case CHIP_STONEY: + adev->mode_info.audio.num_pins = 7; + break; + case CHIP_POLARIS10: + adev->mode_info.audio.num_pins = 8; + break; + case CHIP_POLARIS11: + adev->mode_info.audio.num_pins = 6; + break; + default: + return -EINVAL; + } for (i = 0; i < adev->mode_info.audio.num_pins; i++) { adev->mode_info.audio.pin[i].channels = -1; @@ -2201,6 +2192,14 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, dce_v11_0_vga_enable(crtc, false); + /* Make sure surface address is updated at vertical blank rather than + * horizontal blank + */ + tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, + GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0); + WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, upper_32_bits(fb_location)); WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, @@ -2251,13 +2250,6 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, (viewport_w << 16) | viewport_h); - /* pageflip setup */ - /* make sure flip is at vb 
rather than hb */ - tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, - GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0); - WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); - /* set pageflip to happen only at start of vblank interval (front porch) */ WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); @@ -2427,6 +2419,40 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc) u32 pll_in_use; int pll; + if ((adev->asic_type == CHIP_POLARIS10) || + (adev->asic_type == CHIP_POLARIS11)) { + struct amdgpu_encoder *amdgpu_encoder = + to_amdgpu_encoder(amdgpu_crtc->encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + + if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) + return ATOM_DP_DTO; + + switch (amdgpu_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + if (dig->linkb) + return ATOM_COMBOPHY_PLL1; + else + return ATOM_COMBOPHY_PLL0; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + if (dig->linkb) + return ATOM_COMBOPHY_PLL3; + else + return ATOM_COMBOPHY_PLL2; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + if (dig->linkb) + return ATOM_COMBOPHY_PLL5; + else + return ATOM_COMBOPHY_PLL4; + break; + default: + DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); + return ATOM_PPLL_INVALID; + } + } + if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) { if (adev->clock.dp_extclk) /* skip PPLL programming if using ext clock */ @@ -2578,7 +2604,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, return -EINVAL; } - obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); + obj = drm_gem_object_lookup(file_priv, handle); if (!obj) { DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id); return -ENOENT; @@ -2651,19 +2677,21 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc) } } -static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int end = (start + size > 256) ? 
256 : start + size, i; + int i; /* userspace palettes are always correct as is */ - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { amdgpu_crtc->lut_r[i] = red[i] >> 6; amdgpu_crtc->lut_g[i] = green[i] >> 6; amdgpu_crtc->lut_b[i] = blue[i] >> 6; } dce_v11_0_crtc_load_lut(crtc); + + return 0; } static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc) @@ -2701,13 +2729,13 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); amdgpu_irq_update(adev, &adev->pageflip_irq, type); - drm_vblank_on(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_on(crtc); dce_v11_0_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - drm_vblank_off(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_off(crtc); if (amdgpu_crtc->enabled) { dce_v11_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); @@ -2782,7 +2810,17 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc) case ATOM_PPLL2: /* disable the ppll */ amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, - 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); + 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); + break; + case ATOM_COMBOPHY_PLL0: + case ATOM_COMBOPHY_PLL1: + case ATOM_COMBOPHY_PLL2: + case ATOM_COMBOPHY_PLL3: + case ATOM_COMBOPHY_PLL4: + case ATOM_COMBOPHY_PLL5: + /* disable the ppll */ + amdgpu_atombios_crtc_program_pll(crtc, ATOM_CRTC_INVALID, amdgpu_crtc->pll_id, + 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); break; default: break; @@ -2800,11 +2838,28 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; if (!amdgpu_crtc->adjusted_clock) return -EINVAL; - amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode); + if ((adev->asic_type == CHIP_POLARIS10) || + (adev->asic_type == CHIP_POLARIS11)) { + struct amdgpu_encoder *amdgpu_encoder = + to_amdgpu_encoder(amdgpu_crtc->encoder); + int encoder_mode = + amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder); + + /* SetPixelClock calculates the plls and ss values now */ + amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, + amdgpu_crtc->pll_id, + encoder_mode, amdgpu_encoder->encoder_id, + adjusted_mode->clock, 0, 0, 0, 0, + amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss); + } else { + amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode); + } amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode); dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0); amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); @@ -2955,6 +3010,16 @@ static int dce_v11_0_early_init(void *handle) adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 9; break; + case CHIP_POLARIS10: + adev->mode_info.num_crtc = 6; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 6; + break; + case CHIP_POLARIS11: + adev->mode_info.num_crtc = 5; + adev->mode_info.num_hpd = 5; + adev->mode_info.num_dig = 5; + break; default: /* FIXME: not supported yet */ return -EINVAL; @@ -2987,6 +3052,8 @@ static int dce_v11_0_sw_init(void *handle) adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; + adev->ddev->mode_config.async_page_flip = true; + adev->ddev->mode_config.max_width = 16384; adev->ddev->mode_config.max_height = 16384; @@ -3057,7 +3124,15 @@ static int 
dce_v11_0_hw_init(void *handle) /* init dig PHYs, disp eng pll */ amdgpu_atombios_crtc_powergate_init(adev); amdgpu_atombios_encoder_init_dig(adev); - amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); + if ((adev->asic_type == CHIP_POLARIS10) || + (adev->asic_type == CHIP_POLARIS11)) { + amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk, + DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS); + amdgpu_atombios_crtc_set_dce_clock(adev, 0, + DCE_CLOCK_TYPE_DPREFCLK, ATOM_GCK_DFS); + } else { + amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); + } /* initialize hpd */ dce_v11_0_hpd_init(adev); @@ -3126,14 +3201,6 @@ static int dce_v11_0_wait_for_idle(void *handle) return 0; } -static void dce_v11_0_print_status(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - dev_info(adev->dev, "DCE 10.x registers\n"); - /* XXX todo */ -} - static int dce_v11_0_soft_reset(void *handle) { u32 srbm_soft_reset = 0, tmp; @@ -3143,8 +3210,6 @@ static int dce_v11_0_soft_reset(void *handle) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; if (srbm_soft_reset) { - dce_v11_0_print_status((void *)adev); - tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); @@ -3159,7 +3224,6 @@ static int dce_v11_0_soft_reset(void *handle) /* Wait a little for things to settle down */ udelay(50); - dce_v11_0_print_status((void *)adev); } return 0; } @@ -3366,11 +3430,11 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev, /* wakeup usersapce */ if(works->event) - drm_send_vblank_event(adev->ddev, crtc_id, works->event); + drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); spin_unlock_irqrestore(&adev->ddev->event_lock, flags); - drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_put(&amdgpu_crtc->base); schedule_work(&works->unpin_work); return 0; @@ -3497,6 +3561,7 @@ static int dce_v11_0_set_powergating_state(void *handle, } const struct amd_ip_funcs dce_v11_0_ip_funcs = { + .name = "dce_v11_0", .early_init = dce_v11_0_early_init, .late_init = NULL, .sw_init = dce_v11_0_sw_init, @@ -3508,7 +3573,6 @@ const struct amd_ip_funcs dce_v11_0_ip_funcs = { .is_idle = dce_v11_0_is_idle, .wait_for_idle = dce_v11_0_wait_for_idle, .soft_reset = dce_v11_0_soft_reset, - .print_status = dce_v11_0_print_status, .set_clockgating_state = dce_v11_0_set_clockgating_state, .set_powergating_state = dce_v11_0_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index e56b55d8c280..4fdfab1e9200 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -233,10 +233,13 @@ static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev) * surface base address. */ static void dce_v8_0_page_flip(struct amdgpu_device *adev, - int crtc_id, u64 crtc_base) + int crtc_id, u64 crtc_base, bool async) { struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; + /* flip at hsync for async, default is vsync */ + WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ? 
+ GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0); /* update the primary scanout addresses */ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, upper_32_bits(crtc_base)); @@ -523,36 +526,16 @@ static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev, crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), CRTC_CONTROL, CRTC_MASTER_EN); if (crtc_enabled) { -#if 0 - u32 frame_count; - int j; - +#if 1 save->crtc_enabled[i] = true; tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { - amdgpu_display_vblank_wait(adev, i); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + /*it is correct only for RGB ; black is 0*/ + WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0); tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - } - /* wait for the next frame */ - frame_count = amdgpu_display_vblank_get_counter(adev, i); - for (j = 0; j < adev->usec_timeout; j++) { - if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) - break; - udelay(1); - } - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) { - tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1); - WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1); - WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); } + mdelay(20); #else /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); @@ -572,55 +555,22 @@ static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev, static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev, struct amdgpu_mode_mc_save *save) { - u32 tmp, frame_count; - int i, j; + u32 tmp; + int i; /* update crtc base addresses */ for (i = 0; i < adev->mode_info.num_crtc; i++) { WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], upper_32_bits(adev->mc.vram_start)); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], (u32)adev->mc.vram_start); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); if (save->crtc_enabled[i]) { - tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3); - WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) { - tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0); - WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0); - WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); - } - for (j = 0; j < adev->usec_timeout; j++) { - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0) - break; - udelay(1); - } tmp = RREG32(mmCRTC_BLANK_CONTROL + 
crtc_offsets[i]); tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - /* wait for the next frame */ - frame_count = amdgpu_display_vblank_get_counter(adev, i); - for (j = 0; j < adev->usec_timeout; j++) { - if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) - break; - udelay(1); - } } + mdelay(20); } WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); @@ -1999,7 +1949,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, uint32_t fb_format, fb_pitch_pixels; u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); u32 pipe_config; - u32 tmp, viewport_w, viewport_h; + u32 viewport_w, viewport_h; int r; bool bypass_lut = false; @@ -2135,6 +2085,11 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, dce_v8_0_vga_enable(crtc, false); + /* Make sure surface address is updated at vertical blank rather than + * horizontal blank + */ + WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0); + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, upper_32_bits(fb_location)); WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, @@ -2182,12 +2137,6 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, (viewport_w << 16) | viewport_h); - /* pageflip setup */ - /* make sure flip is at vb rather than hb */ - tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); - tmp &= ~GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK; - WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); - /* set pageflip to happen only at start of vblank interval (front porch) */ WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); @@ -2499,7 +2448,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, return -EINVAL; } - obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); + obj = drm_gem_object_lookup(file_priv, handle); if (!obj) { DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id); return -ENOENT; @@ -2572,19 +2521,21 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc) } } -static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int end = (start + size > 256) ? 
256 : start + size, i; + int i; /* userspace palettes are always correct as is */ - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { amdgpu_crtc->lut_r[i] = red[i] >> 6; amdgpu_crtc->lut_g[i] = green[i] >> 6; amdgpu_crtc->lut_b[i] = blue[i] >> 6; } dce_v8_0_crtc_load_lut(crtc); + + return 0; } static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc) @@ -2622,13 +2573,13 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); amdgpu_irq_update(adev, &adev->pageflip_irq, type); - drm_vblank_on(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_on(crtc); dce_v8_0_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - drm_vblank_off(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_off(crtc); if (amdgpu_crtc->enabled) { dce_v8_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); @@ -2902,6 +2853,8 @@ static int dce_v8_0_sw_init(void *handle) adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; + adev->ddev->mode_config.async_page_flip = true; + adev->ddev->mode_config.max_width = 16384; adev->ddev->mode_config.max_height = 16384; @@ -3038,14 +2991,6 @@ static int dce_v8_0_wait_for_idle(void *handle) return 0; } -static void dce_v8_0_print_status(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - dev_info(adev->dev, "DCE 8.x registers\n"); - /* XXX todo */ -} - static int dce_v8_0_soft_reset(void *handle) { u32 srbm_soft_reset = 0, tmp; @@ -3055,8 +3000,6 @@ static int dce_v8_0_soft_reset(void *handle) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; if (srbm_soft_reset) { - dce_v8_0_print_status((void *)adev); - tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); @@ -3071,7 +3014,6 @@ static int dce_v8_0_soft_reset(void *handle) /* Wait a little for things to settle down */ udelay(50); - dce_v8_0_print_status((void *)adev); } return 0; } @@ -3379,11 +3321,11 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev, /* wakeup usersapce */ if (works->event) - drm_send_vblank_event(adev->ddev, crtc_id, works->event); + drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); spin_unlock_irqrestore(&adev->ddev->event_lock, flags); - drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_put(&amdgpu_crtc->base); schedule_work(&works->unpin_work); return 0; @@ -3431,6 +3373,7 @@ static int dce_v8_0_set_powergating_state(void *handle, } const struct amd_ip_funcs dce_v8_0_ip_funcs = { + .name = "dce_v8_0", .early_init = dce_v8_0_early_init, .late_init = NULL, .sw_init = dce_v8_0_sw_init, @@ -3442,7 +3385,6 @@ const struct amd_ip_funcs dce_v8_0_ip_funcs = { .is_idle = dce_v8_0_is_idle, .wait_for_idle = dce_v8_0_wait_for_idle, .soft_reset = dce_v8_0_soft_reset, - .print_status = dce_v8_0_print_status, .set_clockgating_state = dce_v8_0_set_clockgating_state, .set_powergating_state = dce_v8_0_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c index 4b0e45a27129..ed03b75175d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c @@ -72,6 +72,11 @@ static int fiji_dpm_sw_init(void *handle) static int fiji_dpm_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + return 0; } @@ -143,6 +148,7 @@ 
static int fiji_dpm_set_powergating_state(void *handle, } const struct amd_ip_funcs fiji_dpm_ip_funcs = { + .name = "fiji_dpm", .early_init = fiji_dpm_early_init, .late_init = NULL, .sw_init = fiji_dpm_sw_init, @@ -154,7 +160,6 @@ const struct amd_ip_funcs fiji_dpm_ip_funcs = { .is_idle = NULL, .wait_for_idle = NULL, .soft_reset = NULL, - .print_status = NULL, .set_clockgating_state = fiji_dpm_set_clockgating_state, .set_powergating_state = fiji_dpm_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c index b336c918d6a7..b3e19ba4c57f 100644 --- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c @@ -173,7 +173,7 @@ static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg) { if (!fiji_is_smc_ram_running(adev)) { - return -EINVAL;; + return -EINVAL; } if (wait_smu_response(adev)) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index bb8709066fd8..d869d058ef24 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -53,7 +53,6 @@ static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev); static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev); static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev); -int gfx_v7_0_get_cu_info(struct amdgpu_device *, struct amdgpu_cu_info *); MODULE_FIRMWARE("radeon/bonaire_pfp.bin"); MODULE_FIRMWARE("radeon/bonaire_me.bin"); @@ -882,6 +881,7 @@ static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev); static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer); static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev); static void gfx_v7_0_init_pg(struct amdgpu_device *adev); +static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev); /* * Core functions @@ -991,6 +991,22 @@ out: return err; } +static void gfx_v7_0_free_microcode(struct amdgpu_device *adev) +{ + release_firmware(adev->gfx.pfp_fw); + adev->gfx.pfp_fw = NULL; + release_firmware(adev->gfx.me_fw); + adev->gfx.me_fw = NULL; + release_firmware(adev->gfx.ce_fw); + adev->gfx.ce_fw = NULL; + release_firmware(adev->gfx.mec_fw); + adev->gfx.mec_fw = NULL; + release_firmware(adev->gfx.mec2_fw); + adev->gfx.mec2_fw = NULL; + release_firmware(adev->gfx.rlc_fw); + adev->gfx.rlc_fw = NULL; +} + /** * gfx_v7_0_tiling_mode_table_init - init the hw tiling table * @@ -1567,9 +1583,15 @@ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev) * registers are instanced per SE or SH. 0xffffffff means * broadcast to all SEs or SHs (CIK). 
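The select helper goes static below and gains an instance argument with the same broadcast convention: 0xffffffff at any level (SE, SH, or instance) sets the corresponding *_BROADCAST_WRITES bit in GRBM_GFX_INDEX, anything else indexes a single unit. Condensed from the hunk that follows, with only the broadcast branch spelled out:

	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);

	if (se_num == 0xffffffff && sh_num == 0xffffffff)
		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
			GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
	/* otherwise SE_INDEX/SH_INDEX select one specific SE/SH */
	WREG32(mmGRBM_GFX_INDEX, data);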
*/ -void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) +static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, + u32 se_num, u32 sh_num, u32 instance) { - u32 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK; + u32 data; + + if (instance == 0xffffffff) + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); + else + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance); if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | @@ -1643,13 +1665,13 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v7_0_select_se_sh(adev, i, j); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); data = gfx_v7_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.config.backend_enable_mask = active_rbs; @@ -1718,6 +1740,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) gfx_v7_0_tiling_mode_table_init(adev); gfx_v7_0_setup_rb(adev); + gfx_v7_0_get_cu_info(adev); /* set HW defaults for 3D engine */ WREG32(mmCP_MEQ_THRESHOLDS, @@ -1729,7 +1752,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) * making sure that the following register writes will be broadcasted * to all the shaders */ - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ @@ -2029,28 +2052,13 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring, * on the gfx ring for execution by the GPU. 
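On the gfx ring the explicit ctx_switch flag replaces the old ring->current_ctx != ib->ctx comparison: when set, a SWITCH_BUFFER packet precedes the first IB of the new context, and the VMID is packed into bits 31:24 of the INDIRECT_BUFFER control word. In sketch form, following the hunk below:

	if (ctx_switch) {
		/* switch CE buffers before the first IB of a new context */
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
	}
	control |= ib->length_dw | (vm_id << 24);	/* VMID in bits 31:24 */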
*/ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, - struct amdgpu_ib *ib) + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { - bool need_ctx_switch = ring->current_ctx != ib->ctx; u32 header, control = 0; - u32 next_rptr = ring->wptr + 5; - - /* drop the CE preamble IB for the same context */ - if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch) - return; - - if (need_ctx_switch) - next_rptr += 2; - - next_rptr += 4; - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); - amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, next_rptr); /* insert SWITCH_BUFFER packet before first IB in the ring frame */ - if (need_ctx_switch) { + if (ctx_switch) { amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); amdgpu_ring_write(ring, 0); } @@ -2060,7 +2068,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, else header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); - control |= ib->length_dw | (ib->vm_id << 24); + control |= ib->length_dw | (vm_id << 24); amdgpu_ring_write(ring, header); amdgpu_ring_write(ring, @@ -2073,24 +2081,12 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, } static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, - struct amdgpu_ib *ib) + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { - u32 header, control = 0; - u32 next_rptr = ring->wptr + 5; - - control |= INDIRECT_BUFFER_VALID; - next_rptr += 4; - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); - amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, next_rptr); + u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24); - header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); - - control |= ib->length_dw | (ib->vm_id << 24); - - amdgpu_ring_write(ring, header); + amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); amdgpu_ring_write(ring, #ifdef __BIG_ENDIAN (2 << 0) | @@ -2109,26 +2105,25 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, * Provides a basic gfx ring test to verify that IBs are working. * Returns 0 on success, error on failure. 
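Unlike the SDMA test, the gfx variant round-trips through a scratch register: it is primed with 0xCAFEDEAD, the IB writes 0xDEADBEEF via SET_UCONFIG_REG, and the readback decides pass or fail. The essential packets, condensed from the hunk below (the ptr[1] register-offset line is not visible in this hunk; its encoding here is an assumption from the file's SET_UCONFIG_REG convention):

	WREG32(scratch, 0xCAFEDEAD);		/* sentinel before the test */
	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
	ib.ptr[1] = scratch - PACKET3_SET_UCONFIG_REG_START;
	ib.ptr[2] = 0xDEADBEEF;			/* value the GPU should write */
	ib.length_dw = 3;
	/* schedule the IB, wait on its fence as shown earlier, then: */
	tmp = RREG32(scratch);
	r = (tmp == 0xDEADBEEF) ? 0 : -EINVAL;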
*/ -static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) +static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; uint32_t scratch; uint32_t tmp = 0; - unsigned i; - int r; + long r; r = amdgpu_gfx_scratch_get(adev, &scratch); if (r) { - DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r); + DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r); return r; } WREG32(scratch, 0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err1; } ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); @@ -2136,25 +2131,23 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[2] = 0xDEADBEEF; ib.length_dw = 3; - r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); if (r) goto err2; - r = fence_wait(f, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out\n"); + r = -ETIMEDOUT; goto err2; - } - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(scratch); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < adev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ring->idx, i); + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err2; + } + tmp = RREG32(scratch); + if (tmp == 0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } else { DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); @@ -2162,7 +2155,6 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) } err2: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); err1: @@ -3053,6 +3045,19 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev) static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) { int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); + uint32_t seq = ring->fence_drv.sync_seq; + uint64_t addr = ring->fence_drv.gpu_addr; + + amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ + WAIT_REG_MEM_FUNCTION(3) | /* equal */ + WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */ + amdgpu_ring_write(ring, addr & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); + amdgpu_ring_write(ring, seq); + amdgpu_ring_write(ring, 0xffffffff); + amdgpu_ring_write(ring, 4); /* poll interval */ + if (usepfp) { /* synce CE with ME to prevent CE fetch CEIB before context switch done */ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); @@ -3080,18 +3085,6 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vm_id, uint64_t pd_addr) { int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); - uint32_t seq = ring->fence_drv.sync_seq; - uint64_t addr = ring->fence_drv.gpu_addr; - - amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); - amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ - WAIT_REG_MEM_FUNCTION(3) | /* equal */ - WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */ - amdgpu_ring_write(ring, addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); - amdgpu_ring_write(ring, seq); - amdgpu_ring_write(ring, 0xffffffff); - amdgpu_ring_write(ring, 4); /* poll interval */ amdgpu_ring_write(ring, 
PACKET3(PACKET3_WRITE_DATA, 3)); amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | @@ -3206,7 +3199,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) } } adev->gfx.rlc.cs_data = ci_cs_data; - adev->gfx.rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4; + adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */ + adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */ src_ptr = adev->gfx.rlc.reg_list; dws = adev->gfx.rlc.reg_list_size; @@ -3364,7 +3358,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v7_0_select_se_sh(adev, i, j); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); for (k = 0; k < adev->usec_timeout; k++) { if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0) break; @@ -3372,7 +3366,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | @@ -3419,7 +3413,7 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev) return orig; } -void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev) +static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev) { u32 tmp, i, mask; @@ -3441,7 +3435,7 @@ void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev) } } -void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev) +static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev) { u32 tmp; @@ -3456,7 +3450,7 @@ void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev) * * Halt the RLC ME (MicroEngine) (CIK). 
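(Aside on the cp_table_size change in the rlc_init hunk above — a sketch of the arithmetic only; the factor of five plausibly matches the per-ME jump tables (CE/PFP/ME/MEC/MEC2) that cz_init_cp_jump_table() walks later in this patch:)

	u32 jt_bytes = CP_ME_TABLE_SIZE * 5 * 4;  /* five MEs, dwords to bytes */
	u32 table_sz = ALIGN(jt_bytes, 2048);     /* CP jump tables, 2KB aligned */
	table_sz += 64 * 1024;                    /* plus 64KB reserved for GDS */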
*/ -void gfx_v7_0_rlc_stop(struct amdgpu_device *adev) +static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev) { WREG32(mmRLC_CNTL, 0); @@ -3532,7 +3526,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) WREG32(mmRLC_LB_CNTR_MAX, 0x00008000); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff); WREG32(mmRLC_LB_PARAMS, 0x00600408); WREG32(mmRLC_LB_CNTL, 0x80000004); @@ -3572,7 +3566,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable) tmp = gfx_v7_0_halt_rlc(adev); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | @@ -3623,7 +3617,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) tmp = gfx_v7_0_halt_rlc(adev); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | @@ -3674,7 +3668,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) tmp = gfx_v7_0_halt_rlc(adev); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK; @@ -3852,6 +3846,20 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev, } } +static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, + u32 bitmap) +{ + u32 data; + + if (!bitmap) + return; + + data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; + data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; + + WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data); +} + static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; @@ -3869,18 +3877,13 @@ static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev) static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev) { - uint32_t tmp, active_cu_number; - struct amdgpu_cu_info cu_info; - - gfx_v7_0_get_cu_info(adev, &cu_info); - tmp = cu_info.ao_cu_mask; - active_cu_number = cu_info.number; + u32 tmp; - WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, tmp); + WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask); tmp = RREG32(mmRLC_MAX_PG_CU); tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK; - tmp |= (active_cu_number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT); + tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT); WREG32(mmRLC_MAX_PG_CU, tmp); } @@ -4113,7 +4116,7 @@ static void gfx_v7_0_fini_pg(struct amdgpu_device *adev) * Fetches a GPU clock counter snapshot (SI). * Returns the 64 bit clock counter snapshot. 
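(Aside on the de-exported helpers: with the gfx_v7_0.h prototypes removed at the end of this file's diff, outside callers are expected to go through the function tables that early_init installs — adev->gfx.funcs and adev->gfx.rlc.funcs. A hedged sketch of the indirection, using only names from the hunks below:)

	/* read the GPU clock counter through the per-ASIC gfx table */
	uint64_t clock = adev->gfx.funcs->get_gpu_clock_counter(adev);

	/* bracket RLC-sensitive register updates with safe mode */
	adev->gfx.rlc.funcs->enter_safe_mode(adev);
	/* ... program clock-/power-gating registers ... */
	adev->gfx.rlc.funcs->exit_safe_mode(adev);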
*/ -uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev) +static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock; @@ -4173,12 +4176,24 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring, amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } +static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = { + .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, + .select_se_sh = &gfx_v7_0_select_se_sh, +}; + +static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { + .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode, + .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode +}; + static int gfx_v7_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS; adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS; + adev->gfx.funcs = &gfx_v7_0_gfx_funcs; + adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs; gfx_v7_0_set_ring_funcs(adev); gfx_v7_0_set_irq_funcs(adev); gfx_v7_0_set_gds_init(adev); @@ -4414,7 +4429,7 @@ static int gfx_v7_0_sw_init(void *handle) ring = &adev->gfx.gfx_ring[i]; ring->ring_obj = NULL; sprintf(ring->name, "gfx"); - r = amdgpu_ring_init(adev, ring, 1024 * 1024, + r = amdgpu_ring_init(adev, ring, 1024, PACKET3(PACKET3_NOP, 0x3FFF), 0xf, &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP, AMDGPU_RING_TYPE_GFX); @@ -4438,10 +4453,10 @@ static int gfx_v7_0_sw_init(void *handle) ring->me = 1; /* first MEC */ ring->pipe = i / 8; ring->queue = i % 8; - sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue); + sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; /* type-2 packets are deprecated on MEC, use type-3 instead */ - r = amdgpu_ring_init(adev, ring, 1024 * 1024, + r = amdgpu_ring_init(adev, ring, 1024, PACKET3(PACKET3_NOP, 0x3FFF), 0xf, &adev->gfx.eop_irq, irq_type, AMDGPU_RING_TYPE_COMPUTE); @@ -4495,6 +4510,7 @@ static int gfx_v7_0_sw_fini(void *handle) gfx_v7_0_cp_compute_fini(adev); gfx_v7_0_rlc_fini(adev); gfx_v7_0_mec_fini(adev); + gfx_v7_0_free_microcode(adev); return 0; } @@ -4572,256 +4588,6 @@ static int gfx_v7_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static void gfx_v7_0_print_status(void *handle) -{ - int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - dev_info(adev->dev, "GFX 7.x registers\n"); - dev_info(adev->dev, " GRBM_STATUS=0x%08X\n", - RREG32(mmGRBM_STATUS)); - dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n", - RREG32(mmGRBM_STATUS2)); - dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n", - RREG32(mmGRBM_STATUS_SE0)); - dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n", - RREG32(mmGRBM_STATUS_SE1)); - dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n", - RREG32(mmGRBM_STATUS_SE2)); - dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n", - RREG32(mmGRBM_STATUS_SE3)); - dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT)); - dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n", - RREG32(mmCP_STALLED_STAT1)); - dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n", - RREG32(mmCP_STALLED_STAT2)); - dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n", - RREG32(mmCP_STALLED_STAT3)); - dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n", - RREG32(mmCP_CPF_BUSY_STAT)); - dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n", - RREG32(mmCP_CPF_STALLED_STAT1)); - dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS)); - dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", 
RREG32(mmCP_CPC_BUSY_STAT)); - dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n", - RREG32(mmCP_CPC_STALLED_STAT1)); - dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS)); - - for (i = 0; i < 32; i++) { - dev_info(adev->dev, " GB_TILE_MODE%d=0x%08X\n", - i, RREG32(mmGB_TILE_MODE0 + (i * 4))); - } - for (i = 0; i < 16; i++) { - dev_info(adev->dev, " GB_MACROTILE_MODE%d=0x%08X\n", - i, RREG32(mmGB_MACROTILE_MODE0 + (i * 4))); - } - for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { - dev_info(adev->dev, " se: %d\n", i); - gfx_v7_0_select_se_sh(adev, i, 0xffffffff); - dev_info(adev->dev, " PA_SC_RASTER_CONFIG=0x%08X\n", - RREG32(mmPA_SC_RASTER_CONFIG)); - dev_info(adev->dev, " PA_SC_RASTER_CONFIG_1=0x%08X\n", - RREG32(mmPA_SC_RASTER_CONFIG_1)); - } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); - - dev_info(adev->dev, " GB_ADDR_CONFIG=0x%08X\n", - RREG32(mmGB_ADDR_CONFIG)); - dev_info(adev->dev, " HDP_ADDR_CONFIG=0x%08X\n", - RREG32(mmHDP_ADDR_CONFIG)); - dev_info(adev->dev, " DMIF_ADDR_CALC=0x%08X\n", - RREG32(mmDMIF_ADDR_CALC)); - - dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n", - RREG32(mmCP_MEQ_THRESHOLDS)); - dev_info(adev->dev, " SX_DEBUG_1=0x%08X\n", - RREG32(mmSX_DEBUG_1)); - dev_info(adev->dev, " TA_CNTL_AUX=0x%08X\n", - RREG32(mmTA_CNTL_AUX)); - dev_info(adev->dev, " SPI_CONFIG_CNTL=0x%08X\n", - RREG32(mmSPI_CONFIG_CNTL)); - dev_info(adev->dev, " SQ_CONFIG=0x%08X\n", - RREG32(mmSQ_CONFIG)); - dev_info(adev->dev, " DB_DEBUG=0x%08X\n", - RREG32(mmDB_DEBUG)); - dev_info(adev->dev, " DB_DEBUG2=0x%08X\n", - RREG32(mmDB_DEBUG2)); - dev_info(adev->dev, " DB_DEBUG3=0x%08X\n", - RREG32(mmDB_DEBUG3)); - dev_info(adev->dev, " CB_HW_CONTROL=0x%08X\n", - RREG32(mmCB_HW_CONTROL)); - dev_info(adev->dev, " SPI_CONFIG_CNTL_1=0x%08X\n", - RREG32(mmSPI_CONFIG_CNTL_1)); - dev_info(adev->dev, " PA_SC_FIFO_SIZE=0x%08X\n", - RREG32(mmPA_SC_FIFO_SIZE)); - dev_info(adev->dev, " VGT_NUM_INSTANCES=0x%08X\n", - RREG32(mmVGT_NUM_INSTANCES)); - dev_info(adev->dev, " CP_PERFMON_CNTL=0x%08X\n", - RREG32(mmCP_PERFMON_CNTL)); - dev_info(adev->dev, " PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n", - RREG32(mmPA_SC_FORCE_EOV_MAX_CNTS)); - dev_info(adev->dev, " VGT_CACHE_INVALIDATION=0x%08X\n", - RREG32(mmVGT_CACHE_INVALIDATION)); - dev_info(adev->dev, " VGT_GS_VERTEX_REUSE=0x%08X\n", - RREG32(mmVGT_GS_VERTEX_REUSE)); - dev_info(adev->dev, " PA_SC_LINE_STIPPLE_STATE=0x%08X\n", - RREG32(mmPA_SC_LINE_STIPPLE_STATE)); - dev_info(adev->dev, " PA_CL_ENHANCE=0x%08X\n", - RREG32(mmPA_CL_ENHANCE)); - dev_info(adev->dev, " PA_SC_ENHANCE=0x%08X\n", - RREG32(mmPA_SC_ENHANCE)); - - dev_info(adev->dev, " CP_ME_CNTL=0x%08X\n", - RREG32(mmCP_ME_CNTL)); - dev_info(adev->dev, " CP_MAX_CONTEXT=0x%08X\n", - RREG32(mmCP_MAX_CONTEXT)); - dev_info(adev->dev, " CP_ENDIAN_SWAP=0x%08X\n", - RREG32(mmCP_ENDIAN_SWAP)); - dev_info(adev->dev, " CP_DEVICE_ID=0x%08X\n", - RREG32(mmCP_DEVICE_ID)); - - dev_info(adev->dev, " CP_SEM_WAIT_TIMER=0x%08X\n", - RREG32(mmCP_SEM_WAIT_TIMER)); - if (adev->asic_type != CHIP_HAWAII) - dev_info(adev->dev, " CP_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n", - RREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL)); - - dev_info(adev->dev, " CP_RB_WPTR_DELAY=0x%08X\n", - RREG32(mmCP_RB_WPTR_DELAY)); - dev_info(adev->dev, " CP_RB_VMID=0x%08X\n", - RREG32(mmCP_RB_VMID)); - dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n", - RREG32(mmCP_RB0_CNTL)); - dev_info(adev->dev, " CP_RB0_WPTR=0x%08X\n", - RREG32(mmCP_RB0_WPTR)); - dev_info(adev->dev, " CP_RB0_RPTR_ADDR=0x%08X\n", - RREG32(mmCP_RB0_RPTR_ADDR)); - 
dev_info(adev->dev, " CP_RB0_RPTR_ADDR_HI=0x%08X\n", - RREG32(mmCP_RB0_RPTR_ADDR_HI)); - dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n", - RREG32(mmCP_RB0_CNTL)); - dev_info(adev->dev, " CP_RB0_BASE=0x%08X\n", - RREG32(mmCP_RB0_BASE)); - dev_info(adev->dev, " CP_RB0_BASE_HI=0x%08X\n", - RREG32(mmCP_RB0_BASE_HI)); - dev_info(adev->dev, " CP_MEC_CNTL=0x%08X\n", - RREG32(mmCP_MEC_CNTL)); - dev_info(adev->dev, " CP_CPF_DEBUG=0x%08X\n", - RREG32(mmCP_CPF_DEBUG)); - - dev_info(adev->dev, " SCRATCH_ADDR=0x%08X\n", - RREG32(mmSCRATCH_ADDR)); - dev_info(adev->dev, " SCRATCH_UMSK=0x%08X\n", - RREG32(mmSCRATCH_UMSK)); - - /* init the pipes */ - mutex_lock(&adev->srbm_mutex); - for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) { - int me = (i < 4) ? 1 : 2; - int pipe = (i < 4) ? i : (i - 4); - int queue; - - dev_info(adev->dev, " me: %d, pipe: %d\n", me, pipe); - cik_srbm_select(adev, me, pipe, 0, 0); - dev_info(adev->dev, " CP_HPD_EOP_BASE_ADDR=0x%08X\n", - RREG32(mmCP_HPD_EOP_BASE_ADDR)); - dev_info(adev->dev, " CP_HPD_EOP_BASE_ADDR_HI=0x%08X\n", - RREG32(mmCP_HPD_EOP_BASE_ADDR_HI)); - dev_info(adev->dev, " CP_HPD_EOP_VMID=0x%08X\n", - RREG32(mmCP_HPD_EOP_VMID)); - dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n", - RREG32(mmCP_HPD_EOP_CONTROL)); - - for (queue = 0; queue < 8; queue++) { - cik_srbm_select(adev, me, pipe, queue, 0); - dev_info(adev->dev, " queue: %d\n", queue); - dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n", - RREG32(mmCP_PQ_WPTR_POLL_CNTL)); - dev_info(adev->dev, " CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n", - RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL)); - dev_info(adev->dev, " CP_HQD_ACTIVE=0x%08X\n", - RREG32(mmCP_HQD_ACTIVE)); - dev_info(adev->dev, " CP_HQD_DEQUEUE_REQUEST=0x%08X\n", - RREG32(mmCP_HQD_DEQUEUE_REQUEST)); - dev_info(adev->dev, " CP_HQD_PQ_RPTR=0x%08X\n", - RREG32(mmCP_HQD_PQ_RPTR)); - dev_info(adev->dev, " CP_HQD_PQ_WPTR=0x%08X\n", - RREG32(mmCP_HQD_PQ_WPTR)); - dev_info(adev->dev, " CP_HQD_PQ_BASE=0x%08X\n", - RREG32(mmCP_HQD_PQ_BASE)); - dev_info(adev->dev, " CP_HQD_PQ_BASE_HI=0x%08X\n", - RREG32(mmCP_HQD_PQ_BASE_HI)); - dev_info(adev->dev, " CP_HQD_PQ_CONTROL=0x%08X\n", - RREG32(mmCP_HQD_PQ_CONTROL)); - dev_info(adev->dev, " CP_HQD_PQ_WPTR_POLL_ADDR=0x%08X\n", - RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR)); - dev_info(adev->dev, " CP_HQD_PQ_WPTR_POLL_ADDR_HI=0x%08X\n", - RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI)); - dev_info(adev->dev, " CP_HQD_PQ_RPTR_REPORT_ADDR=0x%08X\n", - RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR)); - dev_info(adev->dev, " CP_HQD_PQ_RPTR_REPORT_ADDR_HI=0x%08X\n", - RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI)); - dev_info(adev->dev, " CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n", - RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL)); - dev_info(adev->dev, " CP_HQD_PQ_WPTR=0x%08X\n", - RREG32(mmCP_HQD_PQ_WPTR)); - dev_info(adev->dev, " CP_HQD_VMID=0x%08X\n", - RREG32(mmCP_HQD_VMID)); - dev_info(adev->dev, " CP_MQD_BASE_ADDR=0x%08X\n", - RREG32(mmCP_MQD_BASE_ADDR)); - dev_info(adev->dev, " CP_MQD_BASE_ADDR_HI=0x%08X\n", - RREG32(mmCP_MQD_BASE_ADDR_HI)); - dev_info(adev->dev, " CP_MQD_CONTROL=0x%08X\n", - RREG32(mmCP_MQD_CONTROL)); - } - } - cik_srbm_select(adev, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); - - dev_info(adev->dev, " CP_INT_CNTL_RING0=0x%08X\n", - RREG32(mmCP_INT_CNTL_RING0)); - dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n", - RREG32(mmRLC_LB_CNTL)); - dev_info(adev->dev, " RLC_CNTL=0x%08X\n", - RREG32(mmRLC_CNTL)); - dev_info(adev->dev, " RLC_CGCG_CGLS_CTRL=0x%08X\n", - RREG32(mmRLC_CGCG_CGLS_CTRL)); - dev_info(adev->dev, " RLC_LB_CNTR_INIT=0x%08X\n", - 
RREG32(mmRLC_LB_CNTR_INIT)); - dev_info(adev->dev, " RLC_LB_CNTR_MAX=0x%08X\n", - RREG32(mmRLC_LB_CNTR_MAX)); - dev_info(adev->dev, " RLC_LB_INIT_CU_MASK=0x%08X\n", - RREG32(mmRLC_LB_INIT_CU_MASK)); - dev_info(adev->dev, " RLC_LB_PARAMS=0x%08X\n", - RREG32(mmRLC_LB_PARAMS)); - dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n", - RREG32(mmRLC_LB_CNTL)); - dev_info(adev->dev, " RLC_MC_CNTL=0x%08X\n", - RREG32(mmRLC_MC_CNTL)); - dev_info(adev->dev, " RLC_UCODE_CNTL=0x%08X\n", - RREG32(mmRLC_UCODE_CNTL)); - - if (adev->asic_type == CHIP_BONAIRE) - dev_info(adev->dev, " RLC_DRIVER_CPDMA_STATUS=0x%08X\n", - RREG32(mmRLC_DRIVER_CPDMA_STATUS)); - - mutex_lock(&adev->srbm_mutex); - for (i = 0; i < 16; i++) { - cik_srbm_select(adev, 0, 0, 0, i); - dev_info(adev->dev, " VM %d:\n", i); - dev_info(adev->dev, " SH_MEM_CONFIG=0x%08X\n", - RREG32(mmSH_MEM_CONFIG)); - dev_info(adev->dev, " SH_MEM_APE1_BASE=0x%08X\n", - RREG32(mmSH_MEM_APE1_BASE)); - dev_info(adev->dev, " SH_MEM_APE1_LIMIT=0x%08X\n", - RREG32(mmSH_MEM_APE1_LIMIT)); - dev_info(adev->dev, " SH_MEM_BASES=0x%08X\n", - RREG32(mmSH_MEM_BASES)); - } - cik_srbm_select(adev, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); -} - static int gfx_v7_0_soft_reset(void *handle) { u32 grbm_soft_reset = 0, srbm_soft_reset = 0; @@ -4855,7 +4621,6 @@ static int gfx_v7_0_soft_reset(void *handle) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK; if (grbm_soft_reset || srbm_soft_reset) { - gfx_v7_0_print_status((void *)adev); /* disable CG/PG */ gfx_v7_0_fini_pg(adev); gfx_v7_0_update_cg(adev, false); @@ -4898,7 +4663,6 @@ static int gfx_v7_0_soft_reset(void *handle) } /* Wait a little for things to settle down */ udelay(50); - gfx_v7_0_print_status((void *)adev); } return 0; } @@ -5074,7 +4838,7 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev, case 2: for (i = 0; i < adev->gfx.num_compute_rings; i++) { ring = &adev->gfx.compute_ring[i]; - if ((ring->me == me_id) & (ring->pipe == pipe_id)) + if ((ring->me == me_id) && (ring->pipe == pipe_id)) amdgpu_fence_process(ring); } break; @@ -5150,6 +4914,7 @@ static int gfx_v7_0_set_powergating_state(void *handle, } const struct amd_ip_funcs gfx_v7_0_ip_funcs = { + .name = "gfx_v7_0", .early_init = gfx_v7_0_early_init, .late_init = gfx_v7_0_late_init, .sw_init = gfx_v7_0_sw_init, @@ -5161,7 +4926,6 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = { .is_idle = gfx_v7_0_is_idle, .wait_for_idle = gfx_v7_0_wait_for_idle, .soft_reset = gfx_v7_0_soft_reset, - .print_status = gfx_v7_0_print_status, .set_clockgating_state = gfx_v7_0_set_clockgating_state, .set_powergating_state = gfx_v7_0_set_powergating_state, }; @@ -5268,24 +5032,27 @@ static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev) } -int gfx_v7_0_get_cu_info(struct amdgpu_device *adev, - struct amdgpu_cu_info *cu_info) +static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev) { int i, j, k, counter, active_cu_number = 0; u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; - - if (!adev || !cu_info) - return -EINVAL; + struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; + unsigned disable_masks[4 * 2]; memset(cu_info, 0, sizeof(*cu_info)); + amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2); + mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { mask = 1; ao_bitmap = 0; counter = 0; - gfx_v7_0_select_se_sh(adev, i, j); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); + if (i < 4 && j < 2) + gfx_v7_0_set_user_cu_inactive_bitmap( + adev, disable_masks[i * 2 + 
j]); bitmap = gfx_v7_0_get_cu_active_bitmap(adev); cu_info->bitmap[i][j] = bitmap; @@ -5301,11 +5068,9 @@ int gfx_v7_0_get_cu_info(struct amdgpu_device *adev, ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; cu_info->ao_cu_mask = ao_cu_mask; - - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h index c04bfbabfc88..94e3ea147c26 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h @@ -26,12 +26,4 @@ extern const struct amd_ip_funcs gfx_v7_0_ip_funcs; -/* XXX these shouldn't be exported */ -void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev); -void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev); -void gfx_v7_0_rlc_stop(struct amdgpu_device *adev); -uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev); -void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); -int gfx_v7_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info); - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index f0c7b3596480..bff8668e9e6d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -27,6 +27,8 @@ #include "vi.h" #include "vid.h" #include "amdgpu_ucode.h" +#include "amdgpu_atombios.h" +#include "atombios_i2c.h" #include "clearstate_vi.h" #include "gmc/gmc_8_2_d.h" @@ -46,11 +48,14 @@ #include "dce/dce_10_0_d.h" #include "dce/dce_10_0_sh_mask.h" +#include "smu/smu_7_1_3_d.h" + #define GFX8_NUM_GFX_RINGS 1 #define GFX8_NUM_COMPUTE_RINGS 8 #define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001 #define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001 +#define POLARIS11_GB_ADDR_CONFIG_GOLDEN 0x22011002 #define TONGA_GB_ADDR_CONFIG_GOLDEN 0x22011003 #define ARRAY_MODE(x) ((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT) @@ -84,6 +89,8 @@ enum { BPM_REG_FGCG_MAX }; +#define RLC_FormatDirectRegListLength 14 + MODULE_FIRMWARE("amdgpu/carrizo_ce.bin"); MODULE_FIRMWARE("amdgpu/carrizo_pfp.bin"); MODULE_FIRMWARE("amdgpu/carrizo_me.bin"); @@ -117,6 +124,20 @@ MODULE_FIRMWARE("amdgpu/fiji_mec.bin"); MODULE_FIRMWARE("amdgpu/fiji_mec2.bin"); MODULE_FIRMWARE("amdgpu/fiji_rlc.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_ce.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_me.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_mec.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin"); + +MODULE_FIRMWARE("amdgpu/polaris10_ce.bin"); +MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin"); +MODULE_FIRMWARE("amdgpu/polaris10_me.bin"); +MODULE_FIRMWARE("amdgpu/polaris10_mec.bin"); +MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin"); +MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin"); + static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = { {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0}, @@ -247,6 +268,69 @@ static const u32 tonga_mgcg_cgcg_init[] = mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, }; +static const u32 golden_settings_polaris11_a11[] = +{ + mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208, + mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, + mmDB_DEBUG2, 0xf00fffff, 0x00000400, + mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, + mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, + mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012, + 
mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000, + mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c, + mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c, + mmSQ_CONFIG, 0x07f80000, 0x07180000, + mmTA_CNTL_AUX, 0x000f000f, 0x000b0000, + mmTCC_CTRL, 0x00100000, 0xf31fff7f, + mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3, + mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000, + mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210, + mmVGT_RESET_DEBUG, 0x00000004, 0x00000004, +}; + +static const u32 polaris11_golden_common_all[] = +{ + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002, + mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, + mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF, +}; + +static const u32 golden_settings_polaris10_a11[] = +{ + mmATC_MISC_CG, 0x000c0fc0, 0x000c0200, + mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208, + mmCB_HW_CONTROL_2, 0, 0x0f000000, + mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, + mmDB_DEBUG2, 0xf00fffff, 0x00000400, + mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, + mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, + mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012, + mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002a, + mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c, + mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c, + mmSQ_CONFIG, 0x07f80000, 0x07180000, + mmTA_CNTL_AUX, 0x000f000f, 0x000b0000, + mmTCC_CTRL, 0x00100000, 0xf31fff7f, + mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7, + mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000, + mmVGT_RESET_DEBUG, 0x00000004, 0x00000004, +}; + +static const u32 polaris10_golden_common_all[] = +{ + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012, + mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A, + mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003, + mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, + mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF, +}; + static const u32 fiji_golden_common_all[] = { mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, @@ -527,7 +611,7 @@ static const u32 stoney_golden_settings_a11[] = mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c, mmTA_CNTL_AUX, 0x000f000f, 0x000b0000, - mmTCC_CTRL, 0x00100000, 0xf31fff7f, + mmTCC_CTRL, 0x00100000, 0xf31fff7f, mmTCC_EXE_DISABLE, 0x00000002, 0x00000002, mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f1, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x10101010, @@ -558,6 +642,9 @@ static const u32 stoney_mgcg_cgcg_init[] = static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev); static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev); static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev); +static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev); +static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev); +static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev); static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) { @@ -596,6 +683,27 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) tonga_golden_common_all, (const u32)ARRAY_SIZE(tonga_golden_common_all)); break; + case CHIP_POLARIS11: + amdgpu_program_register_sequence(adev, + golden_settings_polaris11_a11, + (const u32)ARRAY_SIZE(golden_settings_polaris11_a11)); + amdgpu_program_register_sequence(adev, + polaris11_golden_common_all, + (const 
u32)ARRAY_SIZE(polaris11_golden_common_all)); + break; + case CHIP_POLARIS10: + amdgpu_program_register_sequence(adev, + golden_settings_polaris10_a11, + (const u32)ARRAY_SIZE(golden_settings_polaris10_a11)); + amdgpu_program_register_sequence(adev, + polaris10_golden_common_all, + (const u32)ARRAY_SIZE(polaris10_golden_common_all)); + WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C); + if (adev->pdev->revision == 0xc7) { + amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD); + amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0); + } + break; case CHIP_CARRIZO: amdgpu_program_register_sequence(adev, cz_mgcg_cgcg_init, @@ -679,26 +787,25 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) return r; } -static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) +static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; uint32_t scratch; uint32_t tmp = 0; - unsigned i; - int r; + long r; r = amdgpu_gfx_scratch_get(adev, &scratch); if (r) { - DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r); + DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r); return r; } WREG32(scratch, 0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err1; } ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); @@ -706,32 +813,29 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[2] = 0xDEADBEEF; ib.length_dw = 3; - r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); if (r) goto err2; - r = fence_wait(f, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out.\n"); + r = -ETIMEDOUT; goto err2; - } - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(scratch); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < adev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ring->idx, i); + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err2; + } + tmp = RREG32(scratch); + if (tmp == 0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } else { DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); r = -EINVAL; } err2: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); err1: @@ -739,6 +843,26 @@ err1: return r; } + +static void gfx_v8_0_free_microcode(struct amdgpu_device *adev) { + release_firmware(adev->gfx.pfp_fw); + adev->gfx.pfp_fw = NULL; + release_firmware(adev->gfx.me_fw); + adev->gfx.me_fw = NULL; + release_firmware(adev->gfx.ce_fw); + adev->gfx.ce_fw = NULL; + release_firmware(adev->gfx.rlc_fw); + adev->gfx.rlc_fw = NULL; + release_firmware(adev->gfx.mec_fw); + adev->gfx.mec_fw = NULL; + if ((adev->asic_type != CHIP_STONEY) && + (adev->asic_type != CHIP_TOPAZ)) + release_firmware(adev->gfx.mec2_fw); + adev->gfx.mec2_fw = NULL; + + kfree(adev->gfx.rlc.register_list_format); +} + static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) { const char *chip_name; @@ -747,6 +871,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; const struct gfx_firmware_header_v1_0 *cp_hdr; + const struct 
rlc_firmware_header_v2_0 *rlc_hdr; + unsigned int *tmp = NULL, i; DRM_DEBUG("\n"); @@ -763,6 +889,12 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) case CHIP_FIJI: chip_name = "fiji"; break; + case CHIP_POLARIS11: + chip_name = "polaris11"; + break; + case CHIP_POLARIS10: + chip_name = "polaris10"; + break; case CHIP_STONEY: chip_name = "stoney"; break; @@ -808,9 +940,49 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) if (err) goto out; err = amdgpu_ucode_validate(adev->gfx.rlc_fw); - cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.rlc_fw->data; - adev->gfx.rlc_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); - adev->gfx.rlc_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); + rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; + adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version); + adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version); + + adev->gfx.rlc.save_and_restore_offset = + le32_to_cpu(rlc_hdr->save_and_restore_offset); + adev->gfx.rlc.clear_state_descriptor_offset = + le32_to_cpu(rlc_hdr->clear_state_descriptor_offset); + adev->gfx.rlc.avail_scratch_ram_locations = + le32_to_cpu(rlc_hdr->avail_scratch_ram_locations); + adev->gfx.rlc.reg_restore_list_size = + le32_to_cpu(rlc_hdr->reg_restore_list_size); + adev->gfx.rlc.reg_list_format_start = + le32_to_cpu(rlc_hdr->reg_list_format_start); + adev->gfx.rlc.reg_list_format_separate_start = + le32_to_cpu(rlc_hdr->reg_list_format_separate_start); + adev->gfx.rlc.starting_offsets_start = + le32_to_cpu(rlc_hdr->starting_offsets_start); + adev->gfx.rlc.reg_list_format_size_bytes = + le32_to_cpu(rlc_hdr->reg_list_format_size_bytes); + adev->gfx.rlc.reg_list_size_bytes = + le32_to_cpu(rlc_hdr->reg_list_size_bytes); + + adev->gfx.rlc.register_list_format = + kmalloc(adev->gfx.rlc.reg_list_format_size_bytes + + adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL); + + if (!adev->gfx.rlc.register_list_format) { + err = -ENOMEM; + goto out; + } + + tmp = (unsigned int *)((uintptr_t)rlc_hdr + + le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes)); + for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++) + adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]); + + adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i; + + tmp = (unsigned int *)((uintptr_t)rlc_hdr + + le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes)); + for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++) + adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); @@ -911,6 +1083,270 @@ out: return err; } +static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, + volatile u32 *buffer) +{ + u32 count = 0, i; + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + + if (adev->gfx.rlc.cs_data == NULL) + return; + if (buffer == NULL) + return; + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1)); + buffer[count++] = cpu_to_le32(0x80000000); + buffer[count++] = cpu_to_le32(0x80000000); + + for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) { + buffer[count++] = + 
cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); + buffer[count++] = cpu_to_le32(ext->reg_index - + PACKET3_SET_CONTEXT_REG_START); + for (i = 0; i < ext->reg_count; i++) + buffer[count++] = cpu_to_le32(ext->extent[i]); + } else { + return; + } + } + } + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2)); + buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - + PACKET3_SET_CONTEXT_REG_START); + switch (adev->asic_type) { + case CHIP_TONGA: + case CHIP_POLARIS10: + buffer[count++] = cpu_to_le32(0x16000012); + buffer[count++] = cpu_to_le32(0x0000002A); + break; + case CHIP_POLARIS11: + buffer[count++] = cpu_to_le32(0x16000012); + buffer[count++] = cpu_to_le32(0x00000000); + break; + case CHIP_FIJI: + buffer[count++] = cpu_to_le32(0x3a00161a); + buffer[count++] = cpu_to_le32(0x0000002e); + break; + case CHIP_TOPAZ: + case CHIP_CARRIZO: + buffer[count++] = cpu_to_le32(0x00000002); + buffer[count++] = cpu_to_le32(0x00000000); + break; + case CHIP_STONEY: + buffer[count++] = cpu_to_le32(0x00000000); + buffer[count++] = cpu_to_le32(0x00000000); + break; + default: + buffer[count++] = cpu_to_le32(0x00000000); + buffer[count++] = cpu_to_le32(0x00000000); + break; + } + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0)); + buffer[count++] = cpu_to_le32(0); +} + +static void cz_init_cp_jump_table(struct amdgpu_device *adev) +{ + const __le32 *fw_data; + volatile u32 *dst_ptr; + int me, i, max_me = 4; + u32 bo_offset = 0; + u32 table_offset, table_size; + + if (adev->asic_type == CHIP_CARRIZO) + max_me = 5; + + /* write the cp table buffer */ + dst_ptr = adev->gfx.rlc.cp_table_ptr; + for (me = 0; me < max_me; me++) { + if (me == 0) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; + fw_data = (const __le32 *) + (adev->gfx.ce_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 1) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; + fw_data = (const __le32 *) + (adev->gfx.pfp_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 2) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; + fw_data = (const __le32 *) + (adev->gfx.me_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 3) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; + fw_data = (const __le32 *) + (adev->gfx.mec_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 4) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; + fw_data = (const __le32 *) + (adev->gfx.mec2_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } + + for (i = 0; i < table_size; i ++) { + 
dst_ptr[bo_offset + i] = + cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); + } + + bo_offset += table_size; + } +} + +static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) +{ + int r; + + /* clear state block */ + if (adev->gfx.rlc.clear_state_obj) { + r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); + if (unlikely(r != 0)) + dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r); + amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); + + amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); + adev->gfx.rlc.clear_state_obj = NULL; + } + + /* jump table block */ + if (adev->gfx.rlc.cp_table_obj) { + r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); + if (unlikely(r != 0)) + dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); + amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); + + amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); + adev->gfx.rlc.cp_table_obj = NULL; + } +} + +static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) +{ + volatile u32 *dst_ptr; + u32 dws; + const struct cs_section_def *cs_data; + int r; + + adev->gfx.rlc.cs_data = vi_cs_data; + + cs_data = adev->gfx.rlc.cs_data; + + if (cs_data) { + /* clear state block */ + adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev); + + if (adev->gfx.rlc.clear_state_obj == NULL) { + r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, + &adev->gfx.rlc.clear_state_obj); + if (r) { + dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); + gfx_v8_0_rlc_fini(adev); + return r; + } + } + r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); + if (unlikely(r != 0)) { + gfx_v8_0_rlc_fini(adev); + return r; + } + r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.clear_state_gpu_addr); + if (r) { + amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); + dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r); + gfx_v8_0_rlc_fini(adev); + return r; + } + + r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr); + if (r) { + dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r); + gfx_v8_0_rlc_fini(adev); + return r; + } + /* set up the cs buffer */ + dst_ptr = adev->gfx.rlc.cs_ptr; + gfx_v8_0_get_csb_buffer(adev, dst_ptr); + amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); + } + + if ((adev->asic_type == CHIP_CARRIZO) || + (adev->asic_type == CHIP_STONEY)) { + adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ + if (adev->gfx.rlc.cp_table_obj == NULL) { + r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, + &adev->gfx.rlc.cp_table_obj); + if (r) { + dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); + return r; + } + } + + r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); + if (unlikely(r != 0)) { + dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); + return r; + } + r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.cp_table_gpu_addr); + if (r) { + amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); + dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r); + return r; + } + r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr); + if (r) { + dev_warn(adev->dev, 
"(%d) map RLC cp table bo failed\n", r); + return r; + } + + cz_init_cp_jump_table(adev); + + amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); + + } + + return 0; +} + static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) { int r; @@ -1262,7 +1698,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4); /* shedule the ib on the ring */ - r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); if (r) { DRM_ERROR("amdgpu: ib submit failed (%d).\n", r); goto fail; @@ -1289,19 +1725,19 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) RREG32(sec_ded_counter_registers[i]); fail: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); return r; } -static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) +static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) { u32 gb_addr_config; u32 mc_shared_chmap, mc_arb_ramcfg; u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map; u32 tmp; + int ret; switch (adev->asic_type) { case CHIP_TOPAZ: @@ -1338,6 +1774,34 @@ static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN; break; + case CHIP_POLARIS11: + ret = amdgpu_atombios_get_gfx_info(adev); + if (ret) + return ret; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 32; + adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = POLARIS11_GB_ADDR_CONFIG_GOLDEN; + break; + case CHIP_POLARIS10: + ret = amdgpu_atombios_get_gfx_info(adev); + if (ret) + return ret; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 32; + adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN; + break; case CHIP_TONGA: adev->gfx.config.max_shader_engines = 4; adev->gfx.config.max_tile_pipes = 8; @@ -1520,6 +1984,8 @@ static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) break; } adev->gfx.config.gb_addr_config = gb_addr_config; + + return 0; } static int gfx_v8_0_sw_init(void *handle) @@ -1553,6 +2019,12 @@ static int gfx_v8_0_sw_init(void *handle) return r; } + r = gfx_v8_0_rlc_init(adev); + if (r) { + DRM_ERROR("Failed to init rlc BOs!\n"); + return r; + } + r = gfx_v8_0_mec_init(adev); if (r) { DRM_ERROR("Failed to init MEC BOs!\n"); @@ -1570,7 +2042,7 @@ static int gfx_v8_0_sw_init(void *handle) ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0; } - r = amdgpu_ring_init(adev, ring, 1024 * 1024, + r = amdgpu_ring_init(adev, ring, 1024, PACKET3(PACKET3_NOP, 0x3FFF), 0xf, &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP, AMDGPU_RING_TYPE_GFX); @@ -1594,10 +2066,10 @@ static int gfx_v8_0_sw_init(void *handle) ring->me = 1; /* first MEC */ ring->pipe = i / 8; ring->queue = i % 8; - sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue); + sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; /* type-2 packets 
are deprecated on MEC, use type-3 instead */ - r = amdgpu_ring_init(adev, ring, 1024 * 1024, + r = amdgpu_ring_init(adev, ring, 1024, PACKET3(PACKET3_NOP, 0x3FFF), 0xf, &adev->gfx.eop_irq, irq_type, AMDGPU_RING_TYPE_COMPUTE); @@ -1629,7 +2101,9 @@ static int gfx_v8_0_sw_init(void *handle) adev->gfx.ce_ram_size = 0x8000; - gfx_v8_0_gpu_early_init(adev); + r = gfx_v8_0_gpu_early_init(adev); + if (r) + return r; return 0; } @@ -1650,6 +2124,10 @@ static int gfx_v8_0_sw_fini(void *handle) gfx_v8_0_mec_fini(adev); + gfx_v8_0_rlc_fini(adev); + + gfx_v8_0_free_microcode(adev); + return 0; } @@ -2219,6 +2697,410 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]); break; + case CHIP_POLARIS11: + modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P4_16x16)); + modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | + 
PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + + mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + 
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + + mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) + WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]); + + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) + if (reg_offset != 7) + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]); + + break; + case CHIP_POLARIS10: + modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + 
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16)); + modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + 
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + + mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + + mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + + mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + + mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + 
NUM_BANKS(ADDR_SURF_4_BANK)); + + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) + WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]); + + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) + if (reg_offset != 7) + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]); + + break; case CHIP_STONEY: modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) | @@ -2569,9 +3451,15 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) } } -void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) +static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, + u32 se_num, u32 sh_num, u32 instance) { - u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); + u32 data; + + if (instance == 0xffffffff) + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); + else + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance); if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) { data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); @@ -2621,13 +3509,13 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v8_0_select_se_sh(adev, i, j); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); data = gfx_v8_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.config.backend_enable_mask = active_rbs; @@ -2695,6 +3583,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) gfx_v8_0_tiling_mode_table_init(adev); gfx_v8_0_setup_rb(adev); + gfx_v8_0_get_cu_info(adev); /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ @@ -2730,7 +3619,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) * making sure that the following register writes will be broadcasted * to all the shaders */ - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmPA_SC_FIFO_SIZE, (adev->gfx.config.sc_prim_fifo_size_frontend << @@ -2753,7 +3642,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v8_0_select_se_sh(adev, i, j); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); for (k = 0; k < adev->usec_timeout; k++) { if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0) break; @@ -2761,7 +3650,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | @@ -2788,6 +3677,252 @@ static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, WREG32(mmCP_INT_CNTL_RING0, tmp); } +static void gfx_v8_0_init_csb(struct amdgpu_device *adev) +{ + /* csib */ + WREG32(mmRLC_CSIB_ADDR_HI, + adev->gfx.rlc.clear_state_gpu_addr >> 32); + WREG32(mmRLC_CSIB_ADDR_LO, + adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); + WREG32(mmRLC_CSIB_LENGTH, + 
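
A note on the reworked selector exercised throughout this hunk: gfx_v8_0_select_se_sh() now takes a fourth argument, where 0xffffffff in the instance (or se/sh) position programs the corresponding broadcast bit in GRBM_GFX_INDEX and a concrete value indexes a single instance. A minimal usage sketch of the calling convention, mirroring the gfx_v8_0_setup_rb() loop above and assuming the usual grbm_idx_mutex locking:

        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
                        /* address one SE/SH, broadcast to all CU instances */
                        gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
                        /* ... read or program per-SH registers here ... */
                }
        }
        /* always restore full broadcast before dropping the lock */
        gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
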
adev->gfx.rlc.clear_state_size); +} + +static void gfx_v8_0_parse_ind_reg_list(int *register_list_format, + int ind_offset, + int list_size, + int *unique_indices, + int *indices_count, + int max_indices, + int *ind_start_offsets, + int *offset_count, + int max_offset) +{ + int indices; + bool new_entry = true; + + for (; ind_offset < list_size; ind_offset++) { + + if (new_entry) { + new_entry = false; + ind_start_offsets[*offset_count] = ind_offset; + *offset_count = *offset_count + 1; + BUG_ON(*offset_count >= max_offset); + } + + if (register_list_format[ind_offset] == 0xFFFFFFFF) { + new_entry = true; + continue; + } + + ind_offset += 2; + + /* look for the matching indice */ + for (indices = 0; + indices < *indices_count; + indices++) { + if (unique_indices[indices] == + register_list_format[ind_offset]) + break; + } + + if (indices >= *indices_count) { + unique_indices[*indices_count] = + register_list_format[ind_offset]; + indices = *indices_count; + *indices_count = *indices_count + 1; + BUG_ON(*indices_count >= max_indices); + } + + register_list_format[ind_offset] = indices; + } +} + +static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev) +{ + int i, temp, data; + int unique_indices[] = {0, 0, 0, 0, 0, 0, 0, 0}; + int indices_count = 0; + int indirect_start_offsets[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + int offset_count = 0; + + int list_size; + unsigned int *register_list_format = + kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL); + if (register_list_format == NULL) + return -ENOMEM; + memcpy(register_list_format, adev->gfx.rlc.register_list_format, + adev->gfx.rlc.reg_list_format_size_bytes); + + gfx_v8_0_parse_ind_reg_list(register_list_format, + RLC_FormatDirectRegListLength, + adev->gfx.rlc.reg_list_format_size_bytes >> 2, + unique_indices, + &indices_count, + sizeof(unique_indices) / sizeof(int), + indirect_start_offsets, + &offset_count, + sizeof(indirect_start_offsets)/sizeof(int)); + + /* save and restore list */ + temp = RREG32(mmRLC_SRM_CNTL); + temp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK; + WREG32(mmRLC_SRM_CNTL, temp); + + WREG32(mmRLC_SRM_ARAM_ADDR, 0); + for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++) + WREG32(mmRLC_SRM_ARAM_DATA, adev->gfx.rlc.register_restore[i]); + + /* indirect list */ + WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_list_format_start); + for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++) + WREG32(mmRLC_GPM_SCRATCH_DATA, register_list_format[i]); + + list_size = adev->gfx.rlc.reg_list_size_bytes >> 2; + list_size = list_size >> 1; + WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_restore_list_size); + WREG32(mmRLC_GPM_SCRATCH_DATA, list_size); + + /* starting offsets starts */ + WREG32(mmRLC_GPM_SCRATCH_ADDR, + adev->gfx.rlc.starting_offsets_start); + for (i = 0; i < sizeof(indirect_start_offsets)/sizeof(int); i++) + WREG32(mmRLC_GPM_SCRATCH_DATA, + indirect_start_offsets[i]); + + /* unique indices */ + temp = mmRLC_SRM_INDEX_CNTL_ADDR_0; + data = mmRLC_SRM_INDEX_CNTL_DATA_0; + for (i = 0; i < sizeof(unique_indices) / sizeof(int); i++) { + amdgpu_mm_wreg(adev, temp + i, unique_indices[i] & 0x3FFFF, false); + amdgpu_mm_wreg(adev, data + i, unique_indices[i] >> 20, false); + } + kfree(register_list_format); + + return 0; +} + +static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev) +{ + uint32_t data; + + data = RREG32(mmRLC_SRM_CNTL); + data |= RLC_SRM_CNTL__SRM_ENABLE_MASK; + WREG32(mmRLC_SRM_CNTL, data); +} + +static void gfx_v8_0_init_power_gating(struct 
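
gfx_v8_0_parse_ind_reg_list() above compacts the firmware's register-list format in place: a 0xFFFFFFFF word closes an indirect block (whose start offset is recorded in ind_start_offsets[]), and the word two entries into each block is rewritten to an index into a deduplicated table. A self-contained sketch of just the dedup step; the names here are illustrative, not from the driver:

        /* Return the compact index of @value in @unique, appending it if
         * unseen; mirrors the inner search loop of
         * gfx_v8_0_parse_ind_reg_list(). */
        static int compact_index(u32 *unique, int *count, int max, u32 value)
        {
                int i;

                for (i = 0; i < *count; i++)
                        if (unique[i] == value)
                                return i;
                BUG_ON(*count >= max);  /* table full: firmware list too large */
                unique[*count] = value;
                return (*count)++;
        }
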
amdgpu_device *adev) +{ + uint32_t data; + + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_DMG)) { + data = RREG32(mmCP_RB_WPTR_POLL_CNTL); + data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK; + data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); + WREG32(mmCP_RB_WPTR_POLL_CNTL, data); + + data = 0; + data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT); + data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT); + data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT); + data |= (0x10 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT); + WREG32(mmRLC_PG_DELAY, data); + + data = RREG32(mmRLC_PG_DELAY_2); + data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK; + data |= (0x3 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT); + WREG32(mmRLC_PG_DELAY_2, data); + + data = RREG32(mmRLC_AUTO_PG_CTRL); + data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK; + data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT); + WREG32(mmRLC_AUTO_PG_CTRL, data); + } +} + +static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + + if (enable) + data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; + + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + +static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + + if (enable) + data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; + + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + +static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + + if (enable) + data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK; + else + data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK; + + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + +static void gfx_v8_0_init_pg(struct amdgpu_device *adev) +{ + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_DMG | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GDS | + AMD_PG_SUPPORT_RLC_SMU_HS)) { + gfx_v8_0_init_csb(adev); + gfx_v8_0_init_save_restore_list(adev); + gfx_v8_0_enable_save_restore_machine(adev); + + if ((adev->asic_type == CHIP_CARRIZO) || + (adev->asic_type == CHIP_STONEY)) { + WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8); + gfx_v8_0_init_power_gating(adev); + WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask); + if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) { + cz_enable_sck_slow_down_on_power_up(adev, true); + cz_enable_sck_slow_down_on_power_down(adev, true); + } else { + cz_enable_sck_slow_down_on_power_up(adev, false); + cz_enable_sck_slow_down_on_power_down(adev, false); + } + if (adev->pg_flags & AMD_PG_SUPPORT_CP) + cz_enable_cp_power_gating(adev, true); + else + cz_enable_cp_power_gating(adev, false); + } else if (adev->asic_type == CHIP_POLARIS11) { + gfx_v8_0_init_power_gating(adev); + } + } +} + void gfx_v8_0_rlc_stop(struct amdgpu_device *adev) { u32 tmp = RREG32(mmRLC_CNTL); @@ -2858,12 +3993,17 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev) /* disable CG */ WREG32(mmRLC_CGCG_CGLS_CTRL, 0); + if (adev->asic_type == CHIP_POLARIS11 || + adev->asic_type == CHIP_POLARIS10) + WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0); /* disable PG */ 
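
The cz_enable_*() helpers above all share one read-modify-write idiom: snapshot the register, toggle a single feature bit according to the enable flag, and write back only when the value actually changed, avoiding redundant MMIO. Schematically, with SOME_FEATURE_MASK as a placeholder rather than a real register field:

        orig = data = RREG32(mmRLC_PG_CNTL);
        if (enable)
                data |= SOME_FEATURE_MASK;
        else
                data &= ~SOME_FEATURE_MASK;
        if (orig != data)               /* skip the write when nothing changed */
                WREG32(mmRLC_PG_CNTL, data);
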
WREG32(mmRLC_PG_CNTL, 0); gfx_v8_0_rlc_reset(adev); + gfx_v8_0_init_pg(adev); + if (!adev->pp_enabled) { if (!adev->firmware.smu_load) { /* legacy rlc firmware loading */ @@ -3035,18 +4175,27 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); switch (adev->asic_type) { case CHIP_TONGA: + case CHIP_POLARIS10: amdgpu_ring_write(ring, 0x16000012); amdgpu_ring_write(ring, 0x0000002A); break; + case CHIP_POLARIS11: + amdgpu_ring_write(ring, 0x16000012); + amdgpu_ring_write(ring, 0x00000000); + break; case CHIP_FIJI: amdgpu_ring_write(ring, 0x3a00161a); amdgpu_ring_write(ring, 0x0000002e); break; - case CHIP_TOPAZ: case CHIP_CARRIZO: amdgpu_ring_write(ring, 0x00000002); amdgpu_ring_write(ring, 0x00000000); break; + case CHIP_TOPAZ: + amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ? + 0x00000000 : 0x00000002); + amdgpu_ring_write(ring, 0x00000000); + break; case CHIP_STONEY: amdgpu_ring_write(ring, 0x00000000); amdgpu_ring_write(ring, 0x00000000); @@ -3122,6 +4271,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_OFFSET, ring->doorbell_index); tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, + DOORBELL_HIT, 0); + tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 1); } else { tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, @@ -3679,7 +4830,9 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) if (use_doorbell) { if ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_FIJI) || - (adev->asic_type == CHIP_STONEY)) { + (adev->asic_type == CHIP_STONEY) || + (adev->asic_type == CHIP_POLARIS11) || + (adev->asic_type == CHIP_POLARIS10)) { WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, AMDGPU_DOORBELL_KIQ << 2); WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, @@ -3713,7 +4866,9 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53); WREG32(mmCP_HQD_PERSISTENT_STATE, tmp); mqd->cp_hqd_persistent_state = tmp; - if (adev->asic_type == CHIP_STONEY) { + if (adev->asic_type == CHIP_STONEY || + adev->asic_type == CHIP_POLARIS11 || + adev->asic_type == CHIP_POLARIS10) { tmp = RREG32(mmCP_ME1_PIPE3_INT_CNTL); tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE3_INT_CNTL, GENERIC2_INT_ENABLE, 1); WREG32(mmCP_ME1_PIPE3_INT_CNTL, tmp); @@ -3845,6 +5000,9 @@ static int gfx_v8_0_hw_fini(void *handle) gfx_v8_0_rlc_stop(adev); gfx_v8_0_cp_compute_fini(adev); + amdgpu_set_powergating_state(adev, + AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_UNGATE); + return 0; } @@ -3889,185 +5047,6 @@ static int gfx_v8_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static void gfx_v8_0_print_status(void *handle) -{ - int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - dev_info(adev->dev, "GFX 8.x registers\n"); - dev_info(adev->dev, " GRBM_STATUS=0x%08X\n", - RREG32(mmGRBM_STATUS)); - dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n", - RREG32(mmGRBM_STATUS2)); - dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n", - RREG32(mmGRBM_STATUS_SE0)); - dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n", - RREG32(mmGRBM_STATUS_SE1)); - dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n", - RREG32(mmGRBM_STATUS_SE2)); - dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n", - RREG32(mmGRBM_STATUS_SE3)); - dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT)); - dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n", - RREG32(mmCP_STALLED_STAT1)); - dev_info(adev->dev, " 
CP_STALLED_STAT2 = 0x%08x\n", - RREG32(mmCP_STALLED_STAT2)); - dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n", - RREG32(mmCP_STALLED_STAT3)); - dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n", - RREG32(mmCP_CPF_BUSY_STAT)); - dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n", - RREG32(mmCP_CPF_STALLED_STAT1)); - dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS)); - dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT)); - dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n", - RREG32(mmCP_CPC_STALLED_STAT1)); - dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS)); - - for (i = 0; i < 32; i++) { - dev_info(adev->dev, " GB_TILE_MODE%d=0x%08X\n", - i, RREG32(mmGB_TILE_MODE0 + (i * 4))); - } - for (i = 0; i < 16; i++) { - dev_info(adev->dev, " GB_MACROTILE_MODE%d=0x%08X\n", - i, RREG32(mmGB_MACROTILE_MODE0 + (i * 4))); - } - for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { - dev_info(adev->dev, " se: %d\n", i); - gfx_v8_0_select_se_sh(adev, i, 0xffffffff); - dev_info(adev->dev, " PA_SC_RASTER_CONFIG=0x%08X\n", - RREG32(mmPA_SC_RASTER_CONFIG)); - dev_info(adev->dev, " PA_SC_RASTER_CONFIG_1=0x%08X\n", - RREG32(mmPA_SC_RASTER_CONFIG_1)); - } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); - - dev_info(adev->dev, " GB_ADDR_CONFIG=0x%08X\n", - RREG32(mmGB_ADDR_CONFIG)); - dev_info(adev->dev, " HDP_ADDR_CONFIG=0x%08X\n", - RREG32(mmHDP_ADDR_CONFIG)); - dev_info(adev->dev, " DMIF_ADDR_CALC=0x%08X\n", - RREG32(mmDMIF_ADDR_CALC)); - - dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n", - RREG32(mmCP_MEQ_THRESHOLDS)); - dev_info(adev->dev, " SX_DEBUG_1=0x%08X\n", - RREG32(mmSX_DEBUG_1)); - dev_info(adev->dev, " TA_CNTL_AUX=0x%08X\n", - RREG32(mmTA_CNTL_AUX)); - dev_info(adev->dev, " SPI_CONFIG_CNTL=0x%08X\n", - RREG32(mmSPI_CONFIG_CNTL)); - dev_info(adev->dev, " SQ_CONFIG=0x%08X\n", - RREG32(mmSQ_CONFIG)); - dev_info(adev->dev, " DB_DEBUG=0x%08X\n", - RREG32(mmDB_DEBUG)); - dev_info(adev->dev, " DB_DEBUG2=0x%08X\n", - RREG32(mmDB_DEBUG2)); - dev_info(adev->dev, " DB_DEBUG3=0x%08X\n", - RREG32(mmDB_DEBUG3)); - dev_info(adev->dev, " CB_HW_CONTROL=0x%08X\n", - RREG32(mmCB_HW_CONTROL)); - dev_info(adev->dev, " SPI_CONFIG_CNTL_1=0x%08X\n", - RREG32(mmSPI_CONFIG_CNTL_1)); - dev_info(adev->dev, " PA_SC_FIFO_SIZE=0x%08X\n", - RREG32(mmPA_SC_FIFO_SIZE)); - dev_info(adev->dev, " VGT_NUM_INSTANCES=0x%08X\n", - RREG32(mmVGT_NUM_INSTANCES)); - dev_info(adev->dev, " CP_PERFMON_CNTL=0x%08X\n", - RREG32(mmCP_PERFMON_CNTL)); - dev_info(adev->dev, " PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n", - RREG32(mmPA_SC_FORCE_EOV_MAX_CNTS)); - dev_info(adev->dev, " VGT_CACHE_INVALIDATION=0x%08X\n", - RREG32(mmVGT_CACHE_INVALIDATION)); - dev_info(adev->dev, " VGT_GS_VERTEX_REUSE=0x%08X\n", - RREG32(mmVGT_GS_VERTEX_REUSE)); - dev_info(adev->dev, " PA_SC_LINE_STIPPLE_STATE=0x%08X\n", - RREG32(mmPA_SC_LINE_STIPPLE_STATE)); - dev_info(adev->dev, " PA_CL_ENHANCE=0x%08X\n", - RREG32(mmPA_CL_ENHANCE)); - dev_info(adev->dev, " PA_SC_ENHANCE=0x%08X\n", - RREG32(mmPA_SC_ENHANCE)); - - dev_info(adev->dev, " CP_ME_CNTL=0x%08X\n", - RREG32(mmCP_ME_CNTL)); - dev_info(adev->dev, " CP_MAX_CONTEXT=0x%08X\n", - RREG32(mmCP_MAX_CONTEXT)); - dev_info(adev->dev, " CP_ENDIAN_SWAP=0x%08X\n", - RREG32(mmCP_ENDIAN_SWAP)); - dev_info(adev->dev, " CP_DEVICE_ID=0x%08X\n", - RREG32(mmCP_DEVICE_ID)); - - dev_info(adev->dev, " CP_SEM_WAIT_TIMER=0x%08X\n", - RREG32(mmCP_SEM_WAIT_TIMER)); - - dev_info(adev->dev, " CP_RB_WPTR_DELAY=0x%08X\n", - RREG32(mmCP_RB_WPTR_DELAY)); - 
dev_info(adev->dev, " CP_RB_VMID=0x%08X\n", - RREG32(mmCP_RB_VMID)); - dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n", - RREG32(mmCP_RB0_CNTL)); - dev_info(adev->dev, " CP_RB0_WPTR=0x%08X\n", - RREG32(mmCP_RB0_WPTR)); - dev_info(adev->dev, " CP_RB0_RPTR_ADDR=0x%08X\n", - RREG32(mmCP_RB0_RPTR_ADDR)); - dev_info(adev->dev, " CP_RB0_RPTR_ADDR_HI=0x%08X\n", - RREG32(mmCP_RB0_RPTR_ADDR_HI)); - dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n", - RREG32(mmCP_RB0_CNTL)); - dev_info(adev->dev, " CP_RB0_BASE=0x%08X\n", - RREG32(mmCP_RB0_BASE)); - dev_info(adev->dev, " CP_RB0_BASE_HI=0x%08X\n", - RREG32(mmCP_RB0_BASE_HI)); - dev_info(adev->dev, " CP_MEC_CNTL=0x%08X\n", - RREG32(mmCP_MEC_CNTL)); - dev_info(adev->dev, " CP_CPF_DEBUG=0x%08X\n", - RREG32(mmCP_CPF_DEBUG)); - - dev_info(adev->dev, " SCRATCH_ADDR=0x%08X\n", - RREG32(mmSCRATCH_ADDR)); - dev_info(adev->dev, " SCRATCH_UMSK=0x%08X\n", - RREG32(mmSCRATCH_UMSK)); - - dev_info(adev->dev, " CP_INT_CNTL_RING0=0x%08X\n", - RREG32(mmCP_INT_CNTL_RING0)); - dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n", - RREG32(mmRLC_LB_CNTL)); - dev_info(adev->dev, " RLC_CNTL=0x%08X\n", - RREG32(mmRLC_CNTL)); - dev_info(adev->dev, " RLC_CGCG_CGLS_CTRL=0x%08X\n", - RREG32(mmRLC_CGCG_CGLS_CTRL)); - dev_info(adev->dev, " RLC_LB_CNTR_INIT=0x%08X\n", - RREG32(mmRLC_LB_CNTR_INIT)); - dev_info(adev->dev, " RLC_LB_CNTR_MAX=0x%08X\n", - RREG32(mmRLC_LB_CNTR_MAX)); - dev_info(adev->dev, " RLC_LB_INIT_CU_MASK=0x%08X\n", - RREG32(mmRLC_LB_INIT_CU_MASK)); - dev_info(adev->dev, " RLC_LB_PARAMS=0x%08X\n", - RREG32(mmRLC_LB_PARAMS)); - dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n", - RREG32(mmRLC_LB_CNTL)); - dev_info(adev->dev, " RLC_MC_CNTL=0x%08X\n", - RREG32(mmRLC_MC_CNTL)); - dev_info(adev->dev, " RLC_UCODE_CNTL=0x%08X\n", - RREG32(mmRLC_UCODE_CNTL)); - - mutex_lock(&adev->srbm_mutex); - for (i = 0; i < 16; i++) { - vi_srbm_select(adev, 0, 0, 0, i); - dev_info(adev->dev, " VM %d:\n", i); - dev_info(adev->dev, " SH_MEM_CONFIG=0x%08X\n", - RREG32(mmSH_MEM_CONFIG)); - dev_info(adev->dev, " SH_MEM_APE1_BASE=0x%08X\n", - RREG32(mmSH_MEM_APE1_BASE)); - dev_info(adev->dev, " SH_MEM_APE1_LIMIT=0x%08X\n", - RREG32(mmSH_MEM_APE1_LIMIT)); - dev_info(adev->dev, " SH_MEM_BASES=0x%08X\n", - RREG32(mmSH_MEM_BASES)); - } - vi_srbm_select(adev, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); -} - static int gfx_v8_0_soft_reset(void *handle) { u32 grbm_soft_reset = 0, srbm_soft_reset = 0; @@ -4108,7 +5087,6 @@ static int gfx_v8_0_soft_reset(void *handle) SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); if (grbm_soft_reset || srbm_soft_reset) { - gfx_v8_0_print_status((void *)adev); /* stop the rlc */ gfx_v8_0_rlc_stop(adev); @@ -4168,7 +5146,6 @@ static int gfx_v8_0_soft_reset(void *handle) /* Wait a little for things to settle down */ udelay(50); - gfx_v8_0_print_status((void *)adev); } return 0; } @@ -4181,7 +5158,7 @@ static int gfx_v8_0_soft_reset(void *handle) * Fetches a GPU clock counter snapshot. * Returns the 64 bit clock counter snapshot. 
*/ -uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev) +static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock; @@ -4241,15 +5218,22 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring, amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } +static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = { + .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, + .select_se_sh = &gfx_v8_0_select_se_sh, +}; + static int gfx_v8_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS; adev->gfx.num_compute_rings = GFX8_NUM_COMPUTE_RINGS; + adev->gfx.funcs = &gfx_v8_0_gfx_funcs; gfx_v8_0_set_ring_funcs(adev); gfx_v8_0_set_irq_funcs(adev); gfx_v8_0_set_gds_init(adev); + gfx_v8_0_set_rlc_funcs(adev); return 0; } @@ -4272,27 +5256,177 @@ static int gfx_v8_0_late_init(void *handle) if (r) return r; + amdgpu_set_powergating_state(adev, + AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_GATE); + return 0; } +static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t data, temp; + + if (adev->asic_type == CHIP_POLARIS11) + /* Send msg to SMU via Powerplay */ + amdgpu_set_powergating_state(adev, + AMD_IP_BLOCK_TYPE_SMC, + enable ? + AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE); + + temp = data = RREG32(mmRLC_PG_CNTL); + /* Enable static MGPG */ + if (enable) + data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; + + if (temp != data) + WREG32(mmRLC_PG_CNTL, data); +} + +static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t data, temp; + + temp = data = RREG32(mmRLC_PG_CNTL); + /* Enable dynamic MGPG */ + if (enable) + data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; + + if (temp != data) + WREG32(mmRLC_PG_CNTL, data); +} + +static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t data, temp; + + temp = data = RREG32(mmRLC_PG_CNTL); + /* Enable quick PG */ + if (enable) + data |= RLC_PG_CNTL__QUICK_PG_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__QUICK_PG_ENABLE_MASK; + + if (temp != data) + WREG32(mmRLC_PG_CNTL, data); +} + +static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + + if (enable) + data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; + + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + +static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + + if (enable) + data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK; + + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); + + /* Read any GFX register to wake up GFX. 
*/ + if (!enable) + data = RREG32(mmDB_RENDER_CONTROL); +} + +static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev, + bool enable) +{ + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) { + cz_enable_gfx_cg_power_gating(adev, true); + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE) + cz_enable_gfx_pipeline_power_gating(adev, true); + } else { + cz_enable_gfx_cg_power_gating(adev, false); + cz_enable_gfx_pipeline_power_gating(adev, false); + } +} + static int gfx_v8_0_set_powergating_state(void *handle, enum amd_powergating_state state) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_PG_STATE_GATE) ? true : false; + + if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) + return 0; + + switch (adev->asic_type) { + case CHIP_CARRIZO: + case CHIP_STONEY: + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) + cz_update_gfx_cg_power_gating(adev, enable); + + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable) + gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true); + else + gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false); + + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable) + gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true); + else + gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false); + break; + case CHIP_POLARIS11: + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable) + gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true); + else + gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false); + + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable) + gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true); + else + gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false); + + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable) + polaris11_enable_gfx_quick_mg_power_gating(adev, true); + else + polaris11_enable_gfx_quick_mg_power_gating(adev, false); + break; + default: + break; + } + return 0; } -static void fiji_send_serdes_cmd(struct amdgpu_device *adev, - uint32_t reg_addr, uint32_t cmd) +static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, + uint32_t reg_addr, uint32_t cmd) { uint32_t data; - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); data = RREG32(mmRLC_SERDES_WR_CTRL); - data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK | + if (adev->asic_type == CHIP_STONEY) + data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK | RLC_SERDES_WR_CTRL__READ_COMMAND_MASK | RLC_SERDES_WR_CTRL__P1_SELECT_MASK | RLC_SERDES_WR_CTRL__P2_SELECT_MASK | @@ -4300,42 +5434,218 @@ static void fiji_send_serdes_cmd(struct amdgpu_device *adev, RLC_SERDES_WR_CTRL__POWER_DOWN_MASK | RLC_SERDES_WR_CTRL__POWER_UP_MASK | RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK | - RLC_SERDES_WR_CTRL__BPM_DATA_MASK | - RLC_SERDES_WR_CTRL__REG_ADDR_MASK | RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK); + else + data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK | + RLC_SERDES_WR_CTRL__READ_COMMAND_MASK | + RLC_SERDES_WR_CTRL__P1_SELECT_MASK | + RLC_SERDES_WR_CTRL__P2_SELECT_MASK | + RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK | + RLC_SERDES_WR_CTRL__POWER_DOWN_MASK | + RLC_SERDES_WR_CTRL__POWER_UP_MASK | + RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK | + RLC_SERDES_WR_CTRL__BPM_DATA_MASK | + RLC_SERDES_WR_CTRL__REG_ADDR_MASK | + RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK); data |= (RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK | - (cmd << RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT) | - 
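
gfx_v8_0_set_powergating_state() above reduces the generic gate/ungate request to a bool, then toggles each feature only when it is both requested and advertised in adev->pg_flags; ASICs without gfx PG support fall through untouched. The per-feature pattern, condensed:

        bool enable = (state == AMD_PG_STATE_GATE);

        if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
                gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
        else
                gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
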
(reg_addr << RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT) | - (0xff << RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT)); + (cmd << RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT) | + (reg_addr << RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT) | + (0xff << RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT)); WREG32(mmRLC_SERDES_WR_CTRL, data); } -static void fiji_update_medium_grain_clock_gating(struct amdgpu_device *adev, - bool enable) +#define MSG_ENTER_RLC_SAFE_MODE 1 +#define MSG_EXIT_RLC_SAFE_MODE 0 + +#define RLC_GPR_REG2__REQ_MASK 0x00000001 +#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001 +#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e + +static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev) +{ + u32 data = 0; + unsigned i; + + data = RREG32(mmRLC_CNTL); + if ((data & RLC_CNTL__RLC_ENABLE_F32_MASK) == 0) + return; + + if ((adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) || + (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_DMG))) { + data |= RLC_GPR_REG2__REQ_MASK; + data &= ~RLC_GPR_REG2__MESSAGE_MASK; + data |= (MSG_ENTER_RLC_SAFE_MODE << RLC_GPR_REG2__MESSAGE__SHIFT); + WREG32(mmRLC_GPR_REG2, data); + + for (i = 0; i < adev->usec_timeout; i++) { + if ((RREG32(mmRLC_GPM_STAT) & + (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK | + RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) == + (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK | + RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) + break; + udelay(1); + } + + for (i = 0; i < adev->usec_timeout; i++) { + if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0) + break; + udelay(1); + } + adev->gfx.rlc.in_safe_mode = true; + } +} + +static void cz_exit_rlc_safe_mode(struct amdgpu_device *adev) +{ + u32 data; + unsigned i; + + data = RREG32(mmRLC_CNTL); + if ((data & RLC_CNTL__RLC_ENABLE_F32_MASK) == 0) + return; + + if ((adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) || + (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_DMG))) { + data |= RLC_GPR_REG2__REQ_MASK; + data &= ~RLC_GPR_REG2__MESSAGE_MASK; + data |= (MSG_EXIT_RLC_SAFE_MODE << RLC_GPR_REG2__MESSAGE__SHIFT); + WREG32(mmRLC_GPR_REG2, data); + adev->gfx.rlc.in_safe_mode = false; + } + + for (i = 0; i < adev->usec_timeout; i++) { + if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0) + break; + udelay(1); + } +} + +static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev) +{ + u32 data; + unsigned i; + + data = RREG32(mmRLC_CNTL); + if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK)) + return; + + if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) { + data |= RLC_SAFE_MODE__CMD_MASK; + data &= ~RLC_SAFE_MODE__MESSAGE_MASK; + data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); + WREG32(mmRLC_SAFE_MODE, data); + + for (i = 0; i < adev->usec_timeout; i++) { + if ((RREG32(mmRLC_GPM_STAT) & + (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK | + RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) == + (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK | + RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) + break; + udelay(1); + } + + for (i = 0; i < adev->usec_timeout; i++) { + if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0) + break; + udelay(1); + } + adev->gfx.rlc.in_safe_mode = true; + } +} + +static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev) +{ + u32 data = 0; + unsigned i; + + data = RREG32(mmRLC_CNTL); + if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK)) + return; + + if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) { + if (adev->gfx.rlc.in_safe_mode) { + data |= RLC_SAFE_MODE__CMD_MASK; + data &= 
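
cz_enter_rlc_safe_mode() above is a small request/acknowledge protocol with the RLC firmware: raise REQ together with the ENTER message in RLC_GPR_REG2, poll RLC_GPM_STAT until both the gfx clock and power status bits read back as on, then poll until the firmware clears REQ. The two bounded polling loops, condensed (illustrative; as in the original, a timeout simply falls through):

        for (i = 0; i < adev->usec_timeout; i++) {
                if ((RREG32(mmRLC_GPM_STAT) &
                     (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
                      RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
                    (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
                     RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
                        break;          /* clocks and power are up */
                udelay(1);
        }
        for (i = 0; i < adev->usec_timeout; i++) {
                if (!(RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK))
                        break;          /* firmware acked the request */
                udelay(1);
        }
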
~RLC_SAFE_MODE__MESSAGE_MASK; + WREG32(mmRLC_SAFE_MODE, data); + adev->gfx.rlc.in_safe_mode = false; + } + } + + for (i = 0; i < adev->usec_timeout; i++) { + if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0) + break; + udelay(1); + } +} + +static void gfx_v8_0_nop_enter_rlc_safe_mode(struct amdgpu_device *adev) +{ + adev->gfx.rlc.in_safe_mode = true; +} + +static void gfx_v8_0_nop_exit_rlc_safe_mode(struct amdgpu_device *adev) +{ + adev->gfx.rlc.in_safe_mode = false; +} + +static const struct amdgpu_rlc_funcs cz_rlc_funcs = { + .enter_safe_mode = cz_enter_rlc_safe_mode, + .exit_safe_mode = cz_exit_rlc_safe_mode +}; + +static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { + .enter_safe_mode = iceland_enter_rlc_safe_mode, + .exit_safe_mode = iceland_exit_rlc_safe_mode +}; + +static const struct amdgpu_rlc_funcs gfx_v8_0_nop_rlc_funcs = { + .enter_safe_mode = gfx_v8_0_nop_enter_rlc_safe_mode, + .exit_safe_mode = gfx_v8_0_nop_exit_rlc_safe_mode +}; + +static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) { uint32_t temp, data; + adev->gfx.rlc.funcs->enter_safe_mode(adev); + /* It is disabled by HW by default */ - if (enable) { - /* 1 - RLC memory Light sleep */ - temp = data = RREG32(mmRLC_MEM_SLP_CNTL); - data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; - if (temp != data) - WREG32(mmRLC_MEM_SLP_CNTL, data); + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) { + /* 1 - RLC memory Light sleep */ + temp = data = RREG32(mmRLC_MEM_SLP_CNTL); + data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; + if (temp != data) + WREG32(mmRLC_MEM_SLP_CNTL, data); + } - /* 2 - CP memory Light sleep */ - temp = data = RREG32(mmCP_MEM_SLP_CNTL); - data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; - if (temp != data) - WREG32(mmCP_MEM_SLP_CNTL, data); + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { + /* 2 - CP memory Light sleep */ + temp = data = RREG32(mmCP_MEM_SLP_CNTL); + data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; + if (temp != data) + WREG32(mmCP_MEM_SLP_CNTL, data); + } + } /* 3 - RLC_CGTT_MGCG_OVERRIDE */ temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE); - data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK | - RLC_CGTT_MGCG_OVERRIDE__RLC_MASK | - RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK | - RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK); + if (adev->flags & AMD_IS_APU) + data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK | + RLC_CGTT_MGCG_OVERRIDE__RLC_MASK | + RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK); + else + data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK | + RLC_CGTT_MGCG_OVERRIDE__RLC_MASK | + RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK | + RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK); if (temp != data) WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data); @@ -4344,19 +5654,23 @@ static void fiji_update_medium_grain_clock_gating(struct amdgpu_device *adev, gfx_v8_0_wait_for_rlc_serdes(adev); /* 5 - clear mgcg override */ - fiji_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD); - - /* 6 - Enable CGTS(Tree Shade) MGCG /MGLS */ - temp = data = RREG32(mmCGTS_SM_CTRL_REG); - data &= ~(CGTS_SM_CTRL_REG__SM_MODE_MASK); - data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT); - data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK; - data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK; - data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK; - data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK; - data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT); - if (temp != data) - WREG32(mmCGTS_SM_CTRL_REG, data); + gfx_v8_0_send_serdes_cmd(adev, 
BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD); + + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) { + /* 6 - Enable CGTS(Tree Shade) MGCG /MGLS */ + temp = data = RREG32(mmCGTS_SM_CTRL_REG); + data &= ~(CGTS_SM_CTRL_REG__SM_MODE_MASK); + data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT); + data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK; + data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK; + if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) && + (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS)) + data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK; + data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK; + data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT); + if (temp != data) + WREG32(mmCGTS_SM_CTRL_REG, data); + } udelay(50); /* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */ @@ -4396,23 +5710,27 @@ static void fiji_update_medium_grain_clock_gating(struct amdgpu_device *adev, gfx_v8_0_wait_for_rlc_serdes(adev); /* 6 - set mgcg override */ - fiji_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, SET_BPM_SERDES_CMD); + gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, SET_BPM_SERDES_CMD); udelay(50); /* 7- wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */ gfx_v8_0_wait_for_rlc_serdes(adev); } + + adev->gfx.rlc.funcs->exit_safe_mode(adev); } -static void fiji_update_coarse_grain_clock_gating(struct amdgpu_device *adev, - bool enable) +static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, + bool enable) { uint32_t temp, temp1, data, data1; temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL); - if (enable) { + adev->gfx.rlc.funcs->enter_safe_mode(adev); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { /* 1 enable cntx_empty_int_enable/cntx_busy_int_enable/ * Cmp_busy/GFX_Idle interrupts */ @@ -4427,25 +5745,29 @@ static void fiji_update_coarse_grain_clock_gating(struct amdgpu_device *adev, gfx_v8_0_wait_for_rlc_serdes(adev); /* 3 - clear cgcg override */ - fiji_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD); + gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD); /* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */ gfx_v8_0_wait_for_rlc_serdes(adev); /* 4 - write cmd to set CGLS */ - fiji_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD); + gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD); /* 5 - enable cgcg */ data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; - /* enable cgls*/ - data |= RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) { + /* enable cgls*/ + data |= RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; - temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE); - data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK; + temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE); + data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK; - if (temp1 != data1) - WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1); + if (temp1 != data1) + WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1); + } else { + data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; + } if (temp != data) WREG32(mmRLC_CGCG_CGLS_CTRL, data); @@ -4470,36 +5792,40 @@ static void fiji_update_coarse_grain_clock_gating(struct amdgpu_device *adev, gfx_v8_0_wait_for_rlc_serdes(adev); /* write cmd to Set CGCG Overrride */ - fiji_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD); + gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD); /* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */ gfx_v8_0_wait_for_rlc_serdes(adev); /* write cmd to Clear CGLS */ - fiji_send_serdes_cmd(adev, BPM_REG_CGLS_EN, CLE_BPM_SERDES_CMD); 
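
Each numbered step of the MGCG sequence above pairs a BPM serdes command with an idle wait: gfx_v8_0_send_serdes_cmd() broadcasts to all SE/SH, arms the CU and non-CU master masks, and latches the command into RLC_SERDES_WR_CTRL. The recurring call pattern is:

        /* clear (or set) a BPM override, then wait for the serdes masters */
        gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
        gfx_v8_0_wait_for_rlc_serdes(adev);
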
+ gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, CLE_BPM_SERDES_CMD); /* disable cgcg, cgls should be disabled too. */ data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | - RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); + RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); if (temp != data) WREG32(mmRLC_CGCG_CGLS_CTRL, data); } + + gfx_v8_0_wait_for_rlc_serdes(adev); + + adev->gfx.rlc.funcs->exit_safe_mode(adev); } -static int fiji_update_gfx_clock_gating(struct amdgpu_device *adev, - bool enable) +static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev, + bool enable) { if (enable) { /* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS) * === MGCG + MGLS + TS(CG/LS) === */ - fiji_update_medium_grain_clock_gating(adev, enable); - fiji_update_coarse_grain_clock_gating(adev, enable); + gfx_v8_0_update_medium_grain_clock_gating(adev, enable); + gfx_v8_0_update_coarse_grain_clock_gating(adev, enable); } else { /* CGCG/CGLS should be disabled before MGCG/MGLS/TS(CG/LS) * === CGCG + CGLS === */ - fiji_update_coarse_grain_clock_gating(adev, enable); - fiji_update_medium_grain_clock_gating(adev, enable); + gfx_v8_0_update_coarse_grain_clock_gating(adev, enable); + gfx_v8_0_update_medium_grain_clock_gating(adev, enable); } return 0; } @@ -4511,8 +5837,10 @@ static int gfx_v8_0_set_clockgating_state(void *handle, switch (adev->asic_type) { case CHIP_FIJI: - fiji_update_gfx_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + case CHIP_CARRIZO: + case CHIP_STONEY: + gfx_v8_0_update_gfx_clock_gating(adev, + state == AMD_CG_STATE_GATE ? true : false); break; default: break; @@ -4602,28 +5930,13 @@ static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) } static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, - struct amdgpu_ib *ib) + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { - bool need_ctx_switch = ring->current_ctx != ib->ctx; u32 header, control = 0; - u32 next_rptr = ring->wptr + 5; - - /* drop the CE preamble IB for the same context */ - if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch) - return; - - if (need_ctx_switch) - next_rptr += 2; - - next_rptr += 4; - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); - amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, next_rptr); /* insert SWITCH_BUFFER packet before first IB in the ring frame */ - if (need_ctx_switch) { + if (ctx_switch) { amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); amdgpu_ring_write(ring, 0); } @@ -4633,7 +5946,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, else header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); - control |= ib->length_dw | (ib->vm_id << 24); + control |= ib->length_dw | (vm_id << 24); amdgpu_ring_write(ring, header); amdgpu_ring_write(ring, @@ -4646,25 +5959,12 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, } static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, - struct amdgpu_ib *ib) + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { - u32 header, control = 0; - u32 next_rptr = ring->wptr + 5; + u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24); - control |= INDIRECT_BUFFER_VALID; - - next_rptr += 4; - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); - amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); 
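
gfx_v8_0_update_gfx_clock_gating() above encodes the ordering invariant spelled out in its comments: coarse-grain gating (CGCG/CGLS) may only come up after medium-grain gating (MGCG/MGLS/TS) and must come down first, so the enable and disable paths call the two helpers in opposite orders:

        if (enable) {
                /* MGCG first, then CGCG */
                gfx_v8_0_update_medium_grain_clock_gating(adev, true);
                gfx_v8_0_update_coarse_grain_clock_gating(adev, true);
        } else {
                /* CGCG first, then MGCG */
                gfx_v8_0_update_coarse_grain_clock_gating(adev, false);
                gfx_v8_0_update_medium_grain_clock_gating(adev, false);
        }
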
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, next_rptr); - - header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); - - control |= ib->length_dw | (ib->vm_id << 24); - - amdgpu_ring_write(ring, header); + amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); amdgpu_ring_write(ring, #ifdef __BIG_ENDIAN (2 << 0) | @@ -4684,6 +5984,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN | + EOP_TC_WB_ACTION_EN | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5))); amdgpu_ring_write(ring, addr & 0xfffffffc); @@ -5022,6 +6323,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev, } const struct amd_ip_funcs gfx_v8_0_ip_funcs = { + .name = "gfx_v8_0", .early_init = gfx_v8_0_early_init, .late_init = gfx_v8_0_late_init, .sw_init = gfx_v8_0_sw_init, @@ -5033,7 +6335,6 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .is_idle = gfx_v8_0_is_idle, .wait_for_idle = gfx_v8_0_wait_for_idle, .soft_reset = gfx_v8_0_soft_reset, - .print_status = gfx_v8_0_print_status, .set_clockgating_state = gfx_v8_0_set_clockgating_state, .set_powergating_state = gfx_v8_0_set_powergating_state, }; @@ -5112,6 +6413,22 @@ static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev) adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs; } +static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_TOPAZ: + adev->gfx.rlc.funcs = &iceland_rlc_funcs; + break; + case CHIP_STONEY: + case CHIP_CARRIZO: + adev->gfx.rlc.funcs = &cz_rlc_funcs; + break; + default: + adev->gfx.rlc.funcs = &gfx_v8_0_nop_rlc_funcs; + break; + } +} + static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev) { /* init asci gds info */ @@ -5140,6 +6457,20 @@ static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev) } } +static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, + u32 bitmap) +{ + u32 data; + + if (!bitmap) + return; + + data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; + data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; + + WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data); +} + static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; @@ -5155,24 +6486,27 @@ static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev) return (~data) & mask; } -int gfx_v8_0_get_cu_info(struct amdgpu_device *adev, - struct amdgpu_cu_info *cu_info) +static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev) { int i, j, k, counter, active_cu_number = 0; u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; - - if (!adev || !cu_info) - return -EINVAL; + struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; + unsigned disable_masks[4 * 2]; memset(cu_info, 0, sizeof(*cu_info)); + amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2); + mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { mask = 1; ao_bitmap = 0; counter = 0; - gfx_v8_0_select_se_sh(adev, i, j); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); + if (i < 4 && j < 2) + gfx_v8_0_set_user_cu_inactive_bitmap( + adev, disable_masks[i * 2 + j]); bitmap = gfx_v8_0_get_cu_active_bitmap(adev); cu_info->bitmap[i][j] = bitmap; @@ -5188,11 +6522,9 @@ int gfx_v8_0_get_cu_info(struct amdgpu_device *adev, ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); } } - 
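
The ao_cu_mask accumulation at the end of gfx_v8_0_get_cu_info() above packs the always-on CU bitmaps into a fixed layout: 16 bits per shader engine, 8 bits per shader array within it, so the bitmap for SE i / SH j lands at bit (i * 16 + j * 8). A sketch of decoding one slot back out (illustrative only):

        /* recover the 8-bit always-on CU bitmap for a given SE/SH pair */
        u8 ao_bitmap = (cu_info->ao_cu_mask >> (se * 16 + sh * 8)) & 0xff;
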
gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; cu_info->ao_cu_mask = ao_cu_mask; - - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h index 021e05193cb9..bc82c794312c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h @@ -26,8 +26,6 @@ extern const struct amd_ip_funcs gfx_v8_0_ip_funcs; -uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev); void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); -int gfx_v8_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 82ce7d943884..d24a82bd0c7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -39,6 +39,7 @@ static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); +static int gmc_v7_0_wait_for_idle(void *handle); MODULE_FIRMWARE("radeon/bonaire_mc.bin"); MODULE_FIRMWARE("radeon/hawaii_mc.bin"); @@ -73,39 +74,15 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev) } } -/** - * gmc7_mc_wait_for_idle - wait for MC idle callback. - * - * @adev: amdgpu_device pointer - * - * Wait for the MC (memory controller) to be idle. - * (evergreen+). - * Returns 0 if the MC is idle, -1 if not. - */ -int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev) -{ - unsigned i; - u32 tmp; - - for (i = 0; i < adev->usec_timeout; i++) { - /* read MC_STATUS */ - tmp = RREG32(mmSRBM_STATUS) & 0x1F00; - if (!tmp) - return 0; - udelay(1); - } - return -1; -} - -void gmc_v7_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v7_0_mc_stop(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) { u32 blackout; if (adev->mode_info.num_crtc) amdgpu_display_stop_mc_access(adev, save); - amdgpu_asic_wait_for_mc_idle(adev); + gmc_v7_0_wait_for_idle((void *)adev); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { @@ -120,8 +97,8 @@ void gmc_v7_0_mc_stop(struct amdgpu_device *adev, udelay(100); } -void gmc_v7_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v7_0_mc_resume(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) { u32 tmp; @@ -311,7 +288,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) amdgpu_display_set_vga_render_state(adev, false); gmc_v7_0_mc_stop(adev, &save); - if (amdgpu_asic_wait_for_mc_idle(adev)) { + if (gmc_v7_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } /* Update configuration */ @@ -331,7 +308,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); - if (amdgpu_asic_wait_for_mc_idle(adev)) { + if (gmc_v7_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } gmc_v7_0_mc_resume(adev, &save); @@ -903,14 +880,6 @@ static int gmc_v7_0_early_init(void *handle) gmc_v7_0_set_gart_funcs(adev); gmc_v7_0_set_irq_funcs(adev); - if (adev->flags & AMD_IS_APU) { - adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; - } else { - u32 tmp = RREG32(mmMC_SEQ_MISC0); - tmp 
&= MC_SEQ_MISC0__MT__MASK; - adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp); - } - return 0; } @@ -918,7 +887,10 @@ static int gmc_v7_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); + if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) + return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); + else + return 0; } static int gmc_v7_0_sw_init(void *handle) @@ -927,6 +899,14 @@ static int gmc_v7_0_sw_init(void *handle) int dma_bits; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (adev->flags & AMD_IS_APU) { + adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; + } else { + u32 tmp = RREG32(mmMC_SEQ_MISC0); + tmp &= MC_SEQ_MISC0__MT__MASK; + adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp); + } + r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); if (r) return r; @@ -1114,114 +1094,6 @@ static int gmc_v7_0_wait_for_idle(void *handle) } -static void gmc_v7_0_print_status(void *handle) -{ - int i, j; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - dev_info(adev->dev, "GMC 8.x registers\n"); - dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", - RREG32(mmSRBM_STATUS)); - dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", - RREG32(mmSRBM_STATUS2)); - - dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", - RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR)); - dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", - RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS)); - dev_info(adev->dev, " MC_VM_MX_L1_TLB_CNTL=0x%08X\n", - RREG32(mmMC_VM_MX_L1_TLB_CNTL)); - dev_info(adev->dev, " VM_L2_CNTL=0x%08X\n", - RREG32(mmVM_L2_CNTL)); - dev_info(adev->dev, " VM_L2_CNTL2=0x%08X\n", - RREG32(mmVM_L2_CNTL2)); - dev_info(adev->dev, " VM_L2_CNTL3=0x%08X\n", - RREG32(mmVM_L2_CNTL3)); - dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n", - RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR)); - dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n", - RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR)); - dev_info(adev->dev, " VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n", - RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR)); - dev_info(adev->dev, " VM_CONTEXT0_CNTL2=0x%08X\n", - RREG32(mmVM_CONTEXT0_CNTL2)); - dev_info(adev->dev, " VM_CONTEXT0_CNTL=0x%08X\n", - RREG32(mmVM_CONTEXT0_CNTL)); - dev_info(adev->dev, " 0x15D4=0x%08X\n", - RREG32(0x575)); - dev_info(adev->dev, " 0x15D8=0x%08X\n", - RREG32(0x576)); - dev_info(adev->dev, " 0x15DC=0x%08X\n", - RREG32(0x577)); - dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n", - RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR)); - dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n", - RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR)); - dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n", - RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR)); - dev_info(adev->dev, " VM_CONTEXT1_CNTL2=0x%08X\n", - RREG32(mmVM_CONTEXT1_CNTL2)); - dev_info(adev->dev, " VM_CONTEXT1_CNTL=0x%08X\n", - RREG32(mmVM_CONTEXT1_CNTL)); - for (i = 0; i < 16; i++) { - if (i < 8) - dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n", - i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i)); - else - dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n", - i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8)); - } - dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n", - RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR)); - dev_info(adev->dev, " 
MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n", - RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR)); - dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n", - RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR)); - dev_info(adev->dev, " MC_VM_FB_LOCATION=0x%08X\n", - RREG32(mmMC_VM_FB_LOCATION)); - dev_info(adev->dev, " MC_VM_AGP_BASE=0x%08X\n", - RREG32(mmMC_VM_AGP_BASE)); - dev_info(adev->dev, " MC_VM_AGP_TOP=0x%08X\n", - RREG32(mmMC_VM_AGP_TOP)); - dev_info(adev->dev, " MC_VM_AGP_BOT=0x%08X\n", - RREG32(mmMC_VM_AGP_BOT)); - - if (adev->asic_type == CHIP_KAVERI) { - dev_info(adev->dev, " CHUB_CONTROL=0x%08X\n", - RREG32(mmCHUB_CONTROL)); - } - - dev_info(adev->dev, " HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n", - RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL)); - dev_info(adev->dev, " HDP_NONSURFACE_BASE=0x%08X\n", - RREG32(mmHDP_NONSURFACE_BASE)); - dev_info(adev->dev, " HDP_NONSURFACE_INFO=0x%08X\n", - RREG32(mmHDP_NONSURFACE_INFO)); - dev_info(adev->dev, " HDP_NONSURFACE_SIZE=0x%08X\n", - RREG32(mmHDP_NONSURFACE_SIZE)); - dev_info(adev->dev, " HDP_MISC_CNTL=0x%08X\n", - RREG32(mmHDP_MISC_CNTL)); - dev_info(adev->dev, " HDP_HOST_PATH_CNTL=0x%08X\n", - RREG32(mmHDP_HOST_PATH_CNTL)); - - for (i = 0, j = 0; i < 32; i++, j += 0x6) { - dev_info(adev->dev, " %d:\n", i); - dev_info(adev->dev, " 0x%04X=0x%08X\n", - 0xb05 + j, RREG32(0xb05 + j)); - dev_info(adev->dev, " 0x%04X=0x%08X\n", - 0xb06 + j, RREG32(0xb06 + j)); - dev_info(adev->dev, " 0x%04X=0x%08X\n", - 0xb07 + j, RREG32(0xb07 + j)); - dev_info(adev->dev, " 0x%04X=0x%08X\n", - 0xb08 + j, RREG32(0xb08 + j)); - dev_info(adev->dev, " 0x%04X=0x%08X\n", - 0xb09 + j, RREG32(0xb09 + j)); - } - - dev_info(adev->dev, " BIF_FB_EN=0x%08X\n", - RREG32(mmBIF_FB_EN)); -} - static int gmc_v7_0_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1241,10 +1113,8 @@ static int gmc_v7_0_soft_reset(void *handle) } if (srbm_soft_reset) { - gmc_v7_0_print_status((void *)adev); - gmc_v7_0_mc_stop(adev, &save); - if (gmc_v7_0_wait_for_idle(adev)) { + if (gmc_v7_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); } @@ -1266,8 +1136,6 @@ static int gmc_v7_0_soft_reset(void *handle) gmc_v7_0_mc_resume(adev, &save); udelay(50); - - gmc_v7_0_print_status((void *)adev); } return 0; @@ -1370,6 +1238,7 @@ static int gmc_v7_0_set_powergating_state(void *handle, } const struct amd_ip_funcs gmc_v7_0_ip_funcs = { + .name = "gmc_v7_0", .early_init = gmc_v7_0_early_init, .late_init = gmc_v7_0_late_init, .sw_init = gmc_v7_0_sw_init, @@ -1381,7 +1250,6 @@ const struct amd_ip_funcs gmc_v7_0_ip_funcs = { .is_idle = gmc_v7_0_is_idle, .wait_for_idle = gmc_v7_0_wait_for_idle, .soft_reset = gmc_v7_0_soft_reset, - .print_status = gmc_v7_0_print_status, .set_clockgating_state = gmc_v7_0_set_clockgating_state, .set_powergating_state = gmc_v7_0_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h index 36fcbbc46ada..0b386b5d2f7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h @@ -26,11 +26,4 @@ extern const struct amd_ip_funcs gmc_v7_0_ip_funcs; -/* XXX these shouldn't be exported */ -void gmc_v7_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save); -void gmc_v7_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save); -int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev); - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c 
index 29bd7b57dc91..717359d3ba8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -41,8 +41,11 @@ static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev); static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); +static int gmc_v8_0_wait_for_idle(void *handle); MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_mc.bin"); +MODULE_FIRMWARE("amdgpu/polaris10_mc.bin"); static const u32 golden_settings_tonga_a11[] = { @@ -73,6 +76,23 @@ static const u32 fiji_mgcg_cgcg_init[] = mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 }; +static const u32 golden_settings_polaris11_a11[] = +{ + mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff +}; + +static const u32 golden_settings_polaris10_a11[] = +{ + mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000, + mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff +}; + static const u32 cz_mgcg_cgcg_init[] = { mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 @@ -103,6 +123,16 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_tonga_a11, (const u32)ARRAY_SIZE(golden_settings_tonga_a11)); break; + case CHIP_POLARIS11: + amdgpu_program_register_sequence(adev, + golden_settings_polaris11_a11, + (const u32)ARRAY_SIZE(golden_settings_polaris11_a11)); + break; + case CHIP_POLARIS10: + amdgpu_program_register_sequence(adev, + golden_settings_polaris10_a11, + (const u32)ARRAY_SIZE(golden_settings_polaris10_a11)); + break; case CHIP_CARRIZO: amdgpu_program_register_sequence(adev, cz_mgcg_cgcg_init, @@ -118,44 +148,15 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) } } -/** - * gmc8_mc_wait_for_idle - wait for MC idle callback. - * - * @adev: amdgpu_device pointer - * - * Wait for the MC (memory controller) to be idle. - * (evergreen+). - * Returns 0 if the MC is idle, -1 if not. 
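The new Polaris10/11 tables follow the (offset, and_mask, or_mask) triple layout that amdgpu_program_register_sequence() walks. A sketch of those semantics as the callers here imply them (a full 0xffffffff mask writes the value outright, anything narrower is a read-modify-write; treat this as an assumption about the helper, not its verbatim body):

static void program_register_sequence_sketch(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	for (i = 0; i + 2 < array_size; i += 3) {
		reg      = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask  = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;		/* straight write */
		} else {
			tmp = RREG32(reg);	/* read-modify-write */
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
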
- */ -int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev) -{ - unsigned i; - u32 tmp; - - for (i = 0; i < adev->usec_timeout; i++) { - /* read MC_STATUS */ - tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK | - SRBM_STATUS__MCB_BUSY_MASK | - SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | - SRBM_STATUS__MCC_BUSY_MASK | - SRBM_STATUS__MCD_BUSY_MASK | - SRBM_STATUS__VMC1_BUSY_MASK); - if (!tmp) - return 0; - udelay(1); - } - return -1; -} - -void gmc_v8_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v8_0_mc_stop(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) { u32 blackout; if (adev->mode_info.num_crtc) amdgpu_display_stop_mc_access(adev, save); - amdgpu_asic_wait_for_mc_idle(adev); + gmc_v8_0_wait_for_idle(adev); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { @@ -170,8 +171,8 @@ void gmc_v8_0_mc_stop(struct amdgpu_device *adev, udelay(100); } -void gmc_v8_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v8_0_mc_resume(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) { u32 tmp; @@ -209,6 +210,12 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev) case CHIP_TONGA: chip_name = "tonga"; break; + case CHIP_POLARIS11: + chip_name = "polaris11"; + break; + case CHIP_POLARIS10: + chip_name = "polaris10"; + break; case CHIP_FIJI: case CHIP_CARRIZO: case CHIP_STONEY: @@ -358,7 +365,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) amdgpu_display_set_vga_render_state(adev, false); gmc_v8_0_mc_stop(adev, &save); - if (amdgpu_asic_wait_for_mc_idle(adev)) { + if (gmc_v8_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } /* Update configuration */ @@ -378,7 +385,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); - if (amdgpu_asic_wait_for_mc_idle(adev)) { + if (gmc_v8_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } gmc_v8_0_mc_resume(adev, &save); @@ -863,14 +870,6 @@ static int gmc_v8_0_early_init(void *handle) gmc_v8_0_set_gart_funcs(adev); gmc_v8_0_set_irq_funcs(adev); - if (adev->flags & AMD_IS_APU) { - adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; - } else { - u32 tmp = RREG32(mmMC_SEQ_MISC0); - tmp &= MC_SEQ_MISC0__MT__MASK; - adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp); - } - return 0; } @@ -878,15 +877,33 @@ static int gmc_v8_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); + if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) + return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); + else + return 0; } +#define mmMC_SEQ_MISC0_FIJI 0xA71 + static int gmc_v8_0_sw_init(void *handle) { int r; int dma_bits; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (adev->flags & AMD_IS_APU) { + adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; + } else { + u32 tmp; + + if (adev->asic_type == CHIP_FIJI) + tmp = RREG32(mmMC_SEQ_MISC0_FIJI); + else + tmp = RREG32(mmMC_SEQ_MISC0); + tmp &= MC_SEQ_MISC0__MT__MASK; + adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp); + } + r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); if (r) return r; @@ -1075,111 +1092,6 @@ static int gmc_v8_0_wait_for_idle(void *handle) } -static void gmc_v8_0_print_status(void *handle) -{ - int i, 
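Relocating the VRAM-type probe from gmc_v8_0_early_init() into sw_init also lets Fiji read its MC_SEQ_MISC0 mirror at the locally defined mmMC_SEQ_MISC0_FIJI offset. The restructured probe, condensed:

if (adev->flags & AMD_IS_APU) {
	/* APUs carve memory out of system RAM; no discrete VRAM type */
	adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else {
	u32 tmp = (adev->asic_type == CHIP_FIJI) ?
		  RREG32(mmMC_SEQ_MISC0_FIJI) : RREG32(mmMC_SEQ_MISC0);

	adev->mc.vram_type =
		gmc_v8_0_convert_vram_type(tmp & MC_SEQ_MISC0__MT__MASK);
}
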
j; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - dev_info(adev->dev, "GMC 8.x registers\n"); - dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", - RREG32(mmSRBM_STATUS)); - dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", - RREG32(mmSRBM_STATUS2)); - - dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", - RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR)); - dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", - RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS)); - dev_info(adev->dev, " MC_VM_MX_L1_TLB_CNTL=0x%08X\n", - RREG32(mmMC_VM_MX_L1_TLB_CNTL)); - dev_info(adev->dev, " VM_L2_CNTL=0x%08X\n", - RREG32(mmVM_L2_CNTL)); - dev_info(adev->dev, " VM_L2_CNTL2=0x%08X\n", - RREG32(mmVM_L2_CNTL2)); - dev_info(adev->dev, " VM_L2_CNTL3=0x%08X\n", - RREG32(mmVM_L2_CNTL3)); - dev_info(adev->dev, " VM_L2_CNTL4=0x%08X\n", - RREG32(mmVM_L2_CNTL4)); - dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n", - RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR)); - dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n", - RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR)); - dev_info(adev->dev, " VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n", - RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR)); - dev_info(adev->dev, " VM_CONTEXT0_CNTL2=0x%08X\n", - RREG32(mmVM_CONTEXT0_CNTL2)); - dev_info(adev->dev, " VM_CONTEXT0_CNTL=0x%08X\n", - RREG32(mmVM_CONTEXT0_CNTL)); - dev_info(adev->dev, " VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR=0x%08X\n", - RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR)); - dev_info(adev->dev, " VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR=0x%08X\n", - RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR)); - dev_info(adev->dev, " mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET=0x%08X\n", - RREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET)); - dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n", - RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR)); - dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n", - RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR)); - dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n", - RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR)); - dev_info(adev->dev, " VM_CONTEXT1_CNTL2=0x%08X\n", - RREG32(mmVM_CONTEXT1_CNTL2)); - dev_info(adev->dev, " VM_CONTEXT1_CNTL=0x%08X\n", - RREG32(mmVM_CONTEXT1_CNTL)); - for (i = 0; i < 16; i++) { - if (i < 8) - dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n", - i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i)); - else - dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n", - i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8)); - } - dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n", - RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR)); - dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n", - RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR)); - dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n", - RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR)); - dev_info(adev->dev, " MC_VM_FB_LOCATION=0x%08X\n", - RREG32(mmMC_VM_FB_LOCATION)); - dev_info(adev->dev, " MC_VM_AGP_BASE=0x%08X\n", - RREG32(mmMC_VM_AGP_BASE)); - dev_info(adev->dev, " MC_VM_AGP_TOP=0x%08X\n", - RREG32(mmMC_VM_AGP_TOP)); - dev_info(adev->dev, " MC_VM_AGP_BOT=0x%08X\n", - RREG32(mmMC_VM_AGP_BOT)); - - dev_info(adev->dev, " HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n", - RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL)); - dev_info(adev->dev, " HDP_NONSURFACE_BASE=0x%08X\n", - RREG32(mmHDP_NONSURFACE_BASE)); - dev_info(adev->dev, " 
HDP_NONSURFACE_INFO=0x%08X\n", - RREG32(mmHDP_NONSURFACE_INFO)); - dev_info(adev->dev, " HDP_NONSURFACE_SIZE=0x%08X\n", - RREG32(mmHDP_NONSURFACE_SIZE)); - dev_info(adev->dev, " HDP_MISC_CNTL=0x%08X\n", - RREG32(mmHDP_MISC_CNTL)); - dev_info(adev->dev, " HDP_HOST_PATH_CNTL=0x%08X\n", - RREG32(mmHDP_HOST_PATH_CNTL)); - - for (i = 0, j = 0; i < 32; i++, j += 0x6) { - dev_info(adev->dev, " %d:\n", i); - dev_info(adev->dev, " 0x%04X=0x%08X\n", - 0xb05 + j, RREG32(0xb05 + j)); - dev_info(adev->dev, " 0x%04X=0x%08X\n", - 0xb06 + j, RREG32(0xb06 + j)); - dev_info(adev->dev, " 0x%04X=0x%08X\n", - 0xb07 + j, RREG32(0xb07 + j)); - dev_info(adev->dev, " 0x%04X=0x%08X\n", - 0xb08 + j, RREG32(0xb08 + j)); - dev_info(adev->dev, " 0x%04X=0x%08X\n", - 0xb09 + j, RREG32(0xb09 + j)); - } - - dev_info(adev->dev, " BIF_FB_EN=0x%08X\n", - RREG32(mmBIF_FB_EN)); -} - static int gmc_v8_0_soft_reset(void *handle) { struct amdgpu_mode_mc_save save; @@ -1199,10 +1111,8 @@ static int gmc_v8_0_soft_reset(void *handle) } if (srbm_soft_reset) { - gmc_v8_0_print_status((void *)adev); - gmc_v8_0_mc_stop(adev, &save); - if (gmc_v8_0_wait_for_idle(adev)) { + if (gmc_v8_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); } @@ -1224,8 +1134,6 @@ static int gmc_v8_0_soft_reset(void *handle) gmc_v8_0_mc_resume(adev, &save); udelay(50); - - gmc_v8_0_print_status((void *)adev); } return 0; @@ -1303,11 +1211,11 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, } static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev, - bool enable) + bool enable) { uint32_t data; - if (enable) { + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) { data = RREG32(mmMC_HUB_MISC_HUB_CG); data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK; WREG32(mmMC_HUB_MISC_HUB_CG, data); @@ -1383,11 +1291,11 @@ static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev, } static void fiji_update_mc_light_sleep(struct amdgpu_device *adev, - bool enable) + bool enable) { uint32_t data; - if (enable) { + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) { data = RREG32(mmMC_HUB_MISC_HUB_CG); data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK; WREG32(mmMC_HUB_MISC_HUB_CG, data); @@ -1487,6 +1395,7 @@ static int gmc_v8_0_set_powergating_state(void *handle, } const struct amd_ip_funcs gmc_v8_0_ip_funcs = { + .name = "gmc_v8_0", .early_init = gmc_v8_0_early_init, .late_init = gmc_v8_0_late_init, .sw_init = gmc_v8_0_sw_init, @@ -1498,7 +1407,6 @@ const struct amd_ip_funcs gmc_v8_0_ip_funcs = { .is_idle = gmc_v8_0_is_idle, .wait_for_idle = gmc_v8_0_wait_for_idle, .soft_reset = gmc_v8_0_soft_reset, - .print_status = gmc_v8_0_print_status, .set_clockgating_state = gmc_v8_0_set_clockgating_state, .set_powergating_state = gmc_v8_0_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h index 973436086b38..fc5001a8119d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h @@ -26,11 +26,4 @@ extern const struct amd_ip_funcs gmc_v8_0_ip_funcs; -/* XXX these shouldn't be exported */ -void gmc_v8_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save); -void gmc_v8_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save); -int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev); - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c index 208d55f41c7f..2f078ad6095c 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c @@ -24,7 +24,7 @@ #include <linux/firmware.h> #include "drmP.h" #include "amdgpu.h" -#include "iceland_smumgr.h" +#include "iceland_smum.h" MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); @@ -72,6 +72,11 @@ static int iceland_dpm_sw_init(void *handle) static int iceland_dpm_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + return 0; } @@ -157,6 +162,7 @@ static int iceland_dpm_set_powergating_state(void *handle, } const struct amd_ip_funcs iceland_dpm_ip_funcs = { + .name = "iceland_dpm", .early_init = iceland_dpm_early_init, .late_init = NULL, .sw_init = iceland_dpm_sw_init, @@ -168,7 +174,6 @@ const struct amd_ip_funcs iceland_dpm_ip_funcs = { .is_idle = NULL, .wait_for_idle = NULL, .soft_reset = NULL, - .print_status = NULL, .set_clockgating_state = iceland_dpm_set_clockgating_state, .set_powergating_state = iceland_dpm_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index 679e7394a495..3b8906ce3511 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -103,7 +103,6 @@ static void iceland_ih_disable_interrupts(struct amdgpu_device *adev) */ static int iceland_ih_irq_init(struct amdgpu_device *adev) { - int ret = 0; int rb_bufsz; u32 interrupt_cntl, ih_cntl, ih_rb_cntl; u64 wptr_off; @@ -157,7 +156,7 @@ static int iceland_ih_irq_init(struct amdgpu_device *adev) /* enable interrupts */ iceland_ih_enable_interrupts(adev); - return ret; + return 0; } /** @@ -351,35 +350,6 @@ static int iceland_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static void iceland_ih_print_status(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - dev_info(adev->dev, "ICELAND IH registers\n"); - dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", - RREG32(mmSRBM_STATUS)); - dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", - RREG32(mmSRBM_STATUS2)); - dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n", - RREG32(mmINTERRUPT_CNTL)); - dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n", - RREG32(mmINTERRUPT_CNTL2)); - dev_info(adev->dev, " IH_CNTL=0x%08X\n", - RREG32(mmIH_CNTL)); - dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n", - RREG32(mmIH_RB_CNTL)); - dev_info(adev->dev, " IH_RB_BASE=0x%08X\n", - RREG32(mmIH_RB_BASE)); - dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n", - RREG32(mmIH_RB_WPTR_ADDR_LO)); - dev_info(adev->dev, " IH_RB_WPTR_ADDR_HI=0x%08X\n", - RREG32(mmIH_RB_WPTR_ADDR_HI)); - dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n", - RREG32(mmIH_RB_RPTR)); - dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n", - RREG32(mmIH_RB_WPTR)); -} - static int iceland_ih_soft_reset(void *handle) { u32 srbm_soft_reset = 0; @@ -391,8 +361,6 @@ static int iceland_ih_soft_reset(void *handle) SOFT_RESET_IH, 1); if (srbm_soft_reset) { - iceland_ih_print_status((void *)adev); - tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); @@ -407,8 +375,6 @@ static int iceland_ih_soft_reset(void *handle) /* Wait a little for things to settle down */ udelay(50); - - iceland_ih_print_status((void *)adev); } return 0; @@ -427,6 +393,7 @@ static int iceland_ih_set_powergating_state(void *handle, } const struct amd_ip_funcs iceland_ih_ip_funcs = { + .name = "iceland_ih", .early_init = iceland_ih_early_init, .late_init = NULL, .sw_init = iceland_ih_sw_init, @@ -438,7 +405,6 @@ 
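The sw_fini addition above (and the matching ones in the tonga_dpm and SDMA blocks later in this series) use the same teardown idiom: release_firmware() tolerates a NULL pointer, so releasing and then clearing the field keeps the hook safe even if microcode loading failed part-way or fini runs more than once. The per-instance SDMA variant, sketched:

static void sdma_free_microcode_sketch(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		release_firmware(adev->sdma.instance[i].fw);
		adev->sdma.instance[i].fw = NULL;	/* idempotent teardown */
	}
}
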
const struct amd_ip_funcs iceland_ih_ip_funcs = { .is_idle = iceland_ih_is_idle, .wait_for_idle = iceland_ih_wait_for_idle, .soft_reset = iceland_ih_soft_reset, - .print_status = iceland_ih_print_status, .set_clockgating_state = iceland_ih_set_clockgating_state, .set_powergating_state = iceland_ih_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c index 52ee08193295..211839913728 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c @@ -25,7 +25,7 @@ #include "drmP.h" #include "amdgpu.h" #include "ppsmc.h" -#include "iceland_smumgr.h" +#include "iceland_smum.h" #include "smu_ucode_xfer_vi.h" #include "amdgpu_ucode.h" @@ -211,7 +211,7 @@ static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev, PPSMC_Msg msg) { if (!iceland_is_smc_ram_running(adev)) - return -EINVAL;; + return -EINVAL; if (wait_smu_response(adev)) { DRM_ERROR("Failed to send previous message\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h index 1e0769e110fa..5983e3150cc5 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h @@ -21,8 +21,8 @@ * */ -#ifndef ICELAND_SMUMGR_H -#define ICELAND_SMUMGR_H +#ifndef ICELAND_SMUM_H +#define ICELAND_SMUM_H #include "ppsmc.h" diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 654d76723bc3..a845e883f5fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -135,11 +135,6 @@ static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable) #endif } -static u32 sumo_get_sleep_divider_from_id(u32 id) -{ - return 1 << id; -} - static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev, struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table, ATOM_AVAILABLE_SCLK_LIST *table) @@ -196,6 +191,7 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev, vid_mapping_table->num_entries = i; } +#if 0 static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = { { 0, 4, 1 }, @@ -294,6 +290,7 @@ static const struct kv_lcac_config_reg cpl_cac_config_reg[] = { { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } }; +#endif static const struct kv_pt_config_reg didt_config_kv[] = { @@ -512,19 +509,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable) pi->caps_db_ramping || pi->caps_td_ramping || pi->caps_tcp_ramping) { - gfx_v7_0_enter_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->enter_safe_mode(adev); if (enable) { ret = kv_program_pt_config_registers(adev, didt_config_kv); if (ret) { - gfx_v7_0_exit_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->exit_safe_mode(adev); return ret; } } kv_do_enable_didt(adev, enable); - gfx_v7_0_exit_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->exit_safe_mode(adev); } return 0; @@ -2176,8 +2173,7 @@ static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev, struct kv_power_info *pi = kv_get_pi(adev); u32 i; u32 temp; - u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ? 
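kv_enable_didt() now enters and exits RLC safe mode through the adev->gfx.rlc.funcs ops table rather than calling gfx_v7_0_* symbols directly, removing a hard cross-IP dependency from the DPM code. The table is presumably shaped roughly like the hypothetical declaration below (the real one lives in the shared amdgpu headers):

struct amdgpu_rlc_funcs_sketch {
	void (*enter_safe_mode)(struct amdgpu_device *adev);
	void (*exit_safe_mode)(struct amdgpu_device *adev);
};

Note that the error path in kv_enable_didt() still exits safe mode before returning, so the RLC is never left parked there.
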
- min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK; + u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK); if (sclk < min) return 0; @@ -2186,7 +2182,7 @@ static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev, return 0; for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) { - temp = sclk / sumo_get_sleep_divider_from_id(i); + temp = sclk >> i; if (temp >= min) break; } @@ -2258,7 +2254,7 @@ static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, if (pi->caps_stable_p_state) { stable_p_state_sclk = (max_limits->sclk * 75) / 100; - for (i = table->count - 1; i >= 0; i++) { + for (i = table->count - 1; i >= 0; i--) { if (stable_p_state_sclk >= table->entries[i].clk) { stable_p_state_sclk = table->entries[i].clk; break; @@ -3147,62 +3143,6 @@ static int kv_dpm_wait_for_idle(void *handle) return 0; } -static void kv_dpm_print_status(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - dev_info(adev->dev, "KV/KB DPM registers\n"); - dev_info(adev->dev, " DIDT_SQ_CTRL0=0x%08X\n", - RREG32_DIDT(ixDIDT_SQ_CTRL0)); - dev_info(adev->dev, " DIDT_DB_CTRL0=0x%08X\n", - RREG32_DIDT(ixDIDT_DB_CTRL0)); - dev_info(adev->dev, " DIDT_TD_CTRL0=0x%08X\n", - RREG32_DIDT(ixDIDT_TD_CTRL0)); - dev_info(adev->dev, " DIDT_TCP_CTRL0=0x%08X\n", - RREG32_DIDT(ixDIDT_TCP_CTRL0)); - dev_info(adev->dev, " LCAC_SX0_OVR_SEL=0x%08X\n", - RREG32_SMC(ixLCAC_SX0_OVR_SEL)); - dev_info(adev->dev, " LCAC_SX0_OVR_VAL=0x%08X\n", - RREG32_SMC(ixLCAC_SX0_OVR_VAL)); - dev_info(adev->dev, " LCAC_MC0_OVR_SEL=0x%08X\n", - RREG32_SMC(ixLCAC_MC0_OVR_SEL)); - dev_info(adev->dev, " LCAC_MC0_OVR_VAL=0x%08X\n", - RREG32_SMC(ixLCAC_MC0_OVR_VAL)); - dev_info(adev->dev, " LCAC_MC1_OVR_SEL=0x%08X\n", - RREG32_SMC(ixLCAC_MC1_OVR_SEL)); - dev_info(adev->dev, " LCAC_MC1_OVR_VAL=0x%08X\n", - RREG32_SMC(ixLCAC_MC1_OVR_VAL)); - dev_info(adev->dev, " LCAC_MC2_OVR_SEL=0x%08X\n", - RREG32_SMC(ixLCAC_MC2_OVR_SEL)); - dev_info(adev->dev, " LCAC_MC2_OVR_VAL=0x%08X\n", - RREG32_SMC(ixLCAC_MC2_OVR_VAL)); - dev_info(adev->dev, " LCAC_MC3_OVR_SEL=0x%08X\n", - RREG32_SMC(ixLCAC_MC3_OVR_SEL)); - dev_info(adev->dev, " LCAC_MC3_OVR_VAL=0x%08X\n", - RREG32_SMC(ixLCAC_MC3_OVR_VAL)); - dev_info(adev->dev, " LCAC_CPL_OVR_SEL=0x%08X\n", - RREG32_SMC(ixLCAC_CPL_OVR_SEL)); - dev_info(adev->dev, " LCAC_CPL_OVR_VAL=0x%08X\n", - RREG32_SMC(ixLCAC_CPL_OVR_VAL)); - dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_0=0x%08X\n", - RREG32_SMC(ixCG_FREQ_TRAN_VOTING_0)); - dev_info(adev->dev, " GENERAL_PWRMGT=0x%08X\n", - RREG32_SMC(ixGENERAL_PWRMGT)); - dev_info(adev->dev, " SCLK_PWRMGT_CNTL=0x%08X\n", - RREG32_SMC(ixSCLK_PWRMGT_CNTL)); - dev_info(adev->dev, " SMC_MESSAGE_0=0x%08X\n", - RREG32(mmSMC_MESSAGE_0)); - dev_info(adev->dev, " SMC_RESP_0=0x%08X\n", - RREG32(mmSMC_RESP_0)); - dev_info(adev->dev, " SMC_MSG_ARG_0=0x%08X\n", - RREG32(mmSMC_MSG_ARG_0)); - dev_info(adev->dev, " SMC_IND_INDEX_0=0x%08X\n", - RREG32(mmSMC_IND_INDEX_0)); - dev_info(adev->dev, " SMC_IND_DATA_0=0x%08X\n", - RREG32(mmSMC_IND_DATA_0)); - dev_info(adev->dev, " SMC_IND_ACCESS_CNTL=0x%08X\n", - RREG32(mmSMC_IND_ACCESS_CNTL)); -} static int kv_dpm_soft_reset(void *handle) { @@ -3300,6 +3240,7 @@ static int kv_dpm_set_powergating_state(void *handle, } const struct amd_ip_funcs kv_dpm_ip_funcs = { + .name = "kv_dpm", .early_init = kv_dpm_early_init, .late_init = kv_dpm_late_init, .sw_init = kv_dpm_sw_init, @@ -3311,7 +3252,6 @@ const struct amd_ip_funcs kv_dpm_ip_funcs = { .is_idle = kv_dpm_is_idle, .wait_for_idle = kv_dpm_wait_for_idle, .soft_reset = 
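Two genuine fixes sit in the kv_dpm hunks above. First, the deep-sleep divider is a power of two, so sclk >> i replaces the removed sumo_get_sleep_divider_from_id() division and max() replaces the hand-rolled ternary. Second, the stable-p-state search now steps downward; the old i++ started at the top of the table and walked past its end. The corrected divider search, sketched:

static u8 pick_sleep_divider(u32 sclk, u32 min_sclk_in_sr)
{
	u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK);
	u32 i;

	if (sclk < min)
		return 0;

	/* deepest divider whose divided clock still clears the floor */
	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--)
		if ((sclk >> i) >= min)
			break;

	return (u8)i;
}
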
kv_dpm_soft_reset, - .print_status = kv_dpm_print_status, .set_clockgating_state = kv_dpm_set_clockgating_state, .set_powergating_state = kv_dpm_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/ppsmc.h b/drivers/gpu/drm/amd/amdgpu/ppsmc.h index 7837f2ecc357..8463245f424f 100644 --- a/drivers/gpu/drm/amd/amdgpu/ppsmc.h +++ b/drivers/gpu/drm/amd/amdgpu/ppsmc.h @@ -90,7 +90,9 @@ typedef uint8_t PPSMC_Result; #define PPSMC_StartFanControl ((uint8_t)0x5B) #define PPSMC_StopFanControl ((uint8_t)0x5C) #define PPSMC_MSG_NoDisplay ((uint8_t)0x5D) +#define PPSMC_NoDisplay ((uint8_t)0x5D) #define PPSMC_MSG_HasDisplay ((uint8_t)0x5E) +#define PPSMC_HasDisplay ((uint8_t)0x5E) #define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60) #define PPSMC_MSG_UVDPowerON ((uint8_t)0x61) #define PPSMC_MSG_EnableULV ((uint8_t)0x62) @@ -108,6 +110,7 @@ typedef uint8_t PPSMC_Result; #define PPSMC_MSG_DisableDTE ((uint8_t)0x88) #define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96) #define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97) +#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149) /* CI/KV/KB */ #define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D) @@ -161,6 +164,7 @@ typedef uint8_t PPSMC_Result; #define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190) #define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191) #define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A) +#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205) #define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C) #define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 6e0a86a563f3..1351c7e834a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -105,6 +105,15 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev) } } +static void sdma_v2_4_free_microcode(struct amdgpu_device *adev) +{ + int i; + for (i = 0; i < adev->sdma.num_instances; i++) { + release_firmware(adev->sdma.instance[i].fw); + adev->sdma.instance[i].fw = NULL; + } +} + /** * sdma_v2_4_init_microcode - load ucode images from disk * @@ -242,22 +251,10 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) * Schedule an IB in the DMA ring (VI). 
*/ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib) + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { - u32 vmid = ib->vm_id & 0xf; - u32 next_rptr = ring->wptr + 5; - - while ((next_rptr & 7) != 2) - next_rptr++; - - next_rptr += 6; - - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); - amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); - amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); - amdgpu_ring_write(ring, next_rptr); + u32 vmid = vm_id & 0xf; /* IB packet must end on a 8 DW boundary */ sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8); @@ -396,7 +393,7 @@ static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable) u32 f32_cntl; int i; - if (enable == false) { + if (!enable) { sdma_v2_4_gfx_stop(adev); sdma_v2_4_rlc_stop(adev); } @@ -460,6 +457,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) /* Initialize the ring buffer's read and write pointers */ WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); /* set the wb address whether it's enabled or not */ WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], @@ -488,7 +487,11 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); ring->ready = true; + } + sdma_v2_4_enable(adev, true); + for (i = 0; i < adev->sdma.num_instances; i++) { + ring = &adev->sdma.instance[i].ring; r = amdgpu_ring_test_ring(ring); if (r) { ring->ready = false; @@ -564,23 +567,25 @@ static int sdma_v2_4_start(struct amdgpu_device *adev) { int r; - if (!adev->firmware.smu_load) { - r = sdma_v2_4_load_microcode(adev); - if (r) - return r; - } else { - r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, - AMDGPU_UCODE_ID_SDMA0); - if (r) - return -EINVAL; - r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, - AMDGPU_UCODE_ID_SDMA1); - if (r) - return -EINVAL; + if (!adev->pp_enabled) { + if (!adev->firmware.smu_load) { + r = sdma_v2_4_load_microcode(adev); + if (r) + return r; + } else { + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_SDMA0); + if (r) + return -EINVAL; + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_SDMA1); + if (r) + return -EINVAL; + } } - /* unhalt the MEs */ - sdma_v2_4_enable(adev, true); + /* halt the engine before programing */ + sdma_v2_4_enable(adev, false); /* start the gfx rings and rlc compute queues */ r = sdma_v2_4_gfx_resume(adev); @@ -663,20 +668,19 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) * Test a simple IB in the DMA ring (VI). * Returns 0 on success, error on failure. 
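The resume-path reshuffle in sdma_v2_4 is an ordering fix: every ring is programmed while the engine is halted, the MEs are unhalted exactly once, and only then are the rings tested; sdma_v2_4_start() now halts the engine before programming instead of unhalting it up front. The resulting bring-up order, sketched with a hypothetical per-ring helper standing in for the body of gfx_resume:

sdma_v2_4_enable(adev, false);		/* halt before touching ring registers */

for (i = 0; i < adev->sdma.num_instances; i++)
	program_gfx_ring(adev, i);	/* hypothetical: RB/IB setup per instance */

sdma_v2_4_enable(adev, true);		/* unhalt once, after all setup */

for (i = 0; i < adev->sdma.num_instances; i++) {
	r = amdgpu_ring_test_ring(&adev->sdma.instance[i].ring);
	if (r) {
		adev->sdma.instance[i].ring.ready = false;
		return r;
	}
}
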
*/ -static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) +static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; - unsigned i; unsigned index; - int r; u32 tmp = 0; u64 gpu_addr; + long r; r = amdgpu_wb_get(adev, &index); if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); return r; } @@ -686,7 +690,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err0; } @@ -701,32 +705,29 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); ib.length_dw = 8; - r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); if (r) goto err1; - r = fence_wait(f, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out\n"); + r = -ETIMEDOUT; goto err1; - } - for (i = 0; i < adev->usec_timeout; i++) { - tmp = le32_to_cpu(adev->wb.wb[index]); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < adev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ring->idx, i); + } else if (r) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err1; + } + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; } err1: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); err0: @@ -990,7 +991,7 @@ static int sdma_v2_4_sw_init(void *handle) ring->ring_obj = NULL; ring->use_doorbell = false; sprintf(ring->name, "sdma%d", i); - r = amdgpu_ring_init(adev, ring, 256 * 1024, + r = amdgpu_ring_init(adev, ring, 1024, SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, &adev->sdma.trap_irq, (i == 0) ? 
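The rewritten IB test blocks on fence_wait_timeout() instead of hand-polling the write-back word under DRM_UDELAY, which is why r becomes a long and the test gains a timeout parameter. The return convention drives the flow: 0 means the fence never signaled, negative is a wait error, positive is success with jiffies to spare, after which the 0xDEADBEEF marker is checked exactly once. The tail of that flow, sketched:

r = fence_wait_timeout(f, false, timeout);
if (r == 0) {
	r = -ETIMEDOUT;				/* fence never signaled */
} else if (r < 0) {
	DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
} else {
	u32 tmp = le32_to_cpu(adev->wb.wb[index]);

	r = (tmp == 0xDEADBEEF) ? 0 : -EINVAL;	/* did the copy land? */
}
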
@@ -1011,6 +1012,7 @@ static int sdma_v2_4_sw_fini(void *handle) for (i = 0; i < adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); + sdma_v2_4_free_microcode(adev); return 0; } @@ -1080,55 +1082,6 @@ static int sdma_v2_4_wait_for_idle(void *handle) return -ETIMEDOUT; } -static void sdma_v2_4_print_status(void *handle) -{ - int i, j; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - dev_info(adev->dev, "VI SDMA registers\n"); - dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", - RREG32(mmSRBM_STATUS2)); - for (i = 0; i < adev->sdma.num_instances; i++) { - dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n", - i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n", - i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_CNTL=0x%08X\n", - i, RREG32(mmSDMA0_CNTL + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n", - i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_IB_CNTL=0x%08X\n", - i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_RB_CNTL=0x%08X\n", - i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR=0x%08X\n", - i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_RB_WPTR=0x%08X\n", - i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n", - i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n", - i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_RB_BASE=0x%08X\n", - i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n", - i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n", - i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i])); - mutex_lock(&adev->srbm_mutex); - for (j = 0; j < 16; j++) { - vi_srbm_select(adev, 0, 0, 0, j); - dev_info(adev->dev, " VM %d:\n", j); - dev_info(adev->dev, " SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n", - i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_APE1_CNTL=0x%08X\n", - i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i])); - } - vi_srbm_select(adev, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); - } -} - static int sdma_v2_4_soft_reset(void *handle) { u32 srbm_soft_reset = 0; @@ -1151,8 +1104,6 @@ static int sdma_v2_4_soft_reset(void *handle) } if (srbm_soft_reset) { - sdma_v2_4_print_status((void *)adev); - tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); @@ -1167,8 +1118,6 @@ static int sdma_v2_4_soft_reset(void *handle) /* Wait a little for things to settle down */ udelay(50); - - sdma_v2_4_print_status((void *)adev); } return 0; @@ -1283,6 +1232,7 @@ static int sdma_v2_4_set_powergating_state(void *handle, } const struct amd_ip_funcs sdma_v2_4_ip_funcs = { + .name = "sdma_v2_4", .early_init = sdma_v2_4_early_init, .late_init = NULL, .sw_init = sdma_v2_4_sw_init, @@ -1294,7 +1244,6 @@ const struct amd_ip_funcs sdma_v2_4_ip_funcs = { .is_idle = sdma_v2_4_is_idle, .wait_for_idle = sdma_v2_4_wait_for_idle, .soft_reset = sdma_v2_4_soft_reset, - .print_status = sdma_v2_4_print_status, .set_clockgating_state = sdma_v2_4_set_clockgating_state, .set_powergating_state = 
sdma_v2_4_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 8c8ca98dd129..653ce5ed55ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -56,6 +56,11 @@ MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin"); MODULE_FIRMWARE("amdgpu/fiji_sdma.bin"); MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin"); MODULE_FIRMWARE("amdgpu/stoney_sdma.bin"); +MODULE_FIRMWARE("amdgpu/polaris10_sdma.bin"); +MODULE_FIRMWARE("amdgpu/polaris10_sdma1.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin"); + static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = { @@ -101,6 +106,34 @@ static const u32 fiji_mgcg_cgcg_init[] = mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100 }; +static const u32 golden_settings_polaris11_a11[] = +{ + mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, + mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000, + mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007, + mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000, + mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100, +}; + +static const u32 golden_settings_polaris10_a11[] = +{ + mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, + mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000, + mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007, + mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000, + mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100, +}; + static const u32 cz_golden_settings_a11[] = { mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, @@ -172,6 +205,16 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_tonga_a11, (const u32)ARRAY_SIZE(golden_settings_tonga_a11)); break; + case CHIP_POLARIS11: + amdgpu_program_register_sequence(adev, + golden_settings_polaris11_a11, + (const u32)ARRAY_SIZE(golden_settings_polaris11_a11)); + break; + case CHIP_POLARIS10: + amdgpu_program_register_sequence(adev, + golden_settings_polaris10_a11, + (const u32)ARRAY_SIZE(golden_settings_polaris10_a11)); + break; case CHIP_CARRIZO: amdgpu_program_register_sequence(adev, cz_mgcg_cgcg_init, @@ -193,6 +236,15 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev) } } +static void sdma_v3_0_free_microcode(struct amdgpu_device *adev) +{ + int i; + for (i = 0; i < adev->sdma.num_instances; i++) { + release_firmware(adev->sdma.instance[i].fw); + adev->sdma.instance[i].fw = NULL; + } +} + /** * sdma_v3_0_init_microcode - load ucode images from disk * @@ -220,6 +272,12 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) case CHIP_FIJI: chip_name = "fiji"; break; + case CHIP_POLARIS11: + chip_name = "polaris11"; + break; + case CHIP_POLARIS10: + chip_name = "polaris10"; + break; case CHIP_CARRIZO: chip_name = "carrizo"; break; @@ -353,21 +411,10 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) * Schedule an IB in the DMA ring (VI). 
*/ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib) + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { - u32 vmid = ib->vm_id & 0xf; - u32 next_rptr = ring->wptr + 5; - - while ((next_rptr & 7) != 2) - next_rptr++; - next_rptr += 6; - - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); - amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); - amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); - amdgpu_ring_write(ring, next_rptr); + u32 vmid = vm_id & 0xf; /* IB packet must end on a 8 DW boundary */ sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8); @@ -452,6 +499,31 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0)); } +unsigned init_cond_exec(struct amdgpu_ring *ring) +{ + unsigned ret; + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE)); + amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr)); + amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr)); + amdgpu_ring_write(ring, 1); + ret = ring->wptr;/* this is the offset we need patch later */ + amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */ + return ret; +} + +void patch_cond_exec(struct amdgpu_ring *ring, unsigned offset) +{ + unsigned cur; + BUG_ON(ring->ring[offset] != 0x55aa55aa); + + cur = ring->wptr - 1; + if (likely(cur > offset)) + ring->ring[offset] = cur - offset; + else + ring->ring[offset] = (ring->ring_size>>2) - offset + cur; +} + + /** * sdma_v3_0_gfx_stop - stop the gfx async dma engines * @@ -532,7 +604,7 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable) u32 f32_cntl; int i; - if (enable == false) { + if (!enable) { sdma_v3_0_gfx_stop(adev); sdma_v3_0_rlc_stop(adev); } @@ -597,6 +669,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) /* Initialize the ring buffer's read and write pointers */ WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); /* set the wb address whether it's enabled or not */ WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], @@ -636,7 +710,15 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); ring->ready = true; + } + + /* unhalt the MEs */ + sdma_v3_0_enable(adev, true); + /* enable sdma ring preemption */ + sdma_v3_0_ctx_switch_enable(adev, true); + for (i = 0; i < adev->sdma.num_instances; i++) { + ring = &adev->sdma.instance[i].ring; r = amdgpu_ring_test_ring(ring); if (r) { ring->ready = false; @@ -729,10 +811,9 @@ static int sdma_v3_0_start(struct amdgpu_device *adev) } } - /* unhalt the MEs */ - sdma_v3_0_enable(adev, true); - /* enable sdma ring preemption */ - sdma_v3_0_ctx_switch_enable(adev, true); + /* disble sdma engine before programing it */ + sdma_v3_0_ctx_switch_enable(adev, false); + sdma_v3_0_enable(adev, false); /* start the gfx rings and rlc compute queues */ r = sdma_v3_0_gfx_resume(adev); @@ -815,20 +896,19 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) * Test a simple IB in the DMA ring (VI). * Returns 0 on success, error on failure. 
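init_cond_exec()/patch_cond_exec() above implement deferred patching of a conditional-execution packet: COND_EXE is emitted with a 0x55aa55aa placeholder for its skip count, the placeholder's ring offset is handed back, and once the guarded section is fully emitted the placeholder is overwritten with the real dword distance. The wrap branch is the subtle part; ring_size >> 2 is the ring length in dwords:

unsigned cur = ring->wptr - 1;		/* last dword actually written */

BUG_ON(ring->ring[offset] != 0x55aa55aa);	/* placeholder still intact */

if (cur > offset)
	ring->ring[offset] = cur - offset;
else	/* wptr wrapped: distance runs through the end of the ring */
	ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
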
*/ -static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) +static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; - unsigned i; unsigned index; - int r; u32 tmp = 0; u64 gpu_addr; + long r; r = amdgpu_wb_get(adev, &index); if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); return r; } @@ -838,7 +918,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err0; } @@ -853,31 +933,28 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); ib.length_dw = 8; - r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); if (r) goto err1; - r = fence_wait(f, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out\n"); + r = -ETIMEDOUT; goto err1; - } - for (i = 0; i < adev->usec_timeout; i++) { - tmp = le32_to_cpu(adev->wb.wb[index]); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < adev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ring->idx, i); + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err1; + } + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; } err1: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); err0: @@ -1151,7 +1228,7 @@ static int sdma_v3_0_sw_init(void *handle) AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1; sprintf(ring->name, "sdma%d", i); - r = amdgpu_ring_init(adev, ring, 256 * 1024, + r = amdgpu_ring_init(adev, ring, 1024, SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, &adev->sdma.trap_irq, (i == 0) ? 
@@ -1172,6 +1249,7 @@ static int sdma_v3_0_sw_fini(void *handle) for (i = 0; i < adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); + sdma_v3_0_free_microcode(adev); return 0; } @@ -1242,57 +1320,6 @@ static int sdma_v3_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static void sdma_v3_0_print_status(void *handle) -{ - int i, j; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - dev_info(adev->dev, "VI SDMA registers\n"); - dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", - RREG32(mmSRBM_STATUS2)); - for (i = 0; i < adev->sdma.num_instances; i++) { - dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n", - i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n", - i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_CNTL=0x%08X\n", - i, RREG32(mmSDMA0_CNTL + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n", - i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_IB_CNTL=0x%08X\n", - i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_RB_CNTL=0x%08X\n", - i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR=0x%08X\n", - i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_RB_WPTR=0x%08X\n", - i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n", - i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n", - i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_RB_BASE=0x%08X\n", - i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n", - i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_DOORBELL=0x%08X\n", - i, RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n", - i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i])); - mutex_lock(&adev->srbm_mutex); - for (j = 0; j < 16; j++) { - vi_srbm_select(adev, 0, 0, 0, j); - dev_info(adev->dev, " VM %d:\n", j); - dev_info(adev->dev, " SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n", - i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i])); - dev_info(adev->dev, " SDMA%d_GFX_APE1_CNTL=0x%08X\n", - i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i])); - } - vi_srbm_select(adev, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); - } -} - static int sdma_v3_0_soft_reset(void *handle) { u32 srbm_soft_reset = 0; @@ -1315,8 +1342,6 @@ static int sdma_v3_0_soft_reset(void *handle) } if (srbm_soft_reset) { - sdma_v3_0_print_status((void *)adev); - tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); @@ -1331,8 +1356,6 @@ static int sdma_v3_0_soft_reset(void *handle) /* Wait a little for things to settle down */ udelay(50); - - sdma_v3_0_print_status((void *)adev); } return 0; @@ -1433,40 +1456,31 @@ static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev, return 0; } -static void fiji_update_sdma_medium_grain_clock_gating( +static void sdma_v3_0_update_sdma_medium_grain_clock_gating( struct amdgpu_device *adev, bool enable) { uint32_t temp, data; + int i; - if (enable) { - temp = data = RREG32(mmSDMA0_CLK_CTRL); - data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK | - SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK | - 
SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK | - SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK | - SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK | - SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK | - SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK | - SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK); - if (data != temp) - WREG32(mmSDMA0_CLK_CTRL, data); - - temp = data = RREG32(mmSDMA1_CLK_CTRL); - data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK | - SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK | - SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK | - SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK | - SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK | - SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK | - SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK | - SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK); - - if (data != temp) - WREG32(mmSDMA1_CLK_CTRL, data); + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) { + for (i = 0; i < adev->sdma.num_instances; i++) { + temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]); + data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK); + if (data != temp) + WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data); + } } else { - temp = data = RREG32(mmSDMA0_CLK_CTRL); - data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK | + for (i = 0; i < adev->sdma.num_instances; i++) { + temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]); + data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK | SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK | SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK | SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK | @@ -1475,54 +1489,35 @@ static void fiji_update_sdma_medium_grain_clock_gating( SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK | SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK; - if (data != temp) - WREG32(mmSDMA0_CLK_CTRL, data); - - temp = data = RREG32(mmSDMA1_CLK_CTRL); - data |= SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK | - SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK | - SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK | - SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK | - SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK | - SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK | - SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK | - SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK; - - if (data != temp) - WREG32(mmSDMA1_CLK_CTRL, data); + if (data != temp) + WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data); + } } } -static void fiji_update_sdma_medium_grain_light_sleep( +static void sdma_v3_0_update_sdma_medium_grain_light_sleep( struct amdgpu_device *adev, bool enable) { uint32_t temp, data; + int i; - if (enable) { - temp = data = RREG32(mmSDMA0_POWER_CNTL); - data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; - - if (temp != data) - WREG32(mmSDMA0_POWER_CNTL, data); - - temp = data = RREG32(mmSDMA1_POWER_CNTL); - data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) { + for (i = 0; i < adev->sdma.num_instances; i++) { + temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]); + data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; - if (temp != data) - WREG32(mmSDMA1_POWER_CNTL, data); + if (temp != data) + WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data); + } } else { - temp = data = RREG32(mmSDMA0_POWER_CNTL); - data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; - - if (temp != data) - WREG32(mmSDMA0_POWER_CNTL, data); - - temp = data = RREG32(mmSDMA1_POWER_CNTL); - data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; + for (i = 0; i < adev->sdma.num_instances; i++) { + temp = data = 
RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]); + data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; - if (temp != data) - WREG32(mmSDMA1_POWER_CNTL, data); + if (temp != data) + WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data); + } } } @@ -1533,9 +1528,11 @@ static int sdma_v3_0_set_clockgating_state(void *handle, switch (adev->asic_type) { case CHIP_FIJI: - fiji_update_sdma_medium_grain_clock_gating(adev, + case CHIP_CARRIZO: + case CHIP_STONEY: + sdma_v3_0_update_sdma_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); - fiji_update_sdma_medium_grain_light_sleep(adev, + sdma_v3_0_update_sdma_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE ? true : false); break; default: @@ -1551,6 +1548,7 @@ static int sdma_v3_0_set_powergating_state(void *handle, } const struct amd_ip_funcs sdma_v3_0_ip_funcs = { + .name = "sdma_v3_0", .early_init = sdma_v3_0_early_init, .late_init = NULL, .sw_init = sdma_v3_0_sw_init, @@ -1562,7 +1560,6 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = { .is_idle = sdma_v3_0_is_idle, .wait_for_idle = sdma_v3_0_wait_for_idle, .soft_reset = sdma_v3_0_soft_reset, - .print_status = sdma_v3_0_print_status, .set_clockgating_state = sdma_v3_0_set_clockgating_state, .set_powergating_state = sdma_v3_0_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h b/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h index c24a81eebc7c..880152c0f775 100644 --- a/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h +++ b/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h @@ -44,6 +44,7 @@ #define UCODE_ID_IH_REG_RESTORE 11 #define UCODE_ID_VBIOS 12 #define UCODE_ID_MISC_METADATA 13 +#define UCODE_ID_SMU_SK 14 #define UCODE_ID_RLC_SCRATCH 32 #define UCODE_ID_RLC_SRM_ARAM 33 #define UCODE_ID_RLC_SRM_DRAM 34 diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c index 0497784b3652..f06f6f4dc3a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c @@ -71,6 +71,11 @@ static int tonga_dpm_sw_init(void *handle) static int tonga_dpm_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + return 0; } @@ -143,6 +148,7 @@ static int tonga_dpm_set_powergating_state(void *handle, } const struct amd_ip_funcs tonga_dpm_ip_funcs = { + .name = "tonga_dpm", .early_init = tonga_dpm_early_init, .late_init = NULL, .sw_init = tonga_dpm_sw_init, @@ -154,7 +160,6 @@ const struct amd_ip_funcs tonga_dpm_ip_funcs = { .is_idle = NULL, .wait_for_idle = NULL, .soft_reset = NULL, - .print_status = NULL, .set_clockgating_state = tonga_dpm_set_clockgating_state, .set_powergating_state = tonga_dpm_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index b6f7d7bff929..c92055805a45 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -99,7 +99,6 @@ static void tonga_ih_disable_interrupts(struct amdgpu_device *adev) */ static int tonga_ih_irq_init(struct amdgpu_device *adev) { - int ret = 0; int rb_bufsz; u32 interrupt_cntl, ih_rb_cntl, ih_doorbell_rtpr; u64 wptr_off; @@ -165,7 +164,7 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev) /* enable interrupts */ tonga_ih_enable_interrupts(adev); - return ret; + return 0; } /** @@ -307,7 +306,7 @@ static int tonga_ih_sw_fini(void *handle) amdgpu_irq_fini(adev); amdgpu_ih_ring_fini(adev); - amdgpu_irq_add_domain(adev); + 
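The fiji_* to sdma_v3_0_* rename of the clock-gating helpers above is more than cosmetic: the rewritten functions loop over sdma.num_instances via sdma_offsets[] instead of duplicating SDMA0/SDMA1 register blocks, check the AMD_CG_SUPPORT_SDMA_MGCG / AMD_CG_SUPPORT_SDMA_LS flags before gating, and keep the write-only-on-change guard. Condensed shape of the MGCG half (SOFT_OVERRIDE_ALL is a stand-in for the eight OR'd override masks):

for (i = 0; i < adev->sdma.num_instances; i++) {
	u32 reg = mmSDMA0_CLK_CTRL + sdma_offsets[i];
	u32 temp, data;

	temp = data = RREG32(reg);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG))
		data &= ~SOFT_OVERRIDE_ALL;	/* clear overrides: let MGCG gate */
	else
		data |= SOFT_OVERRIDE_ALL;	/* force clocks on */

	if (data != temp)			/* skip redundant writes */
		WREG32(reg, data);
}
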
amdgpu_irq_remove_domain(adev); return 0; } @@ -374,35 +373,6 @@ static int tonga_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static void tonga_ih_print_status(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - dev_info(adev->dev, "TONGA IH registers\n"); - dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", - RREG32(mmSRBM_STATUS)); - dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", - RREG32(mmSRBM_STATUS2)); - dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n", - RREG32(mmINTERRUPT_CNTL)); - dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n", - RREG32(mmINTERRUPT_CNTL2)); - dev_info(adev->dev, " IH_CNTL=0x%08X\n", - RREG32(mmIH_CNTL)); - dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n", - RREG32(mmIH_RB_CNTL)); - dev_info(adev->dev, " IH_RB_BASE=0x%08X\n", - RREG32(mmIH_RB_BASE)); - dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n", - RREG32(mmIH_RB_WPTR_ADDR_LO)); - dev_info(adev->dev, " IH_RB_WPTR_ADDR_HI=0x%08X\n", - RREG32(mmIH_RB_WPTR_ADDR_HI)); - dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n", - RREG32(mmIH_RB_RPTR)); - dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n", - RREG32(mmIH_RB_WPTR)); -} - static int tonga_ih_soft_reset(void *handle) { u32 srbm_soft_reset = 0; @@ -414,8 +384,6 @@ static int tonga_ih_soft_reset(void *handle) SOFT_RESET_IH, 1); if (srbm_soft_reset) { - tonga_ih_print_status(adev); - tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); @@ -430,8 +398,6 @@ static int tonga_ih_soft_reset(void *handle) /* Wait a little for things to settle down */ udelay(50); - - tonga_ih_print_status(adev); } return 0; @@ -450,6 +416,7 @@ static int tonga_ih_set_powergating_state(void *handle, } const struct amd_ip_funcs tonga_ih_ip_funcs = { + .name = "tonga_ih", .early_init = tonga_ih_early_init, .late_init = NULL, .sw_init = tonga_ih_sw_init, @@ -461,7 +428,6 @@ const struct amd_ip_funcs tonga_ih_ip_funcs = { .is_idle = tonga_ih_is_idle, .wait_for_idle = tonga_ih_wait_for_idle, .soft_reset = tonga_ih_soft_reset, - .print_status = tonga_ih_print_status, .set_clockgating_state = tonga_ih_set_clockgating_state, .set_powergating_state = tonga_ih_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c index 083893dd68c0..940de1836f8f 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c @@ -173,7 +173,7 @@ static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg) { if (!tonga_is_smc_ram_running(adev)) { - return -EINVAL;; + return -EINVAL; } if (wait_smu_response(adev)) { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index c606ccb38d8b..132e613ed674 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -34,6 +34,8 @@ #include "oss/oss_2_0_d.h" #include "oss/oss_2_0_sh_mask.h" +#include "bif/bif_4_1_d.h" + static void uvd_v4_2_mc_resume(struct amdgpu_device *adev); static void uvd_v4_2_init_cg(struct amdgpu_device *adev); static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev); @@ -114,7 +116,7 @@ static int uvd_v4_2_sw_init(void *handle) ring = &adev->uvd.ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf, + r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf, &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); return r; @@ -224,11 +226,11 @@ static int uvd_v4_2_suspend(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = 
amdgpu_uvd_suspend(adev); + r = uvd_v4_2_hw_fini(adev); if (r) return r; - r = uvd_v4_2_hw_fini(adev); + r = amdgpu_uvd_suspend(adev); if (r) return r; @@ -439,6 +441,32 @@ static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq } /** + * uvd_v4_2_ring_emit_hdp_flush - emit an hdp flush + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp flush. + */ +static void uvd_v4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0)); + amdgpu_ring_write(ring, 0); +} + +/** + * uvd_v4_2_ring_hdp_invalidate - emit an hdp invalidate + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp invalidate. + */ +static void uvd_v4_2_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0)); + amdgpu_ring_write(ring, 1); +} + +/** * uvd_v4_2_ring_test_ring - register write test * * @ring: amdgpu_ring pointer @@ -489,7 +517,8 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring) * Write ring commands to execute the indirect buffer */ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib) + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0)); amdgpu_ring_write(ring, ib->gpu_addr); @@ -498,49 +527,6 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, } /** - * uvd_v4_2_ring_test_ib - test ib execution - * - * @ring: amdgpu_ring pointer - * - * Test if we can successfully execute an IB - */ -static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - struct fence *fence = NULL; - int r; - - r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); - if (r) { - DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r); - return r; - } - - r = amdgpu_uvd_get_create_msg(ring, 1, NULL); - if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); - goto error; - } - - r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); - if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); - goto error; - } - - r = fence_wait(fence, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - goto error; - } - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); -error: - fence_put(fence); - amdgpu_asic_set_uvd_clocks(adev, 0, 0); - return r; -} - -/** * uvd_v4_2_mc_resume - memory controller programming * * @adev: amdgpu_device pointer @@ -559,12 +545,13 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev) WREG32(mmUVD_VCPU_CACHE_SIZE0, size); addr += size; - size = AMDGPU_UVD_STACK_SIZE >> 3; + size = AMDGPU_UVD_HEAP_SIZE >> 3; WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr); WREG32(mmUVD_VCPU_CACHE_SIZE1, size); addr += size; - size = AMDGPU_UVD_HEAP_SIZE >> 3; + size = (AMDGPU_UVD_STACK_SIZE + + (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3; WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr); WREG32(mmUVD_VCPU_CACHE_SIZE2, size); @@ -679,117 +666,6 @@ static int uvd_v4_2_soft_reset(void *handle) return uvd_v4_2_start(adev); } -static void uvd_v4_2_print_status(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - dev_info(adev->dev, "UVD 4.2 registers\n"); - dev_info(adev->dev, " UVD_SEMA_ADDR_LOW=0x%08X\n", - RREG32(mmUVD_SEMA_ADDR_LOW)); - dev_info(adev->dev, " UVD_SEMA_ADDR_HIGH=0x%08X\n", - RREG32(mmUVD_SEMA_ADDR_HIGH)); - dev_info(adev->dev, " UVD_SEMA_CMD=0x%08X\n", - RREG32(mmUVD_SEMA_CMD)); - dev_info(adev->dev, " UVD_GPCOM_VCPU_CMD=0x%08X\n", - 
RREG32(mmUVD_GPCOM_VCPU_CMD)); - dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA0=0x%08X\n", - RREG32(mmUVD_GPCOM_VCPU_DATA0)); - dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA1=0x%08X\n", - RREG32(mmUVD_GPCOM_VCPU_DATA1)); - dev_info(adev->dev, " UVD_ENGINE_CNTL=0x%08X\n", - RREG32(mmUVD_ENGINE_CNTL)); - dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_SEMA_CNTL=0x%08X\n", - RREG32(mmUVD_SEMA_CNTL)); - dev_info(adev->dev, " UVD_LMI_EXT40_ADDR=0x%08X\n", - RREG32(mmUVD_LMI_EXT40_ADDR)); - dev_info(adev->dev, " UVD_CTX_INDEX=0x%08X\n", - RREG32(mmUVD_CTX_INDEX)); - dev_info(adev->dev, " UVD_CTX_DATA=0x%08X\n", - RREG32(mmUVD_CTX_DATA)); - dev_info(adev->dev, " UVD_CGC_GATE=0x%08X\n", - RREG32(mmUVD_CGC_GATE)); - dev_info(adev->dev, " UVD_CGC_CTRL=0x%08X\n", - RREG32(mmUVD_CGC_CTRL)); - dev_info(adev->dev, " UVD_LMI_CTRL2=0x%08X\n", - RREG32(mmUVD_LMI_CTRL2)); - dev_info(adev->dev, " UVD_MASTINT_EN=0x%08X\n", - RREG32(mmUVD_MASTINT_EN)); - dev_info(adev->dev, " UVD_LMI_ADDR_EXT=0x%08X\n", - RREG32(mmUVD_LMI_ADDR_EXT)); - dev_info(adev->dev, " UVD_LMI_CTRL=0x%08X\n", - RREG32(mmUVD_LMI_CTRL)); - dev_info(adev->dev, " UVD_LMI_SWAP_CNTL=0x%08X\n", - RREG32(mmUVD_LMI_SWAP_CNTL)); - dev_info(adev->dev, " UVD_MP_SWAP_CNTL=0x%08X\n", - RREG32(mmUVD_MP_SWAP_CNTL)); - dev_info(adev->dev, " UVD_MPC_SET_MUXA0=0x%08X\n", - RREG32(mmUVD_MPC_SET_MUXA0)); - dev_info(adev->dev, " UVD_MPC_SET_MUXA1=0x%08X\n", - RREG32(mmUVD_MPC_SET_MUXA1)); - dev_info(adev->dev, " UVD_MPC_SET_MUXB0=0x%08X\n", - RREG32(mmUVD_MPC_SET_MUXB0)); - dev_info(adev->dev, " UVD_MPC_SET_MUXB1=0x%08X\n", - RREG32(mmUVD_MPC_SET_MUXB1)); - dev_info(adev->dev, " UVD_MPC_SET_MUX=0x%08X\n", - RREG32(mmUVD_MPC_SET_MUX)); - dev_info(adev->dev, " UVD_MPC_SET_ALU=0x%08X\n", - RREG32(mmUVD_MPC_SET_ALU)); - dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET0=0x%08X\n", - RREG32(mmUVD_VCPU_CACHE_OFFSET0)); - dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE0=0x%08X\n", - RREG32(mmUVD_VCPU_CACHE_SIZE0)); - dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET1=0x%08X\n", - RREG32(mmUVD_VCPU_CACHE_OFFSET1)); - dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE1=0x%08X\n", - RREG32(mmUVD_VCPU_CACHE_SIZE1)); - dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET2=0x%08X\n", - RREG32(mmUVD_VCPU_CACHE_OFFSET2)); - dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE2=0x%08X\n", - RREG32(mmUVD_VCPU_CACHE_SIZE2)); - dev_info(adev->dev, " UVD_VCPU_CNTL=0x%08X\n", - RREG32(mmUVD_VCPU_CNTL)); - dev_info(adev->dev, " UVD_SOFT_RESET=0x%08X\n", - RREG32(mmUVD_SOFT_RESET)); - dev_info(adev->dev, " UVD_RBC_IB_BASE=0x%08X\n", - RREG32(mmUVD_RBC_IB_BASE)); - dev_info(adev->dev, " UVD_RBC_IB_SIZE=0x%08X\n", - RREG32(mmUVD_RBC_IB_SIZE)); - dev_info(adev->dev, " UVD_RBC_RB_BASE=0x%08X\n", - RREG32(mmUVD_RBC_RB_BASE)); - dev_info(adev->dev, " UVD_RBC_RB_RPTR=0x%08X\n", - RREG32(mmUVD_RBC_RB_RPTR)); - dev_info(adev->dev, " UVD_RBC_RB_WPTR=0x%08X\n", - RREG32(mmUVD_RBC_RB_WPTR)); - dev_info(adev->dev, " UVD_RBC_RB_WPTR_CNTL=0x%08X\n", - RREG32(mmUVD_RBC_RB_WPTR_CNTL)); - dev_info(adev->dev, " UVD_RBC_RB_CNTL=0x%08X\n", - RREG32(mmUVD_RBC_RB_CNTL)); - dev_info(adev->dev, " UVD_STATUS=0x%08X\n", - RREG32(mmUVD_STATUS)); - dev_info(adev->dev, " UVD_SEMA_TIMEOUT_STATUS=0x%08X\n", - RREG32(mmUVD_SEMA_TIMEOUT_STATUS)); - dev_info(adev->dev, " 
UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n", - RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL)); - dev_info(adev->dev, " UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n", - RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL)); - dev_info(adev->dev, " UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n", - RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL)); - dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n", - RREG32(mmUVD_CONTEXT_ID)); - dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); - -} - static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, @@ -849,6 +725,7 @@ static int uvd_v4_2_set_powergating_state(void *handle, } const struct amd_ip_funcs uvd_v4_2_ip_funcs = { + .name = "uvd_v4_2", .early_init = uvd_v4_2_early_init, .late_init = NULL, .sw_init = uvd_v4_2_sw_init, @@ -860,7 +737,6 @@ const struct amd_ip_funcs uvd_v4_2_ip_funcs = { .is_idle = uvd_v4_2_is_idle, .wait_for_idle = uvd_v4_2_wait_for_idle, .soft_reset = uvd_v4_2_soft_reset, - .print_status = uvd_v4_2_print_status, .set_clockgating_state = uvd_v4_2_set_clockgating_state, .set_powergating_state = uvd_v4_2_set_powergating_state, }; @@ -872,10 +748,14 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { .parse_cs = amdgpu_uvd_ring_parse_cs, .emit_ib = uvd_v4_2_ring_emit_ib, .emit_fence = uvd_v4_2_ring_emit_fence, + .emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush, + .emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate, .test_ring = uvd_v4_2_ring_test_ring, - .test_ib = uvd_v4_2_ring_test_ib, + .test_ib = amdgpu_uvd_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_uvd_ring_begin_use, + .end_use = amdgpu_uvd_ring_end_use, }; static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index e3c852d9d79a..101de136ba63 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -31,6 +31,8 @@ #include "uvd/uvd_5_0_sh_mask.h" #include "oss/oss_2_0_d.h" #include "oss/oss_2_0_sh_mask.h" +#include "bif/bif_5_0_d.h" +#include "vi.h" static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev); static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev); @@ -110,7 +112,7 @@ static int uvd_v5_0_sw_init(void *handle) ring = &adev->uvd.ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf, + r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf, &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); return r; @@ -220,11 +222,11 @@ static int uvd_v5_0_suspend(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_suspend(adev); + r = uvd_v5_0_hw_fini(adev); if (r) return r; - r = uvd_v5_0_hw_fini(adev); + r = amdgpu_uvd_suspend(adev); if (r) return r; @@ -271,12 +273,13 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev) WREG32(mmUVD_VCPU_CACHE_SIZE0, size); offset += size; - size = AMDGPU_UVD_STACK_SIZE; + size = AMDGPU_UVD_HEAP_SIZE; WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3); WREG32(mmUVD_VCPU_CACHE_SIZE1, size); offset += size; - size = AMDGPU_UVD_HEAP_SIZE; + size = AMDGPU_UVD_STACK_SIZE + + (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles); 
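+ /* resulting VCPU cache layout, programmed below (offsets are in
+ * 8-byte units): window 0 holds the firmware image, window 1 the
+ * heap, and window 2 the stack plus one AMDGPU_UVD_SESSION_SIZE
+ * slot per handle, so the footprint scales with adev->uvd.max_handles */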
WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3); WREG32(mmUVD_VCPU_CACHE_SIZE2, size); @@ -487,6 +490,32 @@ static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq } /** + * uvd_v5_0_ring_emit_hdp_flush - emit an hdp flush + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp flush. + */ +static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0)); + amdgpu_ring_write(ring, 0); +} + +/** + * uvd_v5_0_ring_hdp_invalidate - emit an hdp invalidate + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp invalidate. + */ +static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0)); + amdgpu_ring_write(ring, 1); +} + +/** * uvd_v5_0_ring_test_ring - register write test * * @ring: amdgpu_ring pointer @@ -537,7 +566,8 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) * Write ring commands to execute the indirect buffer */ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib) + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); @@ -547,49 +577,6 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, ib->length_dw); } -/** - * uvd_v5_0_ring_test_ib - test ib execution - * - * @ring: amdgpu_ring pointer - * - * Test if we can successfully execute an IB - */ -static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - struct fence *fence = NULL; - int r; - - r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); - if (r) { - DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r); - return r; - } - - r = amdgpu_uvd_get_create_msg(ring, 1, NULL); - if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); - goto error; - } - - r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); - if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); - goto error; - } - - r = fence_wait(fence, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - goto error; - } - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); -error: - fence_put(fence); - amdgpu_asic_set_uvd_clocks(adev, 0, 0); - return r; -} - static bool uvd_v5_0_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -622,120 +609,6 @@ static int uvd_v5_0_soft_reset(void *handle) return uvd_v5_0_start(adev); } -static void uvd_v5_0_print_status(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - dev_info(adev->dev, "UVD 5.0 registers\n"); - dev_info(adev->dev, " UVD_SEMA_ADDR_LOW=0x%08X\n", - RREG32(mmUVD_SEMA_ADDR_LOW)); - dev_info(adev->dev, " UVD_SEMA_ADDR_HIGH=0x%08X\n", - RREG32(mmUVD_SEMA_ADDR_HIGH)); - dev_info(adev->dev, " UVD_SEMA_CMD=0x%08X\n", - RREG32(mmUVD_SEMA_CMD)); - dev_info(adev->dev, " UVD_GPCOM_VCPU_CMD=0x%08X\n", - RREG32(mmUVD_GPCOM_VCPU_CMD)); - dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA0=0x%08X\n", - RREG32(mmUVD_GPCOM_VCPU_DATA0)); - dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA1=0x%08X\n", - RREG32(mmUVD_GPCOM_VCPU_DATA1)); - dev_info(adev->dev, " UVD_ENGINE_CNTL=0x%08X\n", - RREG32(mmUVD_ENGINE_CNTL)); - dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); - dev_info(adev->dev, " 
UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_SEMA_CNTL=0x%08X\n", - RREG32(mmUVD_SEMA_CNTL)); - dev_info(adev->dev, " UVD_LMI_EXT40_ADDR=0x%08X\n", - RREG32(mmUVD_LMI_EXT40_ADDR)); - dev_info(adev->dev, " UVD_CTX_INDEX=0x%08X\n", - RREG32(mmUVD_CTX_INDEX)); - dev_info(adev->dev, " UVD_CTX_DATA=0x%08X\n", - RREG32(mmUVD_CTX_DATA)); - dev_info(adev->dev, " UVD_CGC_GATE=0x%08X\n", - RREG32(mmUVD_CGC_GATE)); - dev_info(adev->dev, " UVD_CGC_CTRL=0x%08X\n", - RREG32(mmUVD_CGC_CTRL)); - dev_info(adev->dev, " UVD_LMI_CTRL2=0x%08X\n", - RREG32(mmUVD_LMI_CTRL2)); - dev_info(adev->dev, " UVD_MASTINT_EN=0x%08X\n", - RREG32(mmUVD_MASTINT_EN)); - dev_info(adev->dev, " UVD_LMI_ADDR_EXT=0x%08X\n", - RREG32(mmUVD_LMI_ADDR_EXT)); - dev_info(adev->dev, " UVD_LMI_CTRL=0x%08X\n", - RREG32(mmUVD_LMI_CTRL)); - dev_info(adev->dev, " UVD_LMI_SWAP_CNTL=0x%08X\n", - RREG32(mmUVD_LMI_SWAP_CNTL)); - dev_info(adev->dev, " UVD_MP_SWAP_CNTL=0x%08X\n", - RREG32(mmUVD_MP_SWAP_CNTL)); - dev_info(adev->dev, " UVD_MPC_SET_MUXA0=0x%08X\n", - RREG32(mmUVD_MPC_SET_MUXA0)); - dev_info(adev->dev, " UVD_MPC_SET_MUXA1=0x%08X\n", - RREG32(mmUVD_MPC_SET_MUXA1)); - dev_info(adev->dev, " UVD_MPC_SET_MUXB0=0x%08X\n", - RREG32(mmUVD_MPC_SET_MUXB0)); - dev_info(adev->dev, " UVD_MPC_SET_MUXB1=0x%08X\n", - RREG32(mmUVD_MPC_SET_MUXB1)); - dev_info(adev->dev, " UVD_MPC_SET_MUX=0x%08X\n", - RREG32(mmUVD_MPC_SET_MUX)); - dev_info(adev->dev, " UVD_MPC_SET_ALU=0x%08X\n", - RREG32(mmUVD_MPC_SET_ALU)); - dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET0=0x%08X\n", - RREG32(mmUVD_VCPU_CACHE_OFFSET0)); - dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE0=0x%08X\n", - RREG32(mmUVD_VCPU_CACHE_SIZE0)); - dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET1=0x%08X\n", - RREG32(mmUVD_VCPU_CACHE_OFFSET1)); - dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE1=0x%08X\n", - RREG32(mmUVD_VCPU_CACHE_SIZE1)); - dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET2=0x%08X\n", - RREG32(mmUVD_VCPU_CACHE_OFFSET2)); - dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE2=0x%08X\n", - RREG32(mmUVD_VCPU_CACHE_SIZE2)); - dev_info(adev->dev, " UVD_VCPU_CNTL=0x%08X\n", - RREG32(mmUVD_VCPU_CNTL)); - dev_info(adev->dev, " UVD_SOFT_RESET=0x%08X\n", - RREG32(mmUVD_SOFT_RESET)); - dev_info(adev->dev, " UVD_LMI_RBC_IB_64BIT_BAR_LOW=0x%08X\n", - RREG32(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW)); - dev_info(adev->dev, " UVD_LMI_RBC_IB_64BIT_BAR_HIGH=0x%08X\n", - RREG32(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH)); - dev_info(adev->dev, " UVD_RBC_IB_SIZE=0x%08X\n", - RREG32(mmUVD_RBC_IB_SIZE)); - dev_info(adev->dev, " UVD_LMI_RBC_RB_64BIT_BAR_LOW=0x%08X\n", - RREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW)); - dev_info(adev->dev, " UVD_LMI_RBC_RB_64BIT_BAR_HIGH=0x%08X\n", - RREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH)); - dev_info(adev->dev, " UVD_RBC_RB_RPTR=0x%08X\n", - RREG32(mmUVD_RBC_RB_RPTR)); - dev_info(adev->dev, " UVD_RBC_RB_WPTR=0x%08X\n", - RREG32(mmUVD_RBC_RB_WPTR)); - dev_info(adev->dev, " UVD_RBC_RB_WPTR_CNTL=0x%08X\n", - RREG32(mmUVD_RBC_RB_WPTR_CNTL)); - dev_info(adev->dev, " UVD_RBC_RB_CNTL=0x%08X\n", - RREG32(mmUVD_RBC_RB_CNTL)); - dev_info(adev->dev, " UVD_STATUS=0x%08X\n", - RREG32(mmUVD_STATUS)); - dev_info(adev->dev, " UVD_SEMA_TIMEOUT_STATUS=0x%08X\n", - RREG32(mmUVD_SEMA_TIMEOUT_STATUS)); - dev_info(adev->dev, " UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n", - RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL)); - dev_info(adev->dev, " UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n", - RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL)); - dev_info(adev->dev, " 
UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n", - RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL)); - dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n", - RREG32(mmUVD_CONTEXT_ID)); - dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); -} - static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, @@ -754,14 +627,128 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev, return 0; } +static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev) +{ + uint32_t data, data1, data2, suvd_flags; + + data = RREG32(mmUVD_CGC_CTRL); + data1 = RREG32(mmUVD_SUVD_CGC_GATE); + data2 = RREG32(mmUVD_SUVD_CGC_CTRL); + + data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | + UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK); + + suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK | + UVD_SUVD_CGC_GATE__SIT_MASK | + UVD_SUVD_CGC_GATE__SMP_MASK | + UVD_SUVD_CGC_GATE__SCM_MASK | + UVD_SUVD_CGC_GATE__SDB_MASK; + + data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK | + (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) | + (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY)); + + data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK | + UVD_CGC_CTRL__UDEC_CM_MODE_MASK | + UVD_CGC_CTRL__UDEC_IT_MODE_MASK | + UVD_CGC_CTRL__UDEC_DB_MODE_MASK | + UVD_CGC_CTRL__UDEC_MP_MODE_MASK | + UVD_CGC_CTRL__SYS_MODE_MASK | + UVD_CGC_CTRL__UDEC_MODE_MASK | + UVD_CGC_CTRL__MPEG2_MODE_MASK | + UVD_CGC_CTRL__REGS_MODE_MASK | + UVD_CGC_CTRL__RBC_MODE_MASK | + UVD_CGC_CTRL__LMI_MC_MODE_MASK | + UVD_CGC_CTRL__LMI_UMC_MODE_MASK | + UVD_CGC_CTRL__IDCT_MODE_MASK | + UVD_CGC_CTRL__MPRD_MODE_MASK | + UVD_CGC_CTRL__MPC_MODE_MASK | + UVD_CGC_CTRL__LBSI_MODE_MASK | + UVD_CGC_CTRL__LRBBM_MODE_MASK | + UVD_CGC_CTRL__WCB_MODE_MASK | + UVD_CGC_CTRL__VCPU_MODE_MASK | + UVD_CGC_CTRL__JPEG_MODE_MASK | + UVD_CGC_CTRL__SCPU_MODE_MASK); + data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK | + UVD_SUVD_CGC_CTRL__SIT_MODE_MASK | + UVD_SUVD_CGC_CTRL__SMP_MODE_MASK | + UVD_SUVD_CGC_CTRL__SCM_MODE_MASK | + UVD_SUVD_CGC_CTRL__SDB_MODE_MASK); + data1 |= suvd_flags; + + WREG32(mmUVD_CGC_CTRL, data); + WREG32(mmUVD_CGC_GATE, 0); + WREG32(mmUVD_SUVD_CGC_GATE, data1); + WREG32(mmUVD_SUVD_CGC_CTRL, data2); +} + +#if 0 +static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev) +{ + uint32_t data, data1, cgc_flags, suvd_flags; + + data = RREG32(mmUVD_CGC_GATE); + data1 = RREG32(mmUVD_SUVD_CGC_GATE); + + cgc_flags = UVD_CGC_GATE__SYS_MASK | + UVD_CGC_GATE__UDEC_MASK | + UVD_CGC_GATE__MPEG2_MASK | + UVD_CGC_GATE__RBC_MASK | + UVD_CGC_GATE__LMI_MC_MASK | + UVD_CGC_GATE__IDCT_MASK | + UVD_CGC_GATE__MPRD_MASK | + UVD_CGC_GATE__MPC_MASK | + UVD_CGC_GATE__LBSI_MASK | + UVD_CGC_GATE__LRBBM_MASK | + UVD_CGC_GATE__UDEC_RE_MASK | + UVD_CGC_GATE__UDEC_CM_MASK | + UVD_CGC_GATE__UDEC_IT_MASK | + UVD_CGC_GATE__UDEC_DB_MASK | + UVD_CGC_GATE__UDEC_MP_MASK | + UVD_CGC_GATE__WCB_MASK | + UVD_CGC_GATE__VCPU_MASK | + UVD_CGC_GATE__SCPU_MASK; + + suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK | + UVD_SUVD_CGC_GATE__SIT_MASK | + UVD_SUVD_CGC_GATE__SMP_MASK | + UVD_SUVD_CGC_GATE__SCM_MASK | + UVD_SUVD_CGC_GATE__SDB_MASK; + + data |= cgc_flags; + data1 |= suvd_flags; + + WREG32(mmUVD_CGC_GATE, data); + WREG32(mmUVD_SUVD_CGC_GATE, data1); +} +#endif + static int uvd_v5_0_set_clockgating_state(void *handle, enum 
amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + static int curstate = -1; if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) return 0; + if (curstate == state) + return 0; + + curstate = state; + if (enable) { + /* disable HW gating and enable Sw gating */ + uvd_v5_0_set_sw_clock_gating(adev); + } else { + /* wait for STATUS to clear */ + if (uvd_v5_0_wait_for_idle(handle)) + return -EBUSY; + + /* enable HW gates because UVD is idle */ +/* uvd_v5_0_set_hw_clock_gating(adev); */ + } + return 0; } @@ -789,6 +776,7 @@ static int uvd_v5_0_set_powergating_state(void *handle, } const struct amd_ip_funcs uvd_v5_0_ip_funcs = { + .name = "uvd_v5_0", .early_init = uvd_v5_0_early_init, .late_init = NULL, .sw_init = uvd_v5_0_sw_init, @@ -800,7 +788,6 @@ const struct amd_ip_funcs uvd_v5_0_ip_funcs = { .is_idle = uvd_v5_0_is_idle, .wait_for_idle = uvd_v5_0_wait_for_idle, .soft_reset = uvd_v5_0_soft_reset, - .print_status = uvd_v5_0_print_status, .set_clockgating_state = uvd_v5_0_set_clockgating_state, .set_powergating_state = uvd_v5_0_set_powergating_state, }; @@ -812,10 +799,14 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { .parse_cs = amdgpu_uvd_ring_parse_cs, .emit_ib = uvd_v5_0_ring_emit_ib, .emit_fence = uvd_v5_0_ring_emit_fence, + .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush, + .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate, .test_ring = uvd_v5_0_ring_test_ring, - .test_ib = uvd_v5_0_ring_test_ib, + .test_ib = amdgpu_uvd_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_uvd_ring_begin_use, + .end_use = amdgpu_uvd_ring_end_use, }; static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 3375e614ac67..7f21102bfb99 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -31,11 +31,17 @@ #include "uvd/uvd_6_0_sh_mask.h" #include "oss/oss_2_0_d.h" #include "oss/oss_2_0_sh_mask.h" +#include "smu/smu_7_1_3_d.h" +#include "smu/smu_7_1_3_sh_mask.h" +#include "bif/bif_5_1_d.h" +#include "gmc/gmc_8_1_d.h" +#include "vi.h" static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev); static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev); static int uvd_v6_0_start(struct amdgpu_device *adev); static void uvd_v6_0_stop(struct amdgpu_device *adev); +static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev); /** * uvd_v6_0_ring_get_rptr - get read pointer @@ -110,7 +116,7 @@ static int uvd_v6_0_sw_init(void *handle) ring = &adev->uvd.ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf, + r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf, &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); return r; @@ -214,15 +220,16 @@ static int uvd_v6_0_suspend(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + r = uvd_v6_0_hw_fini(adev); + if (r) + return r; + /* Skip this for APU for now */ if (!(adev->flags & AMD_IS_APU)) { r = amdgpu_uvd_suspend(adev); if (r) return r; } - r = uvd_v6_0_hw_fini(adev); - if (r) - return r; return r; } @@ -269,20 +276,24 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev) WREG32(mmUVD_VCPU_CACHE_SIZE0, size); offset += size; - size = AMDGPU_UVD_STACK_SIZE; + size = AMDGPU_UVD_HEAP_SIZE; WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3); 
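+ /* same carve-out as UVD 5.0: heap in window 1, session-scaled stack in window 2 */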
WREG32(mmUVD_VCPU_CACHE_SIZE1, size); offset += size; - size = AMDGPU_UVD_HEAP_SIZE; + size = AMDGPU_UVD_STACK_SIZE + + (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles); WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3); WREG32(mmUVD_VCPU_CACHE_SIZE2, size); WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + + WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles); } +#if 0 static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev, bool enable) { @@ -359,157 +370,7 @@ static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev, WREG32(mmUVD_CGC_GATE, data); WREG32(mmUVD_SUVD_CGC_GATE, data1); } - -static void tonga_set_uvd_clock_gating_branches(struct amdgpu_device *adev, - bool enable) -{ - u32 data, data1; - - data = RREG32(mmUVD_CGC_GATE); - data1 = RREG32(mmUVD_SUVD_CGC_GATE); - if (enable) { - data |= UVD_CGC_GATE__SYS_MASK | - UVD_CGC_GATE__UDEC_MASK | - UVD_CGC_GATE__MPEG2_MASK | - UVD_CGC_GATE__RBC_MASK | - UVD_CGC_GATE__LMI_MC_MASK | - UVD_CGC_GATE__IDCT_MASK | - UVD_CGC_GATE__MPRD_MASK | - UVD_CGC_GATE__MPC_MASK | - UVD_CGC_GATE__LBSI_MASK | - UVD_CGC_GATE__LRBBM_MASK | - UVD_CGC_GATE__UDEC_RE_MASK | - UVD_CGC_GATE__UDEC_CM_MASK | - UVD_CGC_GATE__UDEC_IT_MASK | - UVD_CGC_GATE__UDEC_DB_MASK | - UVD_CGC_GATE__UDEC_MP_MASK | - UVD_CGC_GATE__WCB_MASK | - UVD_CGC_GATE__VCPU_MASK | - UVD_CGC_GATE__SCPU_MASK; - data1 |= UVD_SUVD_CGC_GATE__SRE_MASK | - UVD_SUVD_CGC_GATE__SIT_MASK | - UVD_SUVD_CGC_GATE__SMP_MASK | - UVD_SUVD_CGC_GATE__SCM_MASK | - UVD_SUVD_CGC_GATE__SDB_MASK; - } else { - data &= ~(UVD_CGC_GATE__SYS_MASK | - UVD_CGC_GATE__UDEC_MASK | - UVD_CGC_GATE__MPEG2_MASK | - UVD_CGC_GATE__RBC_MASK | - UVD_CGC_GATE__LMI_MC_MASK | - UVD_CGC_GATE__LMI_UMC_MASK | - UVD_CGC_GATE__IDCT_MASK | - UVD_CGC_GATE__MPRD_MASK | - UVD_CGC_GATE__MPC_MASK | - UVD_CGC_GATE__LBSI_MASK | - UVD_CGC_GATE__LRBBM_MASK | - UVD_CGC_GATE__UDEC_RE_MASK | - UVD_CGC_GATE__UDEC_CM_MASK | - UVD_CGC_GATE__UDEC_IT_MASK | - UVD_CGC_GATE__UDEC_DB_MASK | - UVD_CGC_GATE__UDEC_MP_MASK | - UVD_CGC_GATE__WCB_MASK | - UVD_CGC_GATE__VCPU_MASK | - UVD_CGC_GATE__SCPU_MASK); - data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK | - UVD_SUVD_CGC_GATE__SIT_MASK | - UVD_SUVD_CGC_GATE__SMP_MASK | - UVD_SUVD_CGC_GATE__SCM_MASK | - UVD_SUVD_CGC_GATE__SDB_MASK); - } - WREG32(mmUVD_CGC_GATE, data); - WREG32(mmUVD_SUVD_CGC_GATE, data1); -} - -static void uvd_v6_0_set_uvd_dynamic_clock_mode(struct amdgpu_device *adev, - bool swmode) -{ - u32 data, data1 = 0, data2; - - /* Always un-gate UVD REGS bit */ - data = RREG32(mmUVD_CGC_GATE); - data &= ~(UVD_CGC_GATE__REGS_MASK); - WREG32(mmUVD_CGC_GATE, data); - - data = RREG32(mmUVD_CGC_CTRL); - data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | - UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK); - data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK | - 1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER) | - 4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY); - - data2 = RREG32(mmUVD_SUVD_CGC_CTRL); - if (swmode) { - data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK | - UVD_CGC_CTRL__UDEC_CM_MODE_MASK | - UVD_CGC_CTRL__UDEC_IT_MODE_MASK | - UVD_CGC_CTRL__UDEC_DB_MODE_MASK | - UVD_CGC_CTRL__UDEC_MP_MODE_MASK | - UVD_CGC_CTRL__SYS_MODE_MASK | - UVD_CGC_CTRL__UDEC_MODE_MASK | - UVD_CGC_CTRL__MPEG2_MODE_MASK | - UVD_CGC_CTRL__REGS_MODE_MASK | - UVD_CGC_CTRL__RBC_MODE_MASK | - UVD_CGC_CTRL__LMI_MC_MODE_MASK | - UVD_CGC_CTRL__LMI_UMC_MODE_MASK | - UVD_CGC_CTRL__IDCT_MODE_MASK 
| - UVD_CGC_CTRL__MPRD_MODE_MASK | - UVD_CGC_CTRL__MPC_MODE_MASK | - UVD_CGC_CTRL__LBSI_MODE_MASK | - UVD_CGC_CTRL__LRBBM_MODE_MASK | - UVD_CGC_CTRL__WCB_MODE_MASK | - UVD_CGC_CTRL__VCPU_MODE_MASK | - UVD_CGC_CTRL__JPEG_MODE_MASK | - UVD_CGC_CTRL__SCPU_MODE_MASK); - data1 |= UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK | - UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK; - data1 &= ~UVD_CGC_CTRL2__GATER_DIV_ID_MASK; - data1 |= 7 << REG_FIELD_SHIFT(UVD_CGC_CTRL2, GATER_DIV_ID); - data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK | - UVD_SUVD_CGC_CTRL__SIT_MODE_MASK | - UVD_SUVD_CGC_CTRL__SMP_MODE_MASK | - UVD_SUVD_CGC_CTRL__SCM_MODE_MASK | - UVD_SUVD_CGC_CTRL__SDB_MODE_MASK); - } else { - data |= UVD_CGC_CTRL__UDEC_RE_MODE_MASK | - UVD_CGC_CTRL__UDEC_CM_MODE_MASK | - UVD_CGC_CTRL__UDEC_IT_MODE_MASK | - UVD_CGC_CTRL__UDEC_DB_MODE_MASK | - UVD_CGC_CTRL__UDEC_MP_MODE_MASK | - UVD_CGC_CTRL__SYS_MODE_MASK | - UVD_CGC_CTRL__UDEC_MODE_MASK | - UVD_CGC_CTRL__MPEG2_MODE_MASK | - UVD_CGC_CTRL__REGS_MODE_MASK | - UVD_CGC_CTRL__RBC_MODE_MASK | - UVD_CGC_CTRL__LMI_MC_MODE_MASK | - UVD_CGC_CTRL__LMI_UMC_MODE_MASK | - UVD_CGC_CTRL__IDCT_MODE_MASK | - UVD_CGC_CTRL__MPRD_MODE_MASK | - UVD_CGC_CTRL__MPC_MODE_MASK | - UVD_CGC_CTRL__LBSI_MODE_MASK | - UVD_CGC_CTRL__LRBBM_MODE_MASK | - UVD_CGC_CTRL__WCB_MODE_MASK | - UVD_CGC_CTRL__VCPU_MODE_MASK | - UVD_CGC_CTRL__SCPU_MODE_MASK; - data2 |= UVD_SUVD_CGC_CTRL__SRE_MODE_MASK | - UVD_SUVD_CGC_CTRL__SIT_MODE_MASK | - UVD_SUVD_CGC_CTRL__SMP_MODE_MASK | - UVD_SUVD_CGC_CTRL__SCM_MODE_MASK | - UVD_SUVD_CGC_CTRL__SDB_MODE_MASK; - } - WREG32(mmUVD_CGC_CTRL, data); - WREG32(mmUVD_SUVD_CGC_CTRL, data2); - - data = RREG32_UVD_CTX(ixUVD_CGC_CTRL2); - data &= ~(REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_OCLK_RAMP_EN) | - REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_RCLK_RAMP_EN) | - REG_FIELD_MASK(UVD_CGC_CTRL2, GATER_DIV_ID)); - data1 &= (REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_OCLK_RAMP_EN) | - REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_RCLK_RAMP_EN) | - REG_FIELD_MASK(UVD_CGC_CTRL2, GATER_DIV_ID)); - data |= data1; - WREG32_UVD_CTX(ixUVD_CGC_CTRL2, data); -} +#endif /** * uvd_v6_0_start - start UVD block @@ -526,8 +387,8 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) uint32_t mp_swap_cntl; int i, j, r; - /*disable DPG */ - WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2)); + /* disable DPG */ + WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); /* disable byte swapping */ lmi_swap_cntl = 0; @@ -537,11 +398,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) /* Set dynamic clock gating in S/W control mode */ if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) { - if (adev->flags & AMD_IS_APU) - cz_set_uvd_clock_gating_branches(adev, false); - else - tonga_set_uvd_clock_gating_branches(adev, false); - uvd_v6_0_set_uvd_dynamic_clock_mode(adev, true); + uvd_v6_0_set_sw_clock_gating(adev); } else { /* disable clock gating */ uint32_t data = RREG32(mmUVD_CGC_CTRL); @@ -550,17 +407,21 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) } /* disable interupt */ - WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); + WREG32_P(mmUVD_MASTINT_EN, 0, ~UVD_MASTINT_EN__VCPU_EN_MASK); /* stall UMC and register bus before resetting VCPU */ - WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); + WREG32_P(mmUVD_LMI_CTRL2, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); mdelay(1); /* put LMI, VCPU, RBC etc... 
into reset */ - WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | - UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | - UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | + WREG32(mmUVD_SOFT_RESET, + UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | + UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | + UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | + UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | + UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | + UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); mdelay(5); @@ -569,8 +430,13 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) mdelay(5); /* initialize UVD memory controller */ - WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | - (1 << 21) | (1 << 9) | (1 << 20)); + WREG32(mmUVD_LMI_CTRL, + (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__REQ_MODE_MASK | + UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK); #ifdef __BIG_ENDIAN /* swap (8 in 32) RB and IB */ @@ -592,10 +458,10 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) mdelay(5); /* enable VCPU clock */ - WREG32(mmUVD_VCPU_CNTL, 1 << 9); + WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK); /* enable UMC */ - WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); + WREG32_P(mmUVD_LMI_CTRL2, 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); /* boot up the VCPU */ WREG32(mmUVD_SOFT_RESET, 0); @@ -629,10 +495,12 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) return r; } /* enable master interrupt */ - WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1)); + WREG32_P(mmUVD_MASTINT_EN, + (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), + ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK)); /* clear the bit 4 of UVD_STATUS */ - WREG32_P(mmUVD_STATUS, 0, ~(2 << 1)); + WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); rb_bufsz = order_base_2(ring->ring_size); tmp = 0; @@ -726,6 +594,32 @@ static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq } /** + * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp flush. + */ +static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0)); + amdgpu_ring_write(ring, 0); +} + +/** + * uvd_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp invalidate. 
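+ *
+ * Writing mmHDP_DEBUG0 makes the HDP re-read from memory; the GFX and
+ * SDMA rings use the same register in their invalidate hooks.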
+ */ +static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0)); + amdgpu_ring_write(ring, 1); +} + +/** * uvd_v6_0_ring_test_ring - register write test * * @ring: amdgpu_ring pointer @@ -776,8 +670,12 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) * Write ring commands to execute the indirect buffer */ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib) + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { + amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0)); + amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0)); @@ -786,39 +684,55 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, ib->length_dw); } -/** - * uvd_v6_0_ring_test_ib - test ib execution - * - * @ring: amdgpu_ring pointer - * - * Test if we can successfully execute an IB - */ -static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring) +static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vm_id, uint64_t pd_addr) { - struct fence *fence = NULL; - int r; + uint32_t reg; - r = amdgpu_uvd_get_create_msg(ring, 1, NULL); - if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); - goto error; - } + if (vm_id < 8) + reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id; + else + reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8; - r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); - if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); - goto error; - } + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); + amdgpu_ring_write(ring, reg << 2); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); + amdgpu_ring_write(ring, pd_addr >> 12); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); + amdgpu_ring_write(ring, 0x8); - r = fence_wait(fence, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - goto error; - } - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); -error: - fence_put(fence); - return r; + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); + amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); + amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); + amdgpu_ring_write(ring, 0x8); + + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); + amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0)); + amdgpu_ring_write(ring, 1 << vm_id); /* mask */ + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); + amdgpu_ring_write(ring, 0xC); +} + +static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) +{ + uint32_t seq = ring->fence_drv.sync_seq; + uint64_t addr = ring->fence_drv.gpu_addr; + + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0)); + amdgpu_ring_write(ring, 0xffffffff); /* mask */ + amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0)); + amdgpu_ring_write(ring, 
seq); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); + amdgpu_ring_write(ring, 0xE); } static bool uvd_v6_0_is_idle(void *handle) @@ -853,112 +767,6 @@ static int uvd_v6_0_soft_reset(void *handle) return uvd_v6_0_start(adev); } -static void uvd_v6_0_print_status(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - dev_info(adev->dev, "UVD 6.0 registers\n"); - dev_info(adev->dev, " UVD_SEMA_ADDR_LOW=0x%08X\n", - RREG32(mmUVD_SEMA_ADDR_LOW)); - dev_info(adev->dev, " UVD_SEMA_ADDR_HIGH=0x%08X\n", - RREG32(mmUVD_SEMA_ADDR_HIGH)); - dev_info(adev->dev, " UVD_SEMA_CMD=0x%08X\n", - RREG32(mmUVD_SEMA_CMD)); - dev_info(adev->dev, " UVD_GPCOM_VCPU_CMD=0x%08X\n", - RREG32(mmUVD_GPCOM_VCPU_CMD)); - dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA0=0x%08X\n", - RREG32(mmUVD_GPCOM_VCPU_DATA0)); - dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA1=0x%08X\n", - RREG32(mmUVD_GPCOM_VCPU_DATA1)); - dev_info(adev->dev, " UVD_ENGINE_CNTL=0x%08X\n", - RREG32(mmUVD_ENGINE_CNTL)); - dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_SEMA_CNTL=0x%08X\n", - RREG32(mmUVD_SEMA_CNTL)); - dev_info(adev->dev, " UVD_LMI_EXT40_ADDR=0x%08X\n", - RREG32(mmUVD_LMI_EXT40_ADDR)); - dev_info(adev->dev, " UVD_CTX_INDEX=0x%08X\n", - RREG32(mmUVD_CTX_INDEX)); - dev_info(adev->dev, " UVD_CTX_DATA=0x%08X\n", - RREG32(mmUVD_CTX_DATA)); - dev_info(adev->dev, " UVD_CGC_GATE=0x%08X\n", - RREG32(mmUVD_CGC_GATE)); - dev_info(adev->dev, " UVD_CGC_CTRL=0x%08X\n", - RREG32(mmUVD_CGC_CTRL)); - dev_info(adev->dev, " UVD_LMI_CTRL2=0x%08X\n", - RREG32(mmUVD_LMI_CTRL2)); - dev_info(adev->dev, " UVD_MASTINT_EN=0x%08X\n", - RREG32(mmUVD_MASTINT_EN)); - dev_info(adev->dev, " UVD_LMI_ADDR_EXT=0x%08X\n", - RREG32(mmUVD_LMI_ADDR_EXT)); - dev_info(adev->dev, " UVD_LMI_CTRL=0x%08X\n", - RREG32(mmUVD_LMI_CTRL)); - dev_info(adev->dev, " UVD_LMI_SWAP_CNTL=0x%08X\n", - RREG32(mmUVD_LMI_SWAP_CNTL)); - dev_info(adev->dev, " UVD_MP_SWAP_CNTL=0x%08X\n", - RREG32(mmUVD_MP_SWAP_CNTL)); - dev_info(adev->dev, " UVD_MPC_SET_MUXA0=0x%08X\n", - RREG32(mmUVD_MPC_SET_MUXA0)); - dev_info(adev->dev, " UVD_MPC_SET_MUXA1=0x%08X\n", - RREG32(mmUVD_MPC_SET_MUXA1)); - dev_info(adev->dev, " UVD_MPC_SET_MUXB0=0x%08X\n", - RREG32(mmUVD_MPC_SET_MUXB0)); - dev_info(adev->dev, " UVD_MPC_SET_MUXB1=0x%08X\n", - RREG32(mmUVD_MPC_SET_MUXB1)); - dev_info(adev->dev, " UVD_MPC_SET_MUX=0x%08X\n", - RREG32(mmUVD_MPC_SET_MUX)); - dev_info(adev->dev, " UVD_MPC_SET_ALU=0x%08X\n", - RREG32(mmUVD_MPC_SET_ALU)); - dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET0=0x%08X\n", - RREG32(mmUVD_VCPU_CACHE_OFFSET0)); - dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE0=0x%08X\n", - RREG32(mmUVD_VCPU_CACHE_SIZE0)); - dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET1=0x%08X\n", - RREG32(mmUVD_VCPU_CACHE_OFFSET1)); - dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE1=0x%08X\n", - RREG32(mmUVD_VCPU_CACHE_SIZE1)); - dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET2=0x%08X\n", - RREG32(mmUVD_VCPU_CACHE_OFFSET2)); - dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE2=0x%08X\n", - RREG32(mmUVD_VCPU_CACHE_SIZE2)); - dev_info(adev->dev, " UVD_VCPU_CNTL=0x%08X\n", - RREG32(mmUVD_VCPU_CNTL)); - dev_info(adev->dev, " UVD_SOFT_RESET=0x%08X\n", - RREG32(mmUVD_SOFT_RESET)); - dev_info(adev->dev, " UVD_RBC_IB_SIZE=0x%08X\n", - RREG32(mmUVD_RBC_IB_SIZE)); - dev_info(adev->dev, 
" UVD_RBC_RB_RPTR=0x%08X\n", - RREG32(mmUVD_RBC_RB_RPTR)); - dev_info(adev->dev, " UVD_RBC_RB_WPTR=0x%08X\n", - RREG32(mmUVD_RBC_RB_WPTR)); - dev_info(adev->dev, " UVD_RBC_RB_WPTR_CNTL=0x%08X\n", - RREG32(mmUVD_RBC_RB_WPTR_CNTL)); - dev_info(adev->dev, " UVD_RBC_RB_CNTL=0x%08X\n", - RREG32(mmUVD_RBC_RB_CNTL)); - dev_info(adev->dev, " UVD_STATUS=0x%08X\n", - RREG32(mmUVD_STATUS)); - dev_info(adev->dev, " UVD_SEMA_TIMEOUT_STATUS=0x%08X\n", - RREG32(mmUVD_SEMA_TIMEOUT_STATUS)); - dev_info(adev->dev, " UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n", - RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL)); - dev_info(adev->dev, " UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n", - RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL)); - dev_info(adev->dev, " UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n", - RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL)); - dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n", - RREG32(mmUVD_CONTEXT_ID)); - dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); -} - static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, @@ -977,25 +785,147 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev, return 0; } +static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev) +{ + uint32_t data, data1, data2, suvd_flags; + + data = RREG32(mmUVD_CGC_CTRL); + data1 = RREG32(mmUVD_SUVD_CGC_GATE); + data2 = RREG32(mmUVD_SUVD_CGC_CTRL); + + data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | + UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK); + + suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK | + UVD_SUVD_CGC_GATE__SIT_MASK | + UVD_SUVD_CGC_GATE__SMP_MASK | + UVD_SUVD_CGC_GATE__SCM_MASK | + UVD_SUVD_CGC_GATE__SDB_MASK; + + data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK | + (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) | + (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY)); + + data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK | + UVD_CGC_CTRL__UDEC_CM_MODE_MASK | + UVD_CGC_CTRL__UDEC_IT_MODE_MASK | + UVD_CGC_CTRL__UDEC_DB_MODE_MASK | + UVD_CGC_CTRL__UDEC_MP_MODE_MASK | + UVD_CGC_CTRL__SYS_MODE_MASK | + UVD_CGC_CTRL__UDEC_MODE_MASK | + UVD_CGC_CTRL__MPEG2_MODE_MASK | + UVD_CGC_CTRL__REGS_MODE_MASK | + UVD_CGC_CTRL__RBC_MODE_MASK | + UVD_CGC_CTRL__LMI_MC_MODE_MASK | + UVD_CGC_CTRL__LMI_UMC_MODE_MASK | + UVD_CGC_CTRL__IDCT_MODE_MASK | + UVD_CGC_CTRL__MPRD_MODE_MASK | + UVD_CGC_CTRL__MPC_MODE_MASK | + UVD_CGC_CTRL__LBSI_MODE_MASK | + UVD_CGC_CTRL__LRBBM_MODE_MASK | + UVD_CGC_CTRL__WCB_MODE_MASK | + UVD_CGC_CTRL__VCPU_MODE_MASK | + UVD_CGC_CTRL__JPEG_MODE_MASK | + UVD_CGC_CTRL__SCPU_MODE_MASK | + UVD_CGC_CTRL__JPEG2_MODE_MASK); + data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK | + UVD_SUVD_CGC_CTRL__SIT_MODE_MASK | + UVD_SUVD_CGC_CTRL__SMP_MODE_MASK | + UVD_SUVD_CGC_CTRL__SCM_MODE_MASK | + UVD_SUVD_CGC_CTRL__SDB_MODE_MASK); + data1 |= suvd_flags; + + WREG32(mmUVD_CGC_CTRL, data); + WREG32(mmUVD_CGC_GATE, 0); + WREG32(mmUVD_SUVD_CGC_GATE, data1); + WREG32(mmUVD_SUVD_CGC_CTRL, data2); +} + +#if 0 +static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev) +{ + uint32_t data, data1, cgc_flags, suvd_flags; + + data = RREG32(mmUVD_CGC_GATE); + data1 = RREG32(mmUVD_SUVD_CGC_GATE); + + cgc_flags = UVD_CGC_GATE__SYS_MASK | + UVD_CGC_GATE__UDEC_MASK | + UVD_CGC_GATE__MPEG2_MASK | + UVD_CGC_GATE__RBC_MASK | + 
UVD_CGC_GATE__LMI_MC_MASK | + UVD_CGC_GATE__IDCT_MASK | + UVD_CGC_GATE__MPRD_MASK | + UVD_CGC_GATE__MPC_MASK | + UVD_CGC_GATE__LBSI_MASK | + UVD_CGC_GATE__LRBBM_MASK | + UVD_CGC_GATE__UDEC_RE_MASK | + UVD_CGC_GATE__UDEC_CM_MASK | + UVD_CGC_GATE__UDEC_IT_MASK | + UVD_CGC_GATE__UDEC_DB_MASK | + UVD_CGC_GATE__UDEC_MP_MASK | + UVD_CGC_GATE__WCB_MASK | + UVD_CGC_GATE__VCPU_MASK | + UVD_CGC_GATE__SCPU_MASK | + UVD_CGC_GATE__JPEG_MASK | + UVD_CGC_GATE__JPEG2_MASK; + + suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK | + UVD_SUVD_CGC_GATE__SIT_MASK | + UVD_SUVD_CGC_GATE__SMP_MASK | + UVD_SUVD_CGC_GATE__SCM_MASK | + UVD_SUVD_CGC_GATE__SDB_MASK; + + data |= cgc_flags; + data1 |= suvd_flags; + + WREG32(mmUVD_CGC_GATE, data); + WREG32(mmUVD_SUVD_CGC_GATE, data1); +} +#endif + +static void uvd_v6_set_bypass_mode(struct amdgpu_device *adev, bool enable) +{ + u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); + + if (enable) + tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK | + GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK); + else + tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK | + GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK); + + WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp); +} + static int uvd_v6_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + static int curstate = -1; + + if (adev->asic_type == CHIP_FIJI || + adev->asic_type == CHIP_POLARIS10) + uvd_v6_set_bypass_mode(adev, enable); if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) return 0; + if (curstate == state) + return 0; + + curstate = state; if (enable) { - if (adev->flags & AMD_IS_APU) - cz_set_uvd_clock_gating_branches(adev, enable); - else - tonga_set_uvd_clock_gating_branches(adev, enable); - uvd_v6_0_set_uvd_dynamic_clock_mode(adev, true); + /* disable HW gating and enable Sw gating */ + uvd_v6_0_set_sw_clock_gating(adev); } else { - uint32_t data = RREG32(mmUVD_CGC_CTRL); - data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; - WREG32(mmUVD_CGC_CTRL, data); + /* wait for STATUS to clear */ + if (uvd_v6_0_wait_for_idle(handle)) + return -EBUSY; + + /* enable HW gates because UVD is idle */ +/* uvd_v6_0_set_hw_clock_gating(adev); */ } return 0; @@ -1025,6 +955,7 @@ static int uvd_v6_0_set_powergating_state(void *handle, } const struct amd_ip_funcs uvd_v6_0_ip_funcs = { + .name = "uvd_v6_0", .early_init = uvd_v6_0_early_init, .late_init = NULL, .sw_init = uvd_v6_0_sw_init, @@ -1036,27 +967,55 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = { .is_idle = uvd_v6_0_is_idle, .wait_for_idle = uvd_v6_0_wait_for_idle, .soft_reset = uvd_v6_0_soft_reset, - .print_status = uvd_v6_0_print_status, .set_clockgating_state = uvd_v6_0_set_clockgating_state, .set_powergating_state = uvd_v6_0_set_powergating_state, }; -static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = { +static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { .get_rptr = uvd_v6_0_ring_get_rptr, .get_wptr = uvd_v6_0_ring_get_wptr, .set_wptr = uvd_v6_0_ring_set_wptr, .parse_cs = amdgpu_uvd_ring_parse_cs, .emit_ib = uvd_v6_0_ring_emit_ib, .emit_fence = uvd_v6_0_ring_emit_fence, + .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush, + .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate, .test_ring = uvd_v6_0_ring_test_ring, - .test_ib = uvd_v6_0_ring_test_ib, + .test_ib = amdgpu_uvd_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_uvd_ring_begin_use, + .end_use = amdgpu_uvd_ring_end_use, +}; + +static const struct 
amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = { + .get_rptr = uvd_v6_0_ring_get_rptr, + .get_wptr = uvd_v6_0_ring_get_wptr, + .set_wptr = uvd_v6_0_ring_set_wptr, + .parse_cs = NULL, + .emit_ib = uvd_v6_0_ring_emit_ib, + .emit_fence = uvd_v6_0_ring_emit_fence, + .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush, + .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync, + .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush, + .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate, + .test_ring = uvd_v6_0_ring_test_ring, + .test_ib = amdgpu_uvd_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_uvd_ring_begin_use, + .end_use = amdgpu_uvd_ring_end_use, }; static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) { - adev->uvd.ring.funcs = &uvd_v6_0_ring_funcs; + if (adev->asic_type >= CHIP_POLARIS10) { + adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs; + DRM_INFO("UVD is enabled in VM mode\n"); + } else { + adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs; + DRM_INFO("UVD is enabled in physical mode\n"); + } } static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index c7e885bcfd41..80a37a602181 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -44,7 +44,7 @@ static void vce_v2_0_mc_resume(struct amdgpu_device *adev); static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev); static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev); - +static int vce_v2_0_wait_for_idle(void *handle); /** * vce_v2_0_ring_get_rptr - get read pointer * @@ -201,14 +201,14 @@ static int vce_v2_0_sw_init(void *handle) ring = &adev->vce.ring[0]; sprintf(ring->name, "vce0"); - r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf, + r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf, &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE); if (r) return r; ring = &adev->vce.ring[1]; sprintf(ring->name, "vce1"); - r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf, + r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf, &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE); if (r) return r; @@ -240,7 +240,8 @@ static int vce_v2_0_hw_init(void *handle) r = vce_v2_0_start(adev); if (r) - return r; +/* this error means the VCPU is not in a running state, so just skip the ring test and don't stop driver initialization */ + return 0; ring = &adev->vce.ring[0]; ring->ready = true; @@ -318,7 +319,7 @@ static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated) WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0); - } else { + } else { tmp = RREG32(mmVCE_CLOCK_GATING_B); tmp |= 0xe7; tmp &= ~0xe70000; @@ -339,6 +340,21 @@ static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated) { u32 orig, tmp; + if (gated) { + if (vce_v2_0_wait_for_idle(adev)) { + DRM_INFO("VCE is busy, can't set clock gating"); + return; + } + WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK); + WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(100); + WREG32(mmVCE_STATUS, 0); + } else { + WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK); + WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(100); + } + tmp = RREG32(mmVCE_CLOCK_GATING_B); tmp &= ~0x00060006; if (gated) { @@ -362,6 +378,7 @@ static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool 
gated) if (gated) WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0); + WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); } static void vce_v2_0_disable_cg(struct amdgpu_device *adev) @@ -478,75 +495,6 @@ static int vce_v2_0_soft_reset(void *handle) return vce_v2_0_start(adev); } -static void vce_v2_0_print_status(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - dev_info(adev->dev, "VCE 2.0 registers\n"); - dev_info(adev->dev, " VCE_STATUS=0x%08X\n", - RREG32(mmVCE_STATUS)); - dev_info(adev->dev, " VCE_VCPU_CNTL=0x%08X\n", - RREG32(mmVCE_VCPU_CNTL)); - dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET0=0x%08X\n", - RREG32(mmVCE_VCPU_CACHE_OFFSET0)); - dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE0=0x%08X\n", - RREG32(mmVCE_VCPU_CACHE_SIZE0)); - dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET1=0x%08X\n", - RREG32(mmVCE_VCPU_CACHE_OFFSET1)); - dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE1=0x%08X\n", - RREG32(mmVCE_VCPU_CACHE_SIZE1)); - dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET2=0x%08X\n", - RREG32(mmVCE_VCPU_CACHE_OFFSET2)); - dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE2=0x%08X\n", - RREG32(mmVCE_VCPU_CACHE_SIZE2)); - dev_info(adev->dev, " VCE_SOFT_RESET=0x%08X\n", - RREG32(mmVCE_SOFT_RESET)); - dev_info(adev->dev, " VCE_RB_BASE_LO2=0x%08X\n", - RREG32(mmVCE_RB_BASE_LO2)); - dev_info(adev->dev, " VCE_RB_BASE_HI2=0x%08X\n", - RREG32(mmVCE_RB_BASE_HI2)); - dev_info(adev->dev, " VCE_RB_SIZE2=0x%08X\n", - RREG32(mmVCE_RB_SIZE2)); - dev_info(adev->dev, " VCE_RB_RPTR2=0x%08X\n", - RREG32(mmVCE_RB_RPTR2)); - dev_info(adev->dev, " VCE_RB_WPTR2=0x%08X\n", - RREG32(mmVCE_RB_WPTR2)); - dev_info(adev->dev, " VCE_RB_BASE_LO=0x%08X\n", - RREG32(mmVCE_RB_BASE_LO)); - dev_info(adev->dev, " VCE_RB_BASE_HI=0x%08X\n", - RREG32(mmVCE_RB_BASE_HI)); - dev_info(adev->dev, " VCE_RB_SIZE=0x%08X\n", - RREG32(mmVCE_RB_SIZE)); - dev_info(adev->dev, " VCE_RB_RPTR=0x%08X\n", - RREG32(mmVCE_RB_RPTR)); - dev_info(adev->dev, " VCE_RB_WPTR=0x%08X\n", - RREG32(mmVCE_RB_WPTR)); - dev_info(adev->dev, " VCE_CLOCK_GATING_A=0x%08X\n", - RREG32(mmVCE_CLOCK_GATING_A)); - dev_info(adev->dev, " VCE_CLOCK_GATING_B=0x%08X\n", - RREG32(mmVCE_CLOCK_GATING_B)); - dev_info(adev->dev, " VCE_CGTT_CLK_OVERRIDE=0x%08X\n", - RREG32(mmVCE_CGTT_CLK_OVERRIDE)); - dev_info(adev->dev, " VCE_UENC_CLOCK_GATING=0x%08X\n", - RREG32(mmVCE_UENC_CLOCK_GATING)); - dev_info(adev->dev, " VCE_UENC_REG_CLOCK_GATING=0x%08X\n", - RREG32(mmVCE_UENC_REG_CLOCK_GATING)); - dev_info(adev->dev, " VCE_SYS_INT_EN=0x%08X\n", - RREG32(mmVCE_SYS_INT_EN)); - dev_info(adev->dev, " VCE_LMI_CTRL2=0x%08X\n", - RREG32(mmVCE_LMI_CTRL2)); - dev_info(adev->dev, " VCE_LMI_CTRL=0x%08X\n", - RREG32(mmVCE_LMI_CTRL)); - dev_info(adev->dev, " VCE_LMI_VM_CTRL=0x%08X\n", - RREG32(mmVCE_LMI_VM_CTRL)); - dev_info(adev->dev, " VCE_LMI_SWAP_CNTL=0x%08X\n", - RREG32(mmVCE_LMI_SWAP_CNTL)); - dev_info(adev->dev, " VCE_LMI_SWAP_CNTL1=0x%08X\n", - RREG32(mmVCE_LMI_SWAP_CNTL1)); - dev_info(adev->dev, " VCE_LMI_CACHE_CTRL=0x%08X\n", - RREG32(mmVCE_LMI_CACHE_CTRL)); -} - static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, @@ -619,6 +567,7 @@ static int vce_v2_0_set_powergating_state(void *handle, } const struct amd_ip_funcs vce_v2_0_ip_funcs = { + .name = "vce_v2_0", .early_init = vce_v2_0_early_init, .late_init = NULL, .sw_init = vce_v2_0_sw_init, @@ -630,7 +579,6 @@ const struct amd_ip_funcs vce_v2_0_ip_funcs = { .is_idle = vce_v2_0_is_idle, .wait_for_idle = vce_v2_0_wait_for_idle, .soft_reset = vce_v2_0_soft_reset, - 
@@ -646,6 +594,8 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.begin_use = amdgpu_vce_ring_begin_use,
+	.end_use = amdgpu_vce_ring_end_use,
};

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
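The begin_use/end_use hooks wired up here let the core note when a ring transitions between idle and busy, which is the natural place to manage clock and power gating. A hypothetical sketch of the idea, not the actual amdgpu_vce implementation (which, as far as the diff shows, defers the gating decision to dedicated helpers):

#include <stdatomic.h>

/* Toy ring context; gate/ungate stand in for whatever power
 * management the real hooks would invoke. */
struct ring_ctx {
	atomic_int active;
	void (*ungate)(struct ring_ctx *r);
	void (*gate)(struct ring_ctx *r);
};

static void ring_begin_use(struct ring_ctx *r)
{
	/* first submitter on an idle ring powers the block back up */
	if (atomic_fetch_add(&r->active, 1) == 0)
		r->ungate(r);
}

static void ring_end_use(struct ring_ctx *r)
{
	/* last submitter leaving allows the block to gate again */
	if (atomic_fetch_sub(&r->active, 1) == 1)
		r->gate(r);
}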
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index ce468ee5da2a..c271abffd8dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -40,9 +40,10 @@
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10

-#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
-#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
-#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
+#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02

#define VCE_V3_0_FW_SIZE	(384 * 1024)
#define VCE_V3_0_STACK_SIZE	(64 * 1024)
@@ -51,6 +52,7 @@
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
+static int vce_v3_0_wait_for_idle(void *handle);

/**
 * vce_v3_0_ring_get_rptr - get read pointer
@@ -205,6 +207,32 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
	vce_v3_0_override_vce_clock_gating(adev, false);
}

+static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
+{
+	int i, j;
+
+	for (i = 0; i < 10; ++i) {
+		for (j = 0; j < 100; ++j) {
+			uint32_t status = RREG32(mmVCE_STATUS);
+
+			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
+				return 0;
+			mdelay(10);
+		}
+
+		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
+		WREG32_P(mmVCE_SOFT_RESET,
+			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		mdelay(10);
+		WREG32_P(mmVCE_SOFT_RESET, 0,
+			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		mdelay(10);
+	}
+
+	return -ETIMEDOUT;
+}
+
/**
 * vce_v3_0_start - start VCE block
 *
@@ -215,11 +243,24 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
-	int idx, i, j, r;
+	int idx, r;
+
+	ring = &adev->vce.ring[0];
+	WREG32(mmVCE_RB_RPTR, ring->wptr);
+	WREG32(mmVCE_RB_WPTR, ring->wptr);
+	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
+	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+	ring = &adev->vce.ring[1];
+	WREG32(mmVCE_RB_RPTR2, ring->wptr);
+	WREG32(mmVCE_RB_WPTR2, ring->wptr);
+	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
+	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
-
		if (adev->vce.harvest_config & (1 << idx))
			continue;

@@ -233,48 +274,24 @@ static int vce_v3_0_start(struct amdgpu_device *adev)

		vce_v3_0_mc_resume(adev, idx);

-		/* set BUSY flag */
-		WREG32_P(mmVCE_STATUS, 1, ~1);
+		WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK,
+			~VCE_STATUS__JOB_BUSY_MASK);
+
		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
		else
			WREG32_P(mmVCE_VCPU_CNTL,
				VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);

-		WREG32_P(mmVCE_SOFT_RESET,
-			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
-		mdelay(100);
-
		WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

-		for (i = 0; i < 10; ++i) {
-			uint32_t status;
-			for (j = 0; j < 100; ++j) {
-				status = RREG32(mmVCE_STATUS);
-				if (status & 2)
-					break;
-				mdelay(10);
-			}
-			r = 0;
-			if (status & 2)
-				break;
-
-			DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
-			WREG32_P(mmVCE_SOFT_RESET,
-				VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-			mdelay(10);
-			WREG32_P(mmVCE_SOFT_RESET, 0,
-				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-			mdelay(10);
-			r = -1;
-		}
+		mdelay(100);
+
+		r = vce_v3_0_firmware_loaded(adev);

		/* clear BUSY flag */
-		WREG32_P(mmVCE_STATUS, 0, ~1);
+		WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);

		/* Set Clock-Gating off */
		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
@@ -290,19 +307,46 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
	mutex_unlock(&adev->grbm_idx_mutex);

-	ring = &adev->vce.ring[0];
-	WREG32(mmVCE_RB_RPTR, ring->wptr);
-	WREG32(mmVCE_RB_WPTR, ring->wptr);
-	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
-	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+	return 0;
+}

-	ring = &adev->vce.ring[1];
-	WREG32(mmVCE_RB_RPTR2, ring->wptr);
-	WREG32(mmVCE_RB_WPTR2, ring->wptr);
-	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
-	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+static int vce_v3_0_stop(struct amdgpu_device *adev)
+{
+	int idx;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	for (idx = 0; idx < 2; ++idx) {
+		if (adev->vce.harvest_config & (1 << idx))
+			continue;
+
+		if (idx == 0)
+			WREG32_P(mmGRBM_GFX_INDEX, 0,
+				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+		else
+			WREG32_P(mmGRBM_GFX_INDEX,
+				GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
+				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+
+		if (adev->asic_type >= CHIP_STONEY)
+			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
+		else
+			WREG32_P(mmVCE_VCPU_CNTL, 0,
+				~VCE_VCPU_CNTL__CLK_EN_MASK);
+		/* hold on ECPU */
+		WREG32_P(mmVCE_SOFT_RESET,
+			 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+
+		/* clear BUSY flag */
+		WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
+
+		/* Set Clock-Gating off */
+		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
+			vce_v3_0_set_vce_sw_clock_gating(adev, false);
+	}
+
+	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
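The new vce_v3_0_firmware_loaded() follows a common bring-up pattern: poll a status bit with a bounded budget, kick the engine between attempts, and fail hard only after several rounds. The same shape, stripped of hardware specifics (ready/kick/sleep_ms are hypothetical callbacks):

#include <errno.h>
#include <stdbool.h>

static int wait_for_ready(bool (*ready)(void *ctx), void (*kick)(void *ctx),
			  void (*sleep_ms)(unsigned int ms), void *ctx)
{
	for (int attempt = 0; attempt < 10; ++attempt) {
		for (int poll = 0; poll < 100; ++poll) {
			if (ready(ctx))
				return 0;
			sleep_ms(10);	/* ~1s of polling per attempt */
		}
		kick(ctx);		/* e.g. pulse the ECPU soft reset */
	}
	return -ETIMEDOUT;		/* ~10s total before giving up */
}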
@@ -315,9 +359,11 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
	u32 tmp;

-	/* Fiji, Stoney are single pipe */
+	/* Fiji, Stoney, Polaris10, Polaris11 are single pipe */
	if ((adev->asic_type == CHIP_FIJI) ||
-	    (adev->asic_type == CHIP_STONEY))
+	    (adev->asic_type == CHIP_STONEY) ||
+	    (adev->asic_type == CHIP_POLARIS10) ||
+	    (adev->asic_type == CHIP_POLARIS11))
		return AMDGPU_VCE_HARVEST_VCE1;

	/* Tonga and CZ are dual or single pipe */
@@ -381,14 +427,14 @@ static int vce_v3_0_sw_init(void *handle)

	ring = &adev->vce.ring[0];
	sprintf(ring->name, "vce0");
-	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
+	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	ring = &adev->vce.ring[1];
	sprintf(ring->name, "vce1");
-	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
+	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;
@@ -439,7 +485,14 @@ static int vce_v3_0_hw_init(void *handle)

static int vce_v3_0_hw_fini(void *handle)
{
-	return 0;
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = vce_v3_0_wait_for_idle(handle);
+	if (r)
+		return r;
+
+	return vce_v3_0_stop(adev);
}

static int vce_v3_0_suspend(void *handle)
@@ -564,73 +617,6 @@ static int vce_v3_0_soft_reset(void *handle)
	return vce_v3_0_start(adev);
}

-static void vce_v3_0_print_status(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	dev_info(adev->dev, "VCE 3.0 registers\n");
-	dev_info(adev->dev, " VCE_STATUS=0x%08X\n",
-		 RREG32(mmVCE_STATUS));
-	dev_info(adev->dev, " VCE_VCPU_CNTL=0x%08X\n",
-		 RREG32(mmVCE_VCPU_CNTL));
-	dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
-		 RREG32(mmVCE_VCPU_CACHE_OFFSET0));
-	dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE0=0x%08X\n",
-		 RREG32(mmVCE_VCPU_CACHE_SIZE0));
-	dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
-		 RREG32(mmVCE_VCPU_CACHE_OFFSET1));
-	dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE1=0x%08X\n",
-		 RREG32(mmVCE_VCPU_CACHE_SIZE1));
-	dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
-		 RREG32(mmVCE_VCPU_CACHE_OFFSET2));
-	dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE2=0x%08X\n",
-		 RREG32(mmVCE_VCPU_CACHE_SIZE2));
-	dev_info(adev->dev, " VCE_SOFT_RESET=0x%08X\n",
-		 RREG32(mmVCE_SOFT_RESET));
-	dev_info(adev->dev, " VCE_RB_BASE_LO2=0x%08X\n",
-		 RREG32(mmVCE_RB_BASE_LO2));
-	dev_info(adev->dev, " VCE_RB_BASE_HI2=0x%08X\n",
-		 RREG32(mmVCE_RB_BASE_HI2));
-	dev_info(adev->dev, " VCE_RB_SIZE2=0x%08X\n",
-		 RREG32(mmVCE_RB_SIZE2));
-	dev_info(adev->dev, " VCE_RB_RPTR2=0x%08X\n",
-		 RREG32(mmVCE_RB_RPTR2));
-	dev_info(adev->dev, " VCE_RB_WPTR2=0x%08X\n",
-		 RREG32(mmVCE_RB_WPTR2));
-	dev_info(adev->dev, " VCE_RB_BASE_LO=0x%08X\n",
-		 RREG32(mmVCE_RB_BASE_LO));
-	dev_info(adev->dev, " VCE_RB_BASE_HI=0x%08X\n",
-		 RREG32(mmVCE_RB_BASE_HI));
-	dev_info(adev->dev, " VCE_RB_SIZE=0x%08X\n",
-		 RREG32(mmVCE_RB_SIZE));
-	dev_info(adev->dev, " VCE_RB_RPTR=0x%08X\n",
-		 RREG32(mmVCE_RB_RPTR));
-	dev_info(adev->dev, " VCE_RB_WPTR=0x%08X\n",
-		 RREG32(mmVCE_RB_WPTR));
-	dev_info(adev->dev, " VCE_CLOCK_GATING_A=0x%08X\n",
-		 RREG32(mmVCE_CLOCK_GATING_A));
-	dev_info(adev->dev, " VCE_CLOCK_GATING_B=0x%08X\n",
-		 RREG32(mmVCE_CLOCK_GATING_B));
-	dev_info(adev->dev, " VCE_UENC_CLOCK_GATING=0x%08X\n",
-		 RREG32(mmVCE_UENC_CLOCK_GATING));
-	dev_info(adev->dev, " VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
-		 RREG32(mmVCE_UENC_REG_CLOCK_GATING));
-	dev_info(adev->dev, " VCE_SYS_INT_EN=0x%08X\n",
-		 RREG32(mmVCE_SYS_INT_EN));
-	dev_info(adev->dev, " VCE_LMI_CTRL2=0x%08X\n",
-		 RREG32(mmVCE_LMI_CTRL2));
-	dev_info(adev->dev, " VCE_LMI_CTRL=0x%08X\n",
-		 RREG32(mmVCE_LMI_CTRL));
-	dev_info(adev->dev, " VCE_LMI_VM_CTRL=0x%08X\n",
-		 RREG32(mmVCE_LMI_VM_CTRL));
-	dev_info(adev->dev, " VCE_LMI_SWAP_CNTL=0x%08X\n",
-		 RREG32(mmVCE_LMI_SWAP_CNTL));
-	dev_info(adev->dev, " VCE_LMI_SWAP_CNTL1=0x%08X\n",
-		 RREG32(mmVCE_LMI_SWAP_CNTL1));
-	dev_info(adev->dev, " VCE_LMI_CACHE_CTRL=0x%08X\n",
-		 RREG32(mmVCE_LMI_CACHE_CTRL));
-}
-
static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
@@ -669,6 +655,18 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
	return 0;
}

+static void vce_v3_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+	if (enable)
+		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+	else
+		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+
+	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
@@ -676,6 +674,9 @@ static int vce_v3_0_set_clockgating_state(void *handle,
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
	int i;

+	if (adev->asic_type == CHIP_POLARIS10)
+		vce_v3_set_bypass_mode(adev, enable);
+
	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
		return 0;

@@ -739,6 +740,7 @@ static int vce_v3_0_set_powergating_state(void *handle,
}

const struct amd_ip_funcs vce_v3_0_ip_funcs = {
+	.name = "vce_v3_0",
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
@@ -750,7 +752,6 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.soft_reset = vce_v3_0_soft_reset,
-	.print_status = vce_v3_0_print_status,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
};
@@ -766,6 +767,8 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.begin_use = amdgpu_vce_ring_begin_use,
+	.end_use = amdgpu_vce_ring_end_use,
};

static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
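Both vce_v3_0_start() and vce_v3_0_stop() skip instances flagged in harvest_config, the mask built by vce_v3_0_get_harvest_config() above. The idiom, as a standalone illustration (names invented): bit N set means instance N is fused off and must never be programmed.

enum { VCE_MAX_INSTANCES = 2 };

static void for_each_live_instance(unsigned int harvest_mask,
				   void (*program)(int idx, void *ctx),
				   void *ctx)
{
	for (int idx = 0; idx < VCE_MAX_INSTANCES; ++idx) {
		if (harvest_mask & (1u << idx))
			continue;	/* harvested: skip this instance */
		program(idx, ctx);
	}
}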
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 1c120efa292c..03a31c53aec3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -78,6 +78,11 @@
#include "amdgpu_acp.h"
#endif

+MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
+
/*
 * Indirect registers accessor
 */
@@ -198,6 +203,29 @@ static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

+static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
+	WREG32(mmGC_CAC_IND_INDEX, (reg));
+	r = RREG32(mmGC_CAC_IND_DATA);
+	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
+	return r;
+}
+
+static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
+	WREG32(mmGC_CAC_IND_INDEX, (reg));
+	WREG32(mmGC_CAC_IND_DATA, (v));
+	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
+}
+
+
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
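The new GC_CAC accessors use the classic index/data pair: write the register number to an INDEX port, then read or write through the DATA port, with a lock making the two-step sequence atomic against other users of the same pair. A self-contained sketch of the pattern (the pair description and bus helpers are hypothetical; the kernel version uses spin_lock_irqsave() because these accessors may race with interrupt context):

#include <pthread.h>
#include <stdint.h>

struct ind_pair {
	pthread_mutex_t lock;
	uint32_t index_port;
	uint32_t data_port;
	uint32_t (*mmio_rd)(uint32_t reg);
	void (*mmio_wr)(uint32_t reg, uint32_t val);
};

static uint32_t ind_rreg(struct ind_pair *p, uint32_t reg)
{
	uint32_t val;

	pthread_mutex_lock(&p->lock);
	p->mmio_wr(p->index_port, reg);	/* select the indirect register */
	val = p->mmio_rd(p->data_port);	/* read it through the window */
	pthread_mutex_unlock(&p->lock);
	return val;
}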
@@ -276,6 +304,8 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS10:
	default:
		break;
	}
@@ -414,11 +444,25 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
	return true;
}

-static struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
+static u32 vi_get_virtual_caps(struct amdgpu_device *adev)
+{
+	u32 caps = 0;
+	u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+
+	if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
+		caps |= AMDGPU_VIRT_CAPS_SRIOV_EN;
+
+	if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
+		caps |= AMDGPU_VIRT_CAPS_IS_VF;
+
+	return caps;
+}
+
+static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
	{mmGB_MACROTILE_MODE7, true},
};

-static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
+static const struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
	{mmGB_TILE_MODE7, true},
	{mmGB_TILE_MODE12, true},
	{mmGB_TILE_MODE17, true},
@@ -426,7 +470,7 @@ static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
	{mmGB_MACROTILE_MODE7, true},
};

-static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
+static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS, false},
	{mmGRBM_STATUS2, false},
	{mmGRBM_STATUS_SE0, false},
@@ -512,12 +556,12 @@ static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
-		gfx_v8_0_select_se_sh(adev, se_num, sh_num);
+		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
-		gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}
@@ -525,8 +569,8 @@ static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
-	struct amdgpu_allowed_register_entry *asic_register_table = NULL;
-	struct amdgpu_allowed_register_entry *asic_register_entry;
+	const struct amdgpu_allowed_register_entry *asic_register_table = NULL;
+	const struct amdgpu_allowed_register_entry *asic_register_entry;
	uint32_t size, i;

	*value = 0;
@@ -537,6 +581,8 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS10:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		asic_register_table = cz_allowed_read_registers;
@@ -574,7 +620,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
	return -EINVAL;
}

-static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
+static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

@@ -589,11 +635,14 @@ static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
-		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
-			break;
+		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
+			/* enable BM */
+			pci_set_master(adev->pdev);
+			return 0;
+		}
		udelay(1);
	}
-
+	return -EINVAL;
}

static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
@@ -619,13 +668,15 @@ static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hun
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
+	int r;
+
	vi_set_bios_scratch_engine_hung(adev, true);

-	vi_gpu_pci_config_reset(adev);
+	r = vi_gpu_pci_config_reset(adev);

	vi_set_bios_scratch_engine_hung(adev, false);

-	return 0;
+	return r;
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
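vi_get_virtual_caps() leans on the mask/shift pairs that the register headers generate for every field, which is what REG_GET_FIELD() expands to. A sketch of that convention with invented values (the real BIF_IOV_FUNC_IDENTIFIER layout is not shown in this diff):

#include <stdint.h>

#define BIF_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK	0x00000001
#define BIF_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT	0x0

#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

static inline int is_virtual_function(uint32_t id_reg)
{
	/* nonzero when the function is a VF, mirroring the
	 * AMDGPU_VIRT_CAPS_IS_VF decision above */
	return GET_FIELD(id_reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER) != 0;
}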
@@ -907,6 +958,74 @@ static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
	},
};

+static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vi_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 8,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &gmc_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 3,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &tonga_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 11,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &dce_v11_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gfx_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 3,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &sdma_v3_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 6,
+		.minor = 3,
+		.rev = 0,
+		.funcs = &uvd_v6_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 3,
+		.minor = 4,
+		.rev = 0,
+		.funcs = &vce_v3_0_ip_funcs,
+	},
+};
+
static const struct amdgpu_ip_block_version cz_ip_blocks[] =
{
	/* ORDER MATTERS! */
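The per-ASIC tables such as polaris11_ip_blocks[] above are ordered lists of IP blocks that the core brings up strictly in array order, hence the "ORDER MATTERS!" comment. A stripped-down sketch of the pattern (types and the hw_init hook are simplified stand-ins for the real amd_ip_funcs machinery):

struct ip_block {
	int major, minor;
	int (*hw_init)(void *ctx);
};

static int bring_up_blocks(const struct ip_block *blk, int count, void *ctx)
{
	for (int i = 0; i < count; ++i) {
		int r = blk[i].hw_init(ctx);	/* array order == dependency order */
		if (r)
			return r;	/* stop here; caller unwinds what started */
	}
	return 0;
}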
@@ -999,6 +1118,11 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
		adev->ip_blocks = tonga_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
		break;
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS10:
+		adev->ip_blocks = polaris11_ip_blocks;
+		adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
+		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		adev->ip_blocks = cz_ip_blocks;
@@ -1036,10 +1160,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
-	.get_cu_info = &gfx_v8_0_get_cu_info,
-	/* these should be moved to their own ip modules */
-	.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
-	.wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
+	.get_virtual_caps = &vi_get_virtual_caps,
};

static int vi_common_early_init(void *handle)
@@ -1060,6 +1181,8 @@ static int vi_common_early_init(void *handle)
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
+	adev->gc_cac_rreg = &vi_gc_cac_rreg;
+	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

@@ -1076,19 +1199,75 @@ static int vi_common_early_init(void *handle)
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
-		adev->cg_flags = 0;
+		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			AMD_CG_SUPPORT_GFX_RLC_LS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CGTS_LS |
+			AMD_CG_SUPPORT_GFX_CGCG |
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_HDP_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_ROM_MGCG |
+			AMD_CG_SUPPORT_MC_MGCG |
+			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
-		adev->cg_flags = 0;
+		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
-	case CHIP_CARRIZO:
-	case CHIP_STONEY:
+	case CHIP_POLARIS11:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
+		adev->external_rev_id = adev->rev_id + 0x5A;
+		break;
+	case CHIP_POLARIS10:
+		adev->cg_flags = 0;
+		adev->pg_flags = 0;
+		adev->external_rev_id = adev->rev_id + 0x50;
+		break;
+	case CHIP_CARRIZO:
+		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			AMD_CG_SUPPORT_GFX_RLC_LS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			AMD_CG_SUPPORT_GFX_CGTS_LS |
+			AMD_CG_SUPPORT_GFX_CGCG |
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_HDP_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS;
+		adev->pg_flags = 0;
+		adev->external_rev_id = adev->rev_id + 0x1;
+		break;
+	case CHIP_STONEY:
+		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			AMD_CG_SUPPORT_GFX_RLC_LS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			AMD_CG_SUPPORT_GFX_CGTS_LS |
+			AMD_CG_SUPPORT_GFX_CGCG |
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_HDP_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	default:
@@ -1164,24 +1343,19 @@ static int vi_common_wait_for_idle(void *handle)
	return 0;
}

-static void vi_common_print_status(void *handle)
-{
-	return;
-}
-
static int vi_common_soft_reset(void *handle)
{
	return 0;
}

-static void fiji_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
-						     bool enable)
+static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
+						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

-	if (enable)
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
@@ -1194,14 +1368,14 @@ static void fiji_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

-static void fiji_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
-						      bool enable)
+static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
+						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

-	if (enable)
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
@@ -1210,14 +1384,14 @@ static void fiji_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

-static void fiji_update_hdp_light_sleep(struct amdgpu_device *adev,
-					bool enable)
+static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
+				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

-	if (enable)
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
@@ -1226,14 +1400,14 @@ static void fiji_update_hdp_light_sleep(struct amdgpu_device *adev,
		WREG32(mmHDP_MEM_POWER_LS, data);
}

-static void fiji_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
-						      bool enable)
+static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
+						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

-	if (enable)
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
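All the renamed vi_update_* helpers gained the same guard: a gating feature is only switched on when the caller requested it AND the ASIC advertises it in cg_flags, while disabling stays unconditional so an unsupported part always lands in the safe, ungated state. Schematic version (flag value invented):

#include <stdbool.h>
#include <stdint.h>

#define CG_SUPPORT_HDP_LS	(1u << 0)

static uint32_t apply_ls_bit(uint32_t reg, bool enable,
			     uint32_t cg_flags, uint32_t ls_mask)
{
	if (enable && (cg_flags & CG_SUPPORT_HDP_LS))
		reg |= ls_mask;		/* requested and supported: enable */
	else
		reg &= ~ls_mask;	/* otherwise force the feature off */
	return reg;
}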
@@ -1245,19 +1419,28 @@ static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev
}

static int vi_common_set_clockgating_state(void *handle,
-					  enum amd_clockgating_state state)
+					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_FIJI:
-		fiji_update_bif_medium_grain_light_sleep(adev,
+		vi_update_bif_medium_grain_light_sleep(adev,
+				state == AMD_CG_STATE_GATE ? true : false);
+		vi_update_hdp_medium_grain_clock_gating(adev,
+				state == AMD_CG_STATE_GATE ? true : false);
+		vi_update_hdp_light_sleep(adev,
+				state == AMD_CG_STATE_GATE ? true : false);
+		vi_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
-		fiji_update_hdp_medium_grain_clock_gating(adev,
+		break;
+	case CHIP_CARRIZO:
+	case CHIP_STONEY:
+		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
-		fiji_update_hdp_light_sleep(adev,
+		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
-		fiji_update_rom_medium_grain_clock_gating(adev,
+		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
@@ -1273,6 +1456,7 @@ static int vi_common_set_powergating_state(void *handle,
}

const struct amd_ip_funcs vi_common_ip_funcs = {
+	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = NULL,
	.sw_init = vi_common_sw_init,
@@ -1284,7 +1468,6 @@ const struct amd_ip_funcs vi_common_ip_funcs = {
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
-	.print_status = vi_common_print_status,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index ace49976f7be..062ee1676480 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -54,7 +54,8 @@
#define AUD3_REGISTER_OFFSET (0x17b4 - 0x17a8)
#define AUD4_REGISTER_OFFSET (0x17b8 - 0x17a8)
#define AUD5_REGISTER_OFFSET (0x17bc - 0x17a8)
-#define AUD6_REGISTER_OFFSET (0x17c4 - 0x17a8)
+#define AUD6_REGISTER_OFFSET (0x17c0 - 0x17a8)
+#define AUD7_REGISTER_OFFSET (0x17c4 - 0x17a8)

/* hpd instance offsets */
#define HPD0_REGISTER_OFFSET (0x1898 - 0x1898)
@@ -365,7 +366,7 @@
#define VCE_CMD_IB	0x00000002
#define VCE_CMD_FENCE	0x00000003
#define VCE_CMD_TRAP	0x00000004
-#define VCE_CMD_IB_AUTO	0x00000005
+#define VCE_CMD_IB_AUTO	0x00000005
#define VCE_CMD_SEMAPHORE	0x00000006

#endif
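The vid.h hunk fixes a gap in the audio instance offsets: the old AUD6 value (0x17c4 - 0x17a8) skipped 0x17c0, and the fix makes AUD6 0x17c0 while adding AUD7 at 0x17c4, restoring the uniform 4-byte stride from the instance-0 base. Each macro is an instance's distance from that base, so a block's registers are addressed as base register plus offset. Minimal sketch of the arithmetic (helper name invented):

#include <stdint.h>

#define AUD_STRIDE	0x4u	/* one register slot per audio instance */

static inline uint32_t aud_register(uint32_t reg_inst0, unsigned int inst)
{
	/* e.g. instance 6 -> reg_inst0 + 0x18, matching the fixed
	 * AUD6_REGISTER_OFFSET of (0x17c0 - 0x17a8) */
	return reg_inst0 + inst * AUD_STRIDE;
}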