Diff options
Diffstat (limited to 'drivers/gpu/drm/msm'):
 21 files changed, 286 insertions(+), 218 deletions(-)
| diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 983afeaee737..748cd379065f 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -796,12 +796,41 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)  	return true;  } +#define GBIF_CLIENT_HALT_MASK             BIT(0) +#define GBIF_ARB_HALT_MASK                BIT(1) + +static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu) +{ +	struct msm_gpu *gpu = &adreno_gpu->base; + +	if (!a6xx_has_gbif(adreno_gpu)) { +		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); +		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & +								0xf) == 0xf); +		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); + +		return; +	} + +	/* Halt new client requests on GBIF */ +	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); +	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & +			(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK); + +	/* Halt all AXI requests on GBIF */ +	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK); +	spin_until((gpu_read(gpu,  REG_A6XX_GBIF_HALT_ACK) & +			(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK); + +	/* The GBIF halt needs to be explicitly cleared */ +	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); +} +  /* Gracefully try to shut down the GMU and by extension the GPU */  static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)  {  	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);  	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; -	struct msm_gpu *gpu = &adreno_gpu->base;  	u32 val;  	/* @@ -819,11 +848,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)  			return;  		} -		/* Clear the VBIF pipe before shutting down */ -		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); -		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf) -			== 0xf); -		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); +		a6xx_bus_clear_pending_transactions(adreno_gpu);  		/* tell the GMU 
we want to slumber */  		a6xx_gmu_notify_slumber(gmu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index daf07800cde0..68af24150de5 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -378,18 +378,6 @@ static int a6xx_hw_init(struct msm_gpu *gpu)  	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);  	int ret; -	/* -	 * During a previous slumber, GBIF halt is asserted to ensure -	 * no further transaction can go through GPU before GPU -	 * headswitch is turned off. -	 * -	 * This halt is deasserted once headswitch goes off but -	 * incase headswitch doesn't goes off clear GBIF halt -	 * here to ensure GPU wake-up doesn't fail because of -	 * halted GPU transactions. -	 */ -	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); -  	/* Make sure the GMU keeps the GPU on while we set it up */  	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); @@ -470,10 +458,12 @@ static int a6xx_hw_init(struct msm_gpu *gpu)  	/* Select CP0 to always count cycles */  	gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT); -	gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1); -	gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1); -	gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1); -	gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21); +	if (adreno_is_a630(adreno_gpu)) { +		gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1); +		gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1); +		gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1); +		gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21); +	}  	/* Enable fault detection */  	gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, @@ -748,39 +738,6 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {  	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),  }; -#define GBIF_CLIENT_HALT_MASK             BIT(0) -#define GBIF_ARB_HALT_MASK                BIT(1) - -static void a6xx_bus_clear_pending_transactions(struct adreno_gpu 
*adreno_gpu) -{ -	struct msm_gpu *gpu = &adreno_gpu->base; - -	if(!a6xx_has_gbif(adreno_gpu)){ -		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); -		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & -								0xf) == 0xf); -		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); - -		return; -	} - -	/* Halt new client requests on GBIF */ -	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); -	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & -			(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK); - -	/* Halt all AXI requests on GBIF */ -	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK); -	spin_until((gpu_read(gpu,  REG_A6XX_GBIF_HALT_ACK) & -			(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK); - -	/* -	 * GMU needs DDR access in slumber path. Deassert GBIF halt now -	 * to allow for GMU to access system memory. -	 */ -	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); -} -  static int a6xx_pm_resume(struct msm_gpu *gpu)  {  	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -805,16 +762,6 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)  	devfreq_suspend_device(gpu->devfreq.devfreq); -	/* -	 * Make sure the GMU is idle before continuing (because some transitions -	 * may use VBIF -	 */ -	a6xx_gmu_wait_for_idle(&a6xx_gpu->gmu); - -	/* Clear the VBIF pipe before shutting down */ -	/* FIXME: This accesses the GPU - do we need to make sure it is on? 
*/ -	a6xx_bus_clear_pending_transactions(adreno_gpu); -  	return a6xx_gmu_stop(a6xx_gpu);  } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c index eda11abc5f01..e450e0b97211 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c @@ -7,6 +7,7 @@  #include "a6xx_gmu.h"  #include "a6xx_gmu.xml.h" +#include "a6xx_gpu.h"  #define HFI_MSG_ID(val) [val] = #val @@ -216,48 +217,82 @@ static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)  		NULL, 0);  } -static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu) +static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)  { -	struct a6xx_hfi_msg_bw_table msg = { 0 }; +	/* Send a single "off" entry since the 618 GMU doesn't do bus scaling */ +	msg->bw_level_num = 1; + +	msg->ddr_cmds_num = 3; +	msg->ddr_wait_bitmask = 0x01; + +	msg->ddr_cmds_addrs[0] = 0x50000; +	msg->ddr_cmds_addrs[1] = 0x5003c; +	msg->ddr_cmds_addrs[2] = 0x5000c; + +	msg->ddr_cmds_data[0][0] =  0x40000000; +	msg->ddr_cmds_data[0][1] =  0x40000000; +	msg->ddr_cmds_data[0][2] =  0x40000000;  	/* -	 * The sdm845 GMU doesn't do bus frequency scaling on its own but it -	 * does need at least one entry in the list because it might be accessed -	 * when the GMU is shutting down. Send a single "off" entry. 
+	 * These are the CX (CNOC) votes - these are used by the GMU but the +	 * votes are known and fixed for the target  	 */ +	msg->cnoc_cmds_num = 1; +	msg->cnoc_wait_bitmask = 0x01; + +	msg->cnoc_cmds_addrs[0] = 0x5007c; +	msg->cnoc_cmds_data[0][0] =  0x40000000; +	msg->cnoc_cmds_data[1][0] =  0x60000001; +} -	msg.bw_level_num = 1; +static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) +{ +	/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */ +	msg->bw_level_num = 1; -	msg.ddr_cmds_num = 3; -	msg.ddr_wait_bitmask = 0x07; +	msg->ddr_cmds_num = 3; +	msg->ddr_wait_bitmask = 0x07; -	msg.ddr_cmds_addrs[0] = 0x50000; -	msg.ddr_cmds_addrs[1] = 0x5005c; -	msg.ddr_cmds_addrs[2] = 0x5000c; +	msg->ddr_cmds_addrs[0] = 0x50000; +	msg->ddr_cmds_addrs[1] = 0x5005c; +	msg->ddr_cmds_addrs[2] = 0x5000c; -	msg.ddr_cmds_data[0][0] =  0x40000000; -	msg.ddr_cmds_data[0][1] =  0x40000000; -	msg.ddr_cmds_data[0][2] =  0x40000000; +	msg->ddr_cmds_data[0][0] =  0x40000000; +	msg->ddr_cmds_data[0][1] =  0x40000000; +	msg->ddr_cmds_data[0][2] =  0x40000000;  	/*  	 * These are the CX (CNOC) votes.  This is used but the values for the  	 * sdm845 GMU are known and fixed so we can hard code them.  	 
*/ -	msg.cnoc_cmds_num = 3; -	msg.cnoc_wait_bitmask = 0x05; +	msg->cnoc_cmds_num = 3; +	msg->cnoc_wait_bitmask = 0x05; -	msg.cnoc_cmds_addrs[0] = 0x50034; -	msg.cnoc_cmds_addrs[1] = 0x5007c; -	msg.cnoc_cmds_addrs[2] = 0x5004c; +	msg->cnoc_cmds_addrs[0] = 0x50034; +	msg->cnoc_cmds_addrs[1] = 0x5007c; +	msg->cnoc_cmds_addrs[2] = 0x5004c; -	msg.cnoc_cmds_data[0][0] =  0x40000000; -	msg.cnoc_cmds_data[0][1] =  0x00000000; -	msg.cnoc_cmds_data[0][2] =  0x40000000; +	msg->cnoc_cmds_data[0][0] =  0x40000000; +	msg->cnoc_cmds_data[0][1] =  0x00000000; +	msg->cnoc_cmds_data[0][2] =  0x40000000; + +	msg->cnoc_cmds_data[1][0] =  0x60000001; +	msg->cnoc_cmds_data[1][1] =  0x20000001; +	msg->cnoc_cmds_data[1][2] =  0x60000001; +} + + +static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu) +{ +	struct a6xx_hfi_msg_bw_table msg = { 0 }; +	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); +	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; -	msg.cnoc_cmds_data[1][0] =  0x60000001; -	msg.cnoc_cmds_data[1][1] =  0x20000001; -	msg.cnoc_cmds_data[1][2] =  0x60000001; +	if (adreno_is_a618(adreno_gpu)) +		a618_build_bw_table(&msg); +	else +		a6xx_build_bw_table(&msg);  	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),  		NULL, 0); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index bf513411b243..17448505a9b5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -1272,6 +1272,8 @@ static const struct drm_crtc_funcs dpu_crtc_funcs = {  	.atomic_destroy_state = dpu_crtc_destroy_state,  	.late_register = dpu_crtc_late_register,  	.early_unregister = dpu_crtc_early_unregister, +	.enable_vblank  = msm_crtc_enable_vblank, +	.disable_vblank = msm_crtc_disable_vblank,  };  static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 
f8ac3bf60fd6..58d3400668f5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -512,7 +512,6 @@ static void _dpu_encoder_adjust_mode(struct drm_connector *connector,  		if (cur_mode->vdisplay == adj_mode->vdisplay &&  		    cur_mode->hdisplay == adj_mode->hdisplay &&  		    drm_mode_vrefresh(cur_mode) == drm_mode_vrefresh(adj_mode)) { -			adj_mode->private = cur_mode->private;  			adj_mode->private_flags |= cur_mode->private_flags;  		}  	} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c index 528632690f1e..a05282dede91 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c @@ -255,13 +255,13 @@ static const struct dpu_format dpu_format_map[] = {  	INTERLEAVED_RGB_FMT(RGB565,  		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, -		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, +		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,  		false, 2, 0,  		DPU_FETCH_LINEAR, 1),  	INTERLEAVED_RGB_FMT(BGR565,  		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, -		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3, +		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,  		false, 2, 0,  		DPU_FETCH_LINEAR, 1), diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c index 29705e773a4b..80d3cfc14007 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c @@ -12,6 +12,7 @@  #define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base) +#define HW_REV				0x0  #define HW_INTR_STATUS			0x0010  /* Max BW defined in KBps */ @@ -22,6 +23,17 @@ struct dpu_irq_controller {  	struct irq_domain *domain;  }; +struct dpu_hw_cfg { +	u32 val; +	u32 offset; +}; + +struct dpu_mdss_hw_init_handler { +	u32 hw_rev; +	u32 hw_reg_count; +	struct dpu_hw_cfg* hw_cfg; +}; +  struct dpu_mdss {  	struct msm_mdss base;  	void __iomem *mmio; @@ -32,6 +44,44 @@ struct dpu_mdss {  	u32 num_paths;  }; +static struct dpu_hw_cfg hw_cfg[] = { +    { +	/* UBWC 
global settings */ +	.val = 0x1E, +	.offset = 0x144, +    } +}; + +static struct dpu_mdss_hw_init_handler cfg_handler[] = { +    { .hw_rev = DPU_HW_VER_620, +      .hw_reg_count = ARRAY_SIZE(hw_cfg), +      .hw_cfg = hw_cfg +    }, +}; + +static void dpu_mdss_hw_init(struct dpu_mdss *dpu_mdss, u32 hw_rev) +{ +	int i; +	u32 count = 0; +	struct dpu_hw_cfg *hw_cfg = NULL; + +	for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) { +		if (cfg_handler[i].hw_rev == hw_rev) { +			hw_cfg = cfg_handler[i].hw_cfg; +			count = cfg_handler[i].hw_reg_count; +			break; +	    } +	} + +	for (i = 0; i < count; i++ ) { +		writel_relaxed(hw_cfg->val, +			dpu_mdss->mmio + hw_cfg->offset); +		hw_cfg++; +	} + +    return; +} +  static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev,  						struct dpu_mdss *dpu_mdss)  { @@ -174,12 +224,18 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)  	struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);  	struct dss_module_power *mp = &dpu_mdss->mp;  	int ret; +	u32 mdss_rev;  	dpu_mdss_icc_request_bw(mdss);  	ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true); -	if (ret) +	if (ret) {  		DPU_ERROR("clock enable failed, ret:%d\n", ret); +		return ret; +	} + +	mdss_rev = readl_relaxed(dpu_mdss->mmio + HW_REV); +	dpu_mdss_hw_init(dpu_mdss, mdss_rev);  	return ret;  } diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c index f34dca5d4532..c9239b07fe4f 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c @@ -481,6 +481,8 @@ static const struct drm_crtc_funcs mdp4_crtc_funcs = {  	.reset = drm_atomic_helper_crtc_reset,  	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,  	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +	.enable_vblank  = msm_crtc_enable_vblank, +	.disable_vblank = msm_crtc_disable_vblank,  };  static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = { diff --git 
a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index 05cc04f729d6..998bef1190a3 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -405,6 +405,83 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)  	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);  } +static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc) +{ +	struct drm_device *dev = crtc->dev; +	struct drm_encoder *encoder; + +	drm_for_each_encoder(encoder, dev) +		if (encoder->crtc == crtc) +			return encoder; + +	return NULL; +} + +static bool mdp5_crtc_get_scanout_position(struct drm_crtc *crtc, +					   bool in_vblank_irq, +					   int *vpos, int *hpos, +					   ktime_t *stime, ktime_t *etime, +					   const struct drm_display_mode *mode) +{ +	unsigned int pipe = crtc->index; +	struct drm_encoder *encoder; +	int line, vsw, vbp, vactive_start, vactive_end, vfp_end; + + +	encoder = get_encoder_from_crtc(crtc); +	if (!encoder) { +		DRM_ERROR("no encoder found for crtc %d\n", pipe); +		return false; +	} + +	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start; +	vbp = mode->crtc_vtotal - mode->crtc_vsync_end; + +	/* +	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at +	 * the end of VFP. Translate the porch values relative to the line +	 * counter positions. 
+	 */ + +	vactive_start = vsw + vbp + 1; + +	vactive_end = vactive_start + mode->crtc_vdisplay; + +	/* last scan line before VSYNC */ +	vfp_end = mode->crtc_vtotal; + +	if (stime) +		*stime = ktime_get(); + +	line = mdp5_encoder_get_linecount(encoder); + +	if (line < vactive_start) +		line -= vactive_start; +	else if (line > vactive_end) +		line = line - vfp_end - vactive_start; +	else +		line -= vactive_start; + +	*vpos = line; +	*hpos = 0; + +	if (etime) +		*etime = ktime_get(); + +	return true; +} + +static u32 mdp5_crtc_get_vblank_counter(struct drm_crtc *crtc) +{ +	struct drm_encoder *encoder; + +	encoder = get_encoder_from_crtc(crtc); +	if (!encoder) +		return 0; + +	return mdp5_encoder_get_framecount(encoder); +} +  static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,  				     struct drm_crtc_state *old_state)  { @@ -1054,6 +1131,10 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {  	.cursor_set = mdp5_crtc_cursor_set,  	.cursor_move = mdp5_crtc_cursor_move,  	.atomic_print_state = mdp5_crtc_atomic_print_state, +	.get_vblank_counter = mdp5_crtc_get_vblank_counter, +	.enable_vblank  = msm_crtc_enable_vblank, +	.disable_vblank = msm_crtc_disable_vblank, +	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,  };  static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { @@ -1063,6 +1144,7 @@ static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {  	.atomic_flush = mdp5_crtc_atomic_flush,  	.atomic_enable = mdp5_crtc_atomic_enable,  	.atomic_disable = mdp5_crtc_atomic_disable, +	.get_scanout_position = mdp5_crtc_get_scanout_position,  };  static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) @@ -1109,8 +1191,8 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)  	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,  						msecs_to_jiffies(50));  	if (ret == 0) -		dev_warn(dev->dev, "pp done time out, lm=%d\n", -			 mdp5_cstate->pipeline.mixer->lm); +		
dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n", +				     mdp5_cstate->pipeline.mixer->lm);  }  static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index e43ecd4be10a..6650f478b226 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -583,98 +583,6 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,  	return 0;  } -static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc) -{ -	struct drm_device *dev = crtc->dev; -	struct drm_encoder *encoder; - -	drm_for_each_encoder(encoder, dev) -		if (encoder->crtc == crtc) -			return encoder; - -	return NULL; -} - -static bool mdp5_get_scanoutpos(struct drm_device *dev, unsigned int pipe, -				bool in_vblank_irq, int *vpos, int *hpos, -				ktime_t *stime, ktime_t *etime, -				const struct drm_display_mode *mode) -{ -	struct msm_drm_private *priv = dev->dev_private; -	struct drm_crtc *crtc; -	struct drm_encoder *encoder; -	int line, vsw, vbp, vactive_start, vactive_end, vfp_end; - -	crtc = priv->crtcs[pipe]; -	if (!crtc) { -		DRM_ERROR("Invalid crtc %d\n", pipe); -		return false; -	} - -	encoder = get_encoder_from_crtc(crtc); -	if (!encoder) { -		DRM_ERROR("no encoder found for crtc %d\n", pipe); -		return false; -	} - -	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start; -	vbp = mode->crtc_vtotal - mode->crtc_vsync_end; - -	/* -	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at -	 * the end of VFP. Translate the porch values relative to the line -	 * counter positions. 
-	 */ - -	vactive_start = vsw + vbp + 1; - -	vactive_end = vactive_start + mode->crtc_vdisplay; - -	/* last scan line before VSYNC */ -	vfp_end = mode->crtc_vtotal; - -	if (stime) -		*stime = ktime_get(); - -	line = mdp5_encoder_get_linecount(encoder); - -	if (line < vactive_start) { -		line -= vactive_start; -	} else if (line > vactive_end) { -		line = line - vfp_end - vactive_start; -	} else { -		line -= vactive_start; -	} - -	*vpos = line; -	*hpos = 0; - -	if (etime) -		*etime = ktime_get(); - -	return true; -} - -static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe) -{ -	struct msm_drm_private *priv = dev->dev_private; -	struct drm_crtc *crtc; -	struct drm_encoder *encoder; - -	if (pipe >= priv->num_crtcs) -		return 0; - -	crtc = priv->crtcs[pipe]; -	if (!crtc) -		return 0; - -	encoder = get_encoder_from_crtc(crtc); -	if (!encoder) -		return 0; - -	return mdp5_encoder_get_framecount(encoder); -} -  struct msm_kms *mdp5_kms_init(struct drm_device *dev)  {  	struct msm_drm_private *priv = dev->dev_private; @@ -762,9 +670,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)  	dev->mode_config.max_width = 0xffff;  	dev->mode_config.max_height = 0xffff; -	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; -	dev->driver->get_scanout_position = mdp5_get_scanoutpos; -	dev->driver->get_vblank_counter = mdp5_get_vblank_counter;  	dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */  	dev->vblank_disable_immediate = true; diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 104115d112eb..4b363bd7ddff 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -336,7 +336,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)  	return num;  } -static int dsi_mgr_connector_mode_valid(struct drm_connector *connector, +static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector 
*connector,  				struct drm_display_mode *mode)  {  	int id = dsi_mgr_connector_get_id(connector); @@ -506,6 +506,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)  	struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);  	struct mipi_dsi_host *host = msm_dsi->host;  	struct drm_panel *panel = msm_dsi->panel; +	struct msm_dsi_pll *src_pll;  	bool is_dual_dsi = IS_DUAL_DSI();  	int ret; @@ -539,6 +540,10 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)  								id, ret);  	} +	/* Save PLL status if it is a clock source */ +	src_pll = msm_dsi_phy_get_pll(msm_dsi->phy); +	msm_dsi_pll_save_state(src_pll); +  	ret = msm_dsi_host_power_off(host);  	if (ret)  		pr_err("%s: host %d power off failed,%d\n", __func__, id, ret); @@ -684,7 +689,7 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)  	bridge = &dsi_bridge->base;  	bridge->funcs = &dsi_mgr_bridge_funcs; -	ret = drm_bridge_attach(encoder, bridge, NULL); +	ret = drm_bridge_attach(encoder, bridge, NULL, 0);  	if (ret)  		goto fail; @@ -713,7 +718,7 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)  	encoder = msm_dsi->encoder;  	/* link the internal dsi bridge to the external bridge */ -	drm_bridge_attach(encoder, ext_bridge, int_bridge); +	drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);  	/*  	 * we need the drm_connector created by the external bridge diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index b0cfa67d2a57..f509ebd77500 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -724,10 +724,6 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)  	if (!phy || !phy->cfg->ops.disable)  		return; -	/* Save PLL status if it is a clock source */ -	if (phy->usecase != MSM_DSI_PHY_SLAVE) -		msm_dsi_pll_save_state(phy->pll); -  	phy->cfg->ops.disable(phy);  	dsi_phy_regulator_disable(phy); diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c 
b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c index 8f6100db90ed..6ac04fc303f5 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c @@ -411,6 +411,12 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)  	if (pll_10nm->slave)  		dsi_pll_enable_pll_bias(pll_10nm->slave); +	rc = dsi_pll_10nm_vco_set_rate(hw,pll_10nm->vco_current_rate, 0); +	if (rc) { +		pr_err("vco_set_rate failed, rc=%d\n", rc); +		return rc; +	} +  	/* Start PLL */  	pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,  		  0x01); @@ -751,9 +757,9 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)  	snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);  	hw = clk_hw_register_mux(dev, clk_name, -				 (const char *[]){ +				 ((const char *[]){  				 parent, parent2, parent3, parent4 -				 }, 4, 0, pll_10nm->phy_cmn_mmio + +				 }), 4, 0, pll_10nm->phy_cmn_mmio +  				 REG_DSI_10nm_PHY_CMN_CLK_CFG1,  				 0, 2, 0, NULL);  	if (IS_ERR(hw)) { diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c index 8c99e01ae332..6dffd7f4a99b 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c @@ -554,9 +554,9 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)  	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);  	snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);  	clks[num++] = clk_register_mux(dev, clk_name, -			(const char *[]){ +			((const char *[]){  				parent1, parent2 -			}, 2, CLK_SET_RATE_PARENT, pll_28nm->mmio + +			}), 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +  			REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);  	snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id); diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c index ad4e963ccd9b..a78d6077802b 100644 --- a/drivers/gpu/drm/msm/edp/edp.c +++ b/drivers/gpu/drm/msm/edp/edp.c @@ -178,7 +178,7 @@ int 
msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,  		goto fail;  	} -	ret = drm_bridge_attach(encoder, edp->bridge, NULL); +	ret = drm_bridge_attach(encoder, edp->bridge, NULL, 0);  	if (ret)  		goto fail; diff --git a/drivers/gpu/drm/msm/edp/edp_bridge.c b/drivers/gpu/drm/msm/edp/edp_bridge.c index b65b5cc2dba2..c69a37e0c708 100644 --- a/drivers/gpu/drm/msm/edp/edp_bridge.c +++ b/drivers/gpu/drm/msm/edp/edp_bridge.c @@ -97,7 +97,7 @@ struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp)  	bridge = &edp_bridge->base;  	bridge->funcs = &edp_bridge_funcs; -	ret = drm_bridge_attach(edp->encoder, bridge, NULL); +	ret = drm_bridge_attach(edp->encoder, bridge, NULL, 0);  	if (ret)  		goto fail; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 1a9b6289637d..3a8646535c14 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -327,7 +327,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,  		goto fail;  	} -	ret = drm_bridge_attach(encoder, hdmi->bridge, NULL); +	ret = drm_bridge_attach(encoder, hdmi->bridge, NULL, 0);  	if (ret)  		goto fail; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index ba81338a9bf8..6e380db9287b 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c @@ -287,7 +287,7 @@ struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)  	bridge = &hdmi_bridge->base;  	bridge->funcs = &msm_hdmi_bridge_funcs; -	ret = drm_bridge_attach(hdmi->encoder, bridge, NULL); +	ret = drm_bridge_attach(hdmi->encoder, bridge, NULL, 0);  	if (ret)  		goto fail; diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index f50fefb87040..2a82c23a6e4d 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -138,7 +138,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,  	size = resource_size(res); -	ptr = 
devm_ioremap_nocache(&pdev->dev, res->start, size); +	ptr = devm_ioremap(&pdev->dev, res->start, size);  	if (!ptr) {  		DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);  		return ERR_PTR(-ENOMEM); @@ -441,6 +441,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)  	if (ret)  		goto err_msm_uninit; +	if (!dev->dma_parms) { +		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), +					      GFP_KERNEL); +		if (!dev->dma_parms) +			return -ENOMEM; +	} +	dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); +  	msm_gem_shrinker_init(ddev);  	switch (get_mdp_ver(pdev)) { @@ -660,8 +668,10 @@ static void msm_irq_uninstall(struct drm_device *dev)  	kms->funcs->irq_uninstall(kms);  } -static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe) +int msm_crtc_enable_vblank(struct drm_crtc *crtc)  { +	struct drm_device *dev = crtc->dev; +	unsigned int pipe = crtc->index;  	struct msm_drm_private *priv = dev->dev_private;  	struct msm_kms *kms = priv->kms;  	if (!kms) @@ -670,8 +680,10 @@ static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)  	return vblank_ctrl_queue_work(priv, pipe, true);  } -static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe) +void msm_crtc_disable_vblank(struct drm_crtc *crtc)  { +	struct drm_device *dev = crtc->dev; +	unsigned int pipe = crtc->index;  	struct msm_drm_private *priv = dev->dev_private;  	struct msm_kms *kms = priv->kms;  	if (!kms) @@ -996,8 +1008,6 @@ static struct drm_driver msm_driver = {  	.irq_preinstall     = msm_irq_preinstall,  	.irq_postinstall    = msm_irq_postinstall,  	.irq_uninstall      = msm_irq_uninstall, -	.enable_vblank      = msm_enable_vblank, -	.disable_vblank     = msm_disable_vblank,  	.gem_free_object_unlocked = msm_gem_free_object,  	.gem_vm_ops         = &vm_ops,  	.dumb_create        = msm_gem_dumb_create, diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 71547e756e29..194d900a460e 100644 --- 
a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -232,6 +232,9 @@ struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);  void msm_atomic_state_clear(struct drm_atomic_state *state);  void msm_atomic_state_free(struct drm_atomic_state *state); +int msm_crtc_enable_vblank(struct drm_crtc *crtc); +void msm_crtc_disable_vblank(struct drm_crtc *crtc); +  int msm_gem_init_vma(struct msm_gem_address_space *aspace,  		struct msm_gem_vma *vma, int npages);  void msm_gem_purge_vma(struct msm_gem_address_space *aspace, @@ -454,8 +457,7 @@ static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)  		remaining_jiffies = 0;  	} else {  		ktime_t rem = ktime_sub(*timeout, now); -		struct timespec ts = ktime_to_timespec(rem); -		remaining_jiffies = timespec_to_jiffies(&ts); +		remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);  	}  	return remaining_jiffies; diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index db48867df47d..47235f8c5922 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -160,16 +160,12 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)  	drm_fb_helper_prepare(dev, helper, &msm_fb_helper_funcs); -	ret = drm_fb_helper_init(dev, helper, priv->num_connectors); +	ret = drm_fb_helper_init(dev, helper);  	if (ret) {  		DRM_DEV_ERROR(dev->dev, "could not init fbdev: ret=%d\n", ret);  		goto fail;  	} -	ret = drm_fb_helper_single_add_all_connectors(helper); -	if (ret) -		goto fini; -  	/* the fw fb could be anywhere in memory */  	drm_fb_helper_remove_conflicting_framebuffers(NULL, "msm", false); |