Diffstat (limited to 'drivers/clocksource/arm_arch_timer.c')
-rw-r--r--	drivers/clocksource/arm_arch_timer.c | 243
1 file changed, 147 insertions, 96 deletions
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index be6d741d404c..9a04eacc4412 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -44,23 +44,29 @@
 #define CNTACR_RWVT	BIT(4)
 #define CNTACR_RWPT	BIT(5)

-#define CNTVCT_LO	0x08
-#define CNTVCT_HI	0x0c
+#define CNTVCT_LO	0x00
+#define CNTPCT_LO	0x08
 #define CNTFRQ		0x10
-#define CNTP_TVAL	0x28
+#define CNTP_CVAL_LO	0x20
 #define CNTP_CTL	0x2c
-#define CNTV_TVAL	0x38
+#define CNTV_CVAL_LO	0x30
 #define CNTV_CTL	0x3c

-static unsigned arch_timers_present __initdata;
+/*
+ * The minimum amount of time a generic counter is guaranteed to not roll over
+ * (40 years)
+ */
+#define MIN_ROLLOVER_SECS	(40ULL * 365 * 24 * 3600)

-static void __iomem *arch_counter_base __ro_after_init;
+static unsigned arch_timers_present __initdata;

 struct arch_timer {
 	void __iomem *base;
 	struct clock_event_device evt;
 };

+static struct arch_timer *arch_timer_mem __ro_after_init;
+
 #define to_arch_timer(e) container_of(e, struct arch_timer, evt)

 static u32 arch_timer_rate __ro_after_init;
@@ -96,32 +102,57 @@ static int __init early_evtstrm_cfg(char *buf)
 early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);

 /*
+ * Makes an educated guess at a valid counter width based on the Generic Timer
+ * specification. Of note:
+ *   1) the system counter is at least 56 bits wide
+ *   2) a roll-over time of not less than 40 years
+ *
+ * See 'ARM DDI 0487G.a D11.1.2 ("The system counter")' for more details.
+ */
+static int arch_counter_get_width(void)
+{
+	u64 min_cycles = MIN_ROLLOVER_SECS * arch_timer_rate;
+
+	/* guarantee the returned width is within the valid range */
+	return clamp_val(ilog2(min_cycles - 1) + 1, 56, 64);
+}
+
+/*
  * Architected system timer support.
  */
 static __always_inline
-void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
+void arch_timer_reg_write(int access, enum arch_timer_reg reg, u64 val,
 			  struct clock_event_device *clk)
 {
 	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
 		struct arch_timer *timer = to_arch_timer(clk);
 		switch (reg) {
 		case ARCH_TIMER_REG_CTRL:
-			writel_relaxed(val, timer->base + CNTP_CTL);
+			writel_relaxed((u32)val, timer->base + CNTP_CTL);
 			break;
-		case ARCH_TIMER_REG_TVAL:
-			writel_relaxed(val, timer->base + CNTP_TVAL);
+		case ARCH_TIMER_REG_CVAL:
+			/*
+			 * Not guaranteed to be atomic, so the timer
+			 * must be disabled at this point.
+			 */
+			writeq_relaxed(val, timer->base + CNTP_CVAL_LO);
 			break;
+		default:
+			BUILD_BUG();
 		}
 	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
 		struct arch_timer *timer = to_arch_timer(clk);
 		switch (reg) {
 		case ARCH_TIMER_REG_CTRL:
-			writel_relaxed(val, timer->base + CNTV_CTL);
+			writel_relaxed((u32)val, timer->base + CNTV_CTL);
 			break;
-		case ARCH_TIMER_REG_TVAL:
-			writel_relaxed(val, timer->base + CNTV_TVAL);
+		case ARCH_TIMER_REG_CVAL:
+			/* Same restriction as above */
+			writeq_relaxed(val, timer->base + CNTV_CVAL_LO);
 			break;
+		default:
+			BUILD_BUG();
 		}
 	} else {
 		arch_timer_reg_write_cp15(access, reg, val);
@@ -140,9 +171,8 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
 		case ARCH_TIMER_REG_CTRL:
 			val = readl_relaxed(timer->base + CNTP_CTL);
 			break;
-		case ARCH_TIMER_REG_TVAL:
-			val = readl_relaxed(timer->base + CNTP_TVAL);
-			break;
+		default:
+			BUILD_BUG();
 		}
 	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
 		struct arch_timer *timer = to_arch_timer(clk);
@@ -150,9 +180,8 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
 		case ARCH_TIMER_REG_CTRL:
 			val = readl_relaxed(timer->base + CNTV_CTL);
 			break;
-		case ARCH_TIMER_REG_TVAL:
-			val = readl_relaxed(timer->base + CNTV_TVAL);
-			break;
+		default:
+			BUILD_BUG();
 		}
 	} else {
 		val = arch_timer_reg_read_cp15(access, reg);
@@ -205,13 +234,11 @@ static struct clocksource clocksource_counter = {
 	.id	= CSID_ARM_ARCH_COUNTER,
 	.rating	= 400,
 	.read	= arch_counter_read,
-	.mask	= CLOCKSOURCE_MASK(56),
 	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 };

 static struct cyclecounter cyclecounter __ro_after_init = {
 	.read	= arch_counter_read_cc,
-	.mask	= CLOCKSOURCE_MASK(56),
 };

 struct ate_acpi_oem_info {
@@ -239,16 +266,6 @@ struct ate_acpi_oem_info {
 	_new;						\
 })

-static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
-{
-	return __fsl_a008585_read_reg(cntp_tval_el0);
-}
-
-static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
-{
-	return __fsl_a008585_read_reg(cntv_tval_el0);
-}
-
 static u64 notrace fsl_a008585_read_cntpct_el0(void)
 {
 	return __fsl_a008585_read_reg(cntpct_el0);
@@ -285,16 +302,6 @@ static u64 notrace fsl_a008585_read_cntvct_el0(void)
 	_new;							\
 })

-static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
-{
-	return __hisi_161010101_read_reg(cntp_tval_el0);
-}
-
-static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
-{
-	return __hisi_161010101_read_reg(cntv_tval_el0);
-}
-
 static u64 notrace hisi_161010101_read_cntpct_el0(void)
 {
 	return __hisi_161010101_read_reg(cntpct_el0);
@@ -379,16 +386,6 @@ static u64 notrace sun50i_a64_read_cntvct_el0(void)
 {
 	return __sun50i_a64_read_reg(cntvct_el0);
 }
-
-static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
-{
-	return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
-}
-
-static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
-{
-	return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
-}
 #endif

 #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
@@ -397,7 +394,7 @@ EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

 static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);

-static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
+static void erratum_set_next_event_generic(const int access, unsigned long evt,
 						struct clock_event_device *clk)
 {
 	unsigned long ctrl;
@@ -418,17 +415,17 @@ static void erratum_set_next_event_tval_generic(const int access, unsigned long
 	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
 }

-static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt,
+static __maybe_unused int erratum_set_next_event_virt(unsigned long evt,
 					    struct clock_event_device *clk)
 {
-	erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
+	erratum_set_next_event_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
 	return 0;
 }

-static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
+static __maybe_unused int erratum_set_next_event_phys(unsigned long evt,
 					    struct clock_event_device *clk)
 {
-	erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
+	erratum_set_next_event_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
 	return 0;
 }

@@ -438,12 +435,10 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
 		.match_type = ate_match_dt,
 		.id = "fsl,erratum-a008585",
 		.desc = "Freescale erratum a005858",
-		.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
-		.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
 		.read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
 		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
-		.set_next_event_phys = erratum_set_next_event_tval_phys,
-		.set_next_event_virt = erratum_set_next_event_tval_virt,
+		.set_next_event_phys = erratum_set_next_event_phys,
+		.set_next_event_virt = erratum_set_next_event_virt,
 	},
 #endif
 #ifdef CONFIG_HISILICON_ERRATUM_161010101
@@ -451,23 +446,19 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
 		.match_type = ate_match_dt,
 		.id = "hisilicon,erratum-161010101",
 		.desc = "HiSilicon erratum 161010101",
-		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
-		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
 		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
 		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
-		.set_next_event_phys = erratum_set_next_event_tval_phys,
-		.set_next_event_virt = erratum_set_next_event_tval_virt,
+		.set_next_event_phys = erratum_set_next_event_phys,
+		.set_next_event_virt = erratum_set_next_event_virt,
 	},
 	{
 		.match_type = ate_match_acpi_oem_info,
 		.id = hisi_161010101_oem_info,
 		.desc = "HiSilicon erratum 161010101",
-		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
-		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
 		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
 		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
-		.set_next_event_phys = erratum_set_next_event_tval_phys,
-		.set_next_event_virt = erratum_set_next_event_tval_virt,
+		.set_next_event_phys = erratum_set_next_event_phys,
+		.set_next_event_virt = erratum_set_next_event_virt,
 	},
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_858921
@@ -484,12 +475,10 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
 		.match_type = ate_match_dt,
 		.id = "allwinner,erratum-unknown1",
 		.desc = "Allwinner erratum UNKNOWN1",
-		.read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0,
-		.read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0,
 		.read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
 		.read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
-		.set_next_event_phys = erratum_set_next_event_tval_phys,
-		.set_next_event_virt = erratum_set_next_event_tval_virt,
+		.set_next_event_phys = erratum_set_next_event_phys,
+		.set_next_event_virt = erratum_set_next_event_virt,
 	},
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_1418040
@@ -727,10 +716,18 @@ static __always_inline void set_next_event(const int access, unsigned long evt,
 					   struct clock_event_device *clk)
 {
 	unsigned long ctrl;
+	u64 cnt;
+
 	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
 	ctrl |= ARCH_TIMER_CTRL_ENABLE;
 	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
-	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
+
+	if (access == ARCH_TIMER_PHYS_ACCESS)
+		cnt = __arch_counter_get_cntpct();
+	else
+		cnt = __arch_counter_get_cntvct();
+
+	arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
 	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
 }

@@ -748,23 +745,79 @@ static int arch_timer_set_next_event_phys(unsigned long evt,
 	return 0;
 }

+static u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo)
+{
+	u32 cnt_lo, cnt_hi, tmp_hi;
+
+	do {
+		cnt_hi = readl_relaxed(t->base + offset_lo + 4);
+		cnt_lo = readl_relaxed(t->base + offset_lo);
+		tmp_hi = readl_relaxed(t->base + offset_lo + 4);
+	} while (cnt_hi != tmp_hi);
+
+	return ((u64) cnt_hi << 32) | cnt_lo;
+}
+
+static __always_inline void set_next_event_mem(const int access, unsigned long evt,
+					   struct clock_event_device *clk)
+{
+	struct arch_timer *timer = to_arch_timer(clk);
+	unsigned long ctrl;
+	u64 cnt;
+
+	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+	ctrl |= ARCH_TIMER_CTRL_ENABLE;
+	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
+
+	if (access ==  ARCH_TIMER_MEM_VIRT_ACCESS)
+		cnt = arch_counter_get_cnt_mem(timer, CNTVCT_LO);
+	else
+		cnt = arch_counter_get_cnt_mem(timer, CNTPCT_LO);
+
+	arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
+	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+}
+
 static int arch_timer_set_next_event_virt_mem(unsigned long evt,
 					      struct clock_event_device *clk)
 {
-	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
+	set_next_event_mem(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
 	return 0;
 }

 static int arch_timer_set_next_event_phys_mem(unsigned long evt,
 					      struct clock_event_device *clk)
 {
-	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
+	set_next_event_mem(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
 	return 0;
 }

+static u64 __arch_timer_check_delta(void)
+{
+#ifdef CONFIG_ARM64
+	const struct midr_range broken_cval_midrs[] = {
+		/*
+		 * XGene-1 implements CVAL in terms of TVAL, meaning
+		 * that the maximum timer range is 32bit. Shame on them.
+		 */
+		MIDR_ALL_VERSIONS(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
+						 APM_CPU_PART_POTENZA)),
+		{},
+	};
+
+	if (is_midr_in_range_list(read_cpuid_id(), broken_cval_midrs)) {
+		pr_warn_once("Broken CNTx_CVAL_EL1, limiting width to 32bits");
+		return CLOCKSOURCE_MASK(32);
+	}
+#endif
+	return CLOCKSOURCE_MASK(arch_counter_get_width());
+}
+
 static void __arch_timer_setup(unsigned type,
 			       struct clock_event_device *clk)
 {
+	u64 max_delta;
+
 	clk->features = CLOCK_EVT_FEAT_ONESHOT;

 	if (type == ARCH_TIMER_TYPE_CP15) {
@@ -796,6 +849,7 @@ static void __arch_timer_setup(unsigned type,
 		}

 		clk->set_next_event = sne;
+		max_delta = __arch_timer_check_delta();
 	} else {
 		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
 		clk->name = "arch_mem_timer";
@@ -812,11 +866,13 @@ static void __arch_timer_setup(unsigned type,
 			clk->set_next_event =
 				arch_timer_set_next_event_phys_mem;
 		}
+
+		max_delta = CLOCKSOURCE_MASK(56);
 	}

 	clk->set_state_shutdown(clk);

-	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
+	clockevents_config_and_register(clk, arch_timer_rate, 0xf, max_delta);
 }

 static void arch_timer_evtstrm_enable(int divider)
@@ -986,15 +1042,7 @@ bool arch_timer_evtstrm_available(void)

 static u64 arch_counter_get_cntvct_mem(void)
 {
-	u32 vct_lo, vct_hi, tmp_hi;
-
-	do {
-		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
-		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
-		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
-	} while (vct_hi != tmp_hi);
-
-	return ((u64) vct_hi << 32) | vct_lo;
+	return arch_counter_get_cnt_mem(arch_timer_mem, CNTVCT_LO);
 }

 static struct arch_timer_kvm_info arch_timer_kvm_info;
@@ -1007,6 +1055,7 @@ struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
 static void __init arch_counter_register(unsigned type)
 {
 	u64 start_count;
+	int width;

 	/* Register the CP15 based counter if we have one */
 	if (type & ARCH_TIMER_TYPE_CP15) {
@@ -1031,6 +1080,10 @@ static void __init arch_counter_register(unsigned type)
 		arch_timer_read_counter = arch_counter_get_cntvct_mem;
 	}

+	width = arch_counter_get_width();
+	clocksource_counter.mask = CLOCKSOURCE_MASK(width);
+	cyclecounter.mask = CLOCKSOURCE_MASK(width);
+
 	if (!arch_counter_suspend_stop)
 		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
 	start_count = arch_timer_read_counter();
@@ -1040,8 +1093,7 @@ static void __init arch_counter_register(unsigned type)
 	timecounter_init(&arch_timer_kvm_info.timecounter,
 			 &cyclecounter, start_count);

-	/* 56 bits minimum, so we assume worst case rollover */
-	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
+	sched_clock_register(arch_timer_read_counter, width, arch_timer_rate);
 }

 static void arch_timer_stop(struct clock_event_device *clk)
@@ -1182,25 +1234,25 @@ static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
 {
 	int ret;
 	irq_handler_t func;
-	struct arch_timer *t;

-	t = kzalloc(sizeof(*t), GFP_KERNEL);
-	if (!t)
+	arch_timer_mem = kzalloc(sizeof(*arch_timer_mem), GFP_KERNEL);
+	if (!arch_timer_mem)
 		return -ENOMEM;

-	t->base = base;
-	t->evt.irq = irq;
-	__arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);
+	arch_timer_mem->base = base;
+	arch_timer_mem->evt.irq = irq;
+	__arch_timer_setup(ARCH_TIMER_TYPE_MEM, &arch_timer_mem->evt);

 	if (arch_timer_mem_use_virtual)
 		func = arch_timer_handler_virt_mem;
 	else
 		func = arch_timer_handler_phys_mem;

-	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
+	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &arch_timer_mem->evt);
 	if (ret) {
 		pr_err("Failed to request mem timer irq\n");
-		kfree(t);
+		kfree(arch_timer_mem);
+		arch_timer_mem = NULL;
 	}

 	return ret;
@@ -1458,7 +1510,6 @@ arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
 		return ret;
 	}

-	arch_counter_base = base;
 	arch_timers_present |= ARCH_TIMER_TYPE_MEM;

 	return 0;
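
Note: the counter-width guess added by this patch is simple enough to check in isolation. The following standalone C sketch (not part of the patch; ilog2_u64 and counter_width are illustrative names) reproduces the clamp_val(ilog2(min_cycles - 1) + 1, 56, 64) arithmetic in user space: a 50 MHz counter needs 56 bits to run for 40 years without rolling over, while a 1 GHz counter needs 61.

/*
 * Standalone sketch (not kernel code) of the width calculation used by
 * arch_counter_get_width() above; helper names are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

#define MIN_ROLLOVER_SECS	(40ULL * 365 * 24 * 3600)

/* integer log2: position of the most significant set bit */
static int ilog2_u64(uint64_t v)
{
	int l = -1;

	while (v) {
		v >>= 1;
		l++;
	}
	return l;
}

/* smallest width covering 40 years, clamped to the architectural 56..64 range */
static int counter_width(uint64_t rate_hz)
{
	uint64_t min_cycles = MIN_ROLLOVER_SECS * rate_hz;
	int width = ilog2_u64(min_cycles - 1) + 1;

	if (width < 56)
		width = 56;
	if (width > 64)
		width = 64;
	return width;
}

int main(void)
{
	/* prints "56 61": 50MHz and 1GHz system counters respectively */
	printf("%d %d\n", counter_width(50000000ULL), counter_width(1000000000ULL));
	return 0;
}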
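Note: the hi-lo-hi sequence introduced in arch_counter_get_cnt_mem() can likewise be illustrated outside the kernel. This is a sketch under the assumption of a 64-bit counter exposed as two independently readable 32-bit words; the struct and function names are illustrative, not kernel APIs. Re-reading the high word until it is stable guarantees that a carry between the two halves cannot produce a torn value.

/*
 * Standalone sketch (not kernel code) of the hi-lo-hi torn-read avoidance.
 */
#include <stdio.h>
#include <stdint.h>

struct split_counter {
	volatile uint32_t lo;
	volatile uint32_t hi;
};

static uint64_t read_split_counter(const struct split_counter *c)
{
	uint32_t hi, lo, tmp;

	do {
		hi  = c->hi;	/* sample the high word */
		lo  = c->lo;	/* then the low word */
		tmp = c->hi;	/* re-check the high word; retry if it moved */
	} while (hi != tmp);

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	struct split_counter c = { .lo = 0xfffffff0, .hi = 0x12 };

	/* prints 0x12fffffff0 */
	printf("%#llx\n", (unsigned long long)read_split_counter(&c));
	return 0;
}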