Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	17	++++++++---------
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7019a40457a6..bcf2c4cc0522 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -108,7 +108,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
-EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_thermal_tp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_hw_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
@@ -4741,7 +4741,6 @@ static struct ctl_table sched_core_sysctls[] = {
 		.extra2		= SYSCTL_FOUR,
 	},
 #endif /* CONFIG_NUMA_BALANCING */
-	{}
 };
 static int __init sched_core_sysctl_init(void)
 {
@@ -5662,13 +5661,13 @@ static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
  * This function gets called by the timer code, with HZ frequency.
  * We call it with interrupts disabled.
  */
-void scheduler_tick(void)
+void sched_tick(void)
 {
 	int cpu = smp_processor_id();
 	struct rq *rq = cpu_rq(cpu);
 	struct task_struct *curr = rq->curr;
 	struct rq_flags rf;
-	unsigned long thermal_pressure;
+	unsigned long hw_pressure;
 	u64 resched_latency;
 
 	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
@@ -5679,8 +5678,8 @@ void scheduler_tick(void)
 	rq_lock(rq, &rf);
 
 	update_rq_clock(rq);
-	thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
-	update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
+	hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
+	update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
 	curr->sched_class->task_tick(rq, curr, 0);
 	if (sched_feat(LATENCY_WARN))
 		resched_latency = cpu_resched_latency(rq);
@@ -5700,7 +5699,7 @@ void scheduler_tick(void)
 
 #ifdef CONFIG_SMP
 	rq->idle_balance = idle_cpu(cpu);
-	trigger_load_balance(rq);
+	sched_balance_trigger(rq);
 #endif
 }
 
@@ -6585,7 +6584,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
  *      paths. For example, see arch/x86/entry_64.S.
  *
  *      To drive preemption between tasks, the scheduler sets the flag in timer
- *      interrupt handler scheduler_tick().
+ *      interrupt handler sched_tick().
  *
  *   3. Wakeups don't really cause entry into schedule(). They add a
  *      task to the run-queue and that's it.
@@ -11402,7 +11401,7 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of,
 {
 	struct task_group *tg = css_tg(of_css(of));
 	u64 period = tg_get_cfs_period(tg);
-	u64 burst = tg_get_cfs_burst(tg);
+	u64 burst = tg->cfs_bandwidth.burst;
 	u64 quota;
 	int ret;
 
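The first hunk renames the pelt_thermal_tp tracepoint export to pelt_hw_tp, so GPL modules that attach probes to it must follow the rename. A minimal consumer sketch, assuming a tree where this diff has landed; the module and probe names (pelt_hw_demo, probe_pelt_hw) are illustrative and not part of the commit:

	/*
	 * Sketch of a probe attached to the renamed pelt_hw_tp bare
	 * tracepoint; the register/unregister helpers are generated from
	 * its DECLARE_TRACE() in include/trace/events/sched.h.
	 */
	#include <linux/module.h>
	#include <linux/tracepoint.h>
	#include <trace/events/sched.h>

	static void probe_pelt_hw(void *data, struct rq *rq)
	{
		/*
		 * Runs on every HW-pressure PELT update; struct rq is
		 * private to kernel/sched/, so treat the pointer as opaque.
		 */
	}

	static int __init pelt_hw_demo_init(void)
	{
		/* Before this diff: register_trace_pelt_thermal_tp(). */
		return register_trace_pelt_hw_tp(probe_pelt_hw, NULL);
	}

	static void __exit pelt_hw_demo_exit(void)
	{
		unregister_trace_pelt_hw_tp(probe_pelt_hw, NULL);
		/* Wait for in-flight probes before the module text goes away. */
		tracepoint_synchronize_unregister();
	}

	module_init(pelt_hw_demo_init);
	module_exit(pelt_hw_demo_exit);
	MODULE_DESCRIPTION("pelt_hw_tp consumer sketch");
	MODULE_LICENSE("GPL");

The PELT tracepoints are declared with DECLARE_TRACE() rather than TRACE_EVENT(), so they have no tracefs event directory; attaching a probe like the one above is the intended way to consume them.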